hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
7940074a7371928505748d90b9c862ee5b2d225e
82
py
Python
haystack/nodes/question_generator/__init__.py
mapapa/haystack
79fdda8a7cf393d774803608a4874f2a6e63cf6f
[ "Apache-2.0" ]
7
2022-01-22T18:58:54.000Z
2022-03-18T17:06:35.000Z
haystack/nodes/question_generator/__init__.py
mapapa/haystack
79fdda8a7cf393d774803608a4874f2a6e63cf6f
[ "Apache-2.0" ]
17
2021-12-08T18:00:58.000Z
2021-12-28T14:03:27.000Z
haystack/nodes/question_generator/__init__.py
mapapa/haystack
79fdda8a7cf393d774803608a4874f2a6e63cf6f
[ "Apache-2.0" ]
1
2022-01-05T15:24:36.000Z
2022-01-05T15:24:36.000Z
from haystack.nodes.question_generator.question_generator import QuestionGenerator
82
82
0.926829
9
82
8.222222
0.777778
0.459459
0
0
0
0
0
0
0
0
0
0
0.036585
82
1
82
82
0.936709
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
79423433cdcc39041c7fd83b1754e656cc596c82
3,178
py
Python
backend/api/models.py
AndyPaPaLeu/Disfactory
4afc370ae6b0d526891fce2b1fe0b9c687309ed1
[ "MIT" ]
null
null
null
backend/api/models.py
AndyPaPaLeu/Disfactory
4afc370ae6b0d526891fce2b1fe0b9c687309ed1
[ "MIT" ]
null
null
null
backend/api/models.py
AndyPaPaLeu/Disfactory
4afc370ae6b0d526891fce2b1fe0b9c687309ed1
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import uuid from django.conf import settings from django.contrib.gis.db import models from django.contrib.gis.geos import Point from django.contrib.postgres.fields import JSONField class Factory(models.Model): """Factories that are potential to be illegal.""" # List of fact_type & status factory_type_list = [ ("1","金屬"), ("2-1","沖床、銑床、車床、鏜孔"), ("2-2", "焊接、鑄造、熱處理"), ("2-3", "金屬表面處理、噴漆"), ("3", "塑膠加工、射出"), ("4", "橡膠加工"), ("5", "非金屬礦物(石材)"), ("6", "食品"), ("7", "皮革"), ("8", "紡織"), ("9", "其他") ] status_list = [ ("D","已舉報"), ("F","資料不齊"), ("A","待審核") ] # All Features id = models.UUIDField( primary_key=True, default=uuid.uuid4, editable=False, verbose_name="ID", ) lat = models.FloatField() lng = models.FloatField() point = models.PointField(srid=settings.POSTGIS_SRID) landcode = models.CharField(max_length=50, blank=True, null=True) name = models.CharField(max_length=50, blank=True, null=True) factory_type = models.CharField(max_length=3, choices=factory_type_list, default="9") status = models.CharField(max_length=1, choices=status_list, default="A") status_time = models.DateTimeField(auto_now_add=True) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) def save(self, *args, **kwargs): self.point = Point(self.lng, self.lat, srid=4326) self.point.transform(settings.POSTGIS_SRID) super(Factory, self).save(*args, **kwargs) class ReportRecord(models.Model): """Report records send by users. `ReportRecord` will be queried in advanced by admins from Citizen of the Earth, Taiwan. They will filter the most recent records out every a few weeks to catch the bad guys. 
""" id = models.AutoField(primary_key=True) factory = models.ForeignKey("Factory", on_delete=models.PROTECT) user_ip = models.GenericIPAddressField(default="192.168.0.1", blank=True, null=True) action_type = models.CharField(max_length=10) # PUT, POST action_body = JSONField() # request body created_at = models.DateTimeField(auto_now_add=True) contact = models.CharField(max_length=64, blank=True, null=True) others = models.CharField(max_length=1024, blank=True) class Image(models.Model): """Images of factories that are uploaded by user.""" id = models.UUIDField( primary_key=True, default=uuid.uuid4, editable=False, ) factory = models.ForeignKey( "Factory", on_delete=models.PROTECT, related_name="images", blank=True, null=True, ) report_record = models.ForeignKey( "ReportRecord", on_delete=models.PROTECT, blank=True, null=True, ) image_path = models.URLField(max_length=256) # get from Imgur created_at = models.DateTimeField(auto_now_add=True) # the DB saving time orig_time = models.DateTimeField(blank=True, null=True) # the actual photo taken time
30.854369
89
0.636249
406
3,178
4.866995
0.421182
0.036437
0.063765
0.08502
0.278846
0.236336
0.219636
0.219636
0.104251
0.060729
0
0.019192
0.22939
3,178
102
90
31.156863
0.787668
0.139711
0
0.226667
0
0
0.050483
0
0
0
0
0
0
1
0.013333
false
0
0.066667
0
0.48
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7943f595c674438a1cfec4698c62343f1a8c742b
656
py
Python
infrastructure/crypto_ml/utils/_utils.py
ATCUWgithub/CryptoML
6010c5daf7d985217fa76197b29331457a60a306
[ "MIT" ]
1
2020-02-18T00:38:16.000Z
2020-02-18T00:38:16.000Z
infrastructure/crypto_ml/utils/_utils.py
ATCUWgithub/CryptoML
6010c5daf7d985217fa76197b29331457a60a306
[ "MIT" ]
null
null
null
infrastructure/crypto_ml/utils/_utils.py
ATCUWgithub/CryptoML
6010c5daf7d985217fa76197b29331457a60a306
[ "MIT" ]
1
2020-02-18T00:39:12.000Z
2020-02-18T00:39:12.000Z
import json as _json import datetime as _datetime def parse_timestamp(dataset, time_format="%Y-%m-%dT%H:%M:%S.000Z"): for d in dataset: d["timestamp"] = _datetime.datetime.strptime(d["timestamp"], time_format) return dataset def load_json(filename, time_format="%Y-%m-%dT%H:%M:%S.000Z"): dictionary = dict() with open(filename) as f: dictionary = _json.load(f) return parse_timestamp(dictionary, time_format) def generate_config(dataset): start_idx = 0 end_idx = len(dataset) - 1 return { "test_start": dataset[start_idx]["timestamp"], "test_end": dataset[end_idx]["timestamp"] }
29.818182
81
0.660061
91
656
4.56044
0.406593
0.096386
0.053012
0.057831
0.101205
0.101205
0.101205
0.101205
0.101205
0
0
0.015209
0.198171
656
21
82
31.238095
0.773764
0
0
0
0
0
0.14939
0.067073
0
0
0
0
0
1
0.166667
false
0
0.111111
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79442688528877f19538302cd834c0bc231e8349
959
py
Python
leetcode/two_numbers_sum.py
clnFind/DayDayAlgorithm
5644a666a3d84547d8cf00031fc2e30273cc0e9a
[ "Apache-2.0" ]
null
null
null
leetcode/two_numbers_sum.py
clnFind/DayDayAlgorithm
5644a666a3d84547d8cf00031fc2e30273cc0e9a
[ "Apache-2.0" ]
null
null
null
leetcode/two_numbers_sum.py
clnFind/DayDayAlgorithm
5644a666a3d84547d8cf00031fc2e30273cc0e9a
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import copy class Solution(object): """ 给定 nums = [2, 7, 11, 15], target = 9 因为 nums[0] + nums[1] = 2 + 7 = 9 所以返回 [0, 1] """ def twoSum(self, nums, target): """ :type nums: List[int] :type target: int :rtype: List[int] """ for i in range(len(nums)): nums_copy = copy.copy(nums) nums_copy.remove(nums[i]) for j in nums_copy: if nums[i] + j == target: return i, nums.index(j) return None def two_sum(self, nums, target): for num in nums: val = target - num if val in nums: return nums.index(num), nums.index(val) return None if __name__ == '__main__': l = [3, 4, 10, 2, 7] target = 9 result = Solution().twoSum(l, target) print(result) result1 = Solution().two_sum(l, target) print(result1)
21.795455
55
0.486966
127
959
3.574803
0.401575
0.013216
0.061674
0
0
0
0
0
0
0
0
0.040201
0.377477
959
43
56
22.302326
0.720268
0.168926
0
0.086957
0
0
0.010899
0
0
0
0
0
0
1
0.086957
false
0
0.043478
0
0.347826
0.086957
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
794467ea5227d786240a4dc2c21fda99810bd1c3
1,162
py
Python
bpcs/bpcs_steg_decode.py
BburnN123/bpcs
f53caede7e202ce07b51890f028b9caf73a22937
[ "MIT" ]
20
2017-04-25T21:07:24.000Z
2022-03-30T11:11:47.000Z
bpcs/bpcs_steg_decode.py
BburnN123/bpcs
f53caede7e202ce07b51890f028b9caf73a22937
[ "MIT" ]
4
2016-04-06T01:19:27.000Z
2020-09-26T18:38:29.000Z
bpcs/bpcs_steg_decode.py
BburnN123/bpcs
f53caede7e202ce07b51890f028b9caf73a22937
[ "MIT" ]
12
2017-04-02T23:10:46.000Z
2022-03-21T03:43:55.000Z
import numpy as np from .logger import log from .array_grid import get_next_grid_dims from .act_on_image import ActOnImage from .array_message import write_conjugated_message_grids from .bpcs_steg import arr_bpcs_complexity def remove_message_from_vessel(arr, alpha, grid_size): messages = [] nfound, nkept, nleft = 0, 0, 0 complexities = [] for dims in get_next_grid_dims(arr, grid_size): nfound += 1 grid = arr[tuple(dims)] cmplx = arr_bpcs_complexity(grid) if cmplx < alpha: nleft += 1 continue complexities.append(cmplx) nkept += 1 messages.append(grid) assert nfound == nkept + nleft log.critical('Found {0} out of {1} grids with complexity above {2}'.format(nkept, nfound, alpha)) return messages class BPCSDecodeImage(ActOnImage): def modify(self, alpha): return remove_message_from_vessel(self.arr, alpha, (8,8)) def decode(infile, outfile, alpha=0.45): x = BPCSDecodeImage(infile, as_rgb=True, bitplane=True, gray=True, nbits_per_layer=8) grids = x.modify(alpha) write_conjugated_message_grids(outfile, grids, alpha)
33.2
101
0.692771
160
1,162
4.83125
0.4375
0.023286
0.028461
0.03881
0
0
0
0
0
0
0
0.016447
0.215146
1,162
34
102
34.176471
0.83114
0
0
0
0
0
0.04475
0
0
0
0
0
0.033333
1
0.1
false
0
0.2
0.033333
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7946dedb29967a5ff96a8d7cd312b2fd2bc51b15
6,859
py
Python
notebooks/02_crash_severity.py
jennan/crash_prediction
498b59704ed2aca61c78e4eb7c5558abe9edaffc
[ "MIT" ]
3
2020-12-07T04:07:04.000Z
2021-08-19T10:41:08.000Z
notebooks/02_crash_severity.py
jennan/crash_prediction
498b59704ed2aca61c78e4eb7c5558abe9edaffc
[ "MIT" ]
2
2020-12-10T19:12:02.000Z
2020-12-10T19:12:08.000Z
notebooks/02_crash_severity.py
jennan/crash_prediction
498b59704ed2aca61c78e4eb7c5558abe9edaffc
[ "MIT" ]
2
2021-04-14T14:32:39.000Z
2021-12-10T10:36:59.000Z
# # Exploration of the crash severity information in CAS data # # In this notebook, we will explore the severity of crashes, as it will be the # target of our predictive models. from pathlib import Path import numpy as np import pandas as pd import scipy.stats as st import matplotlib.pyplot as plt import seaborn as sb from crash_prediction import cas_data # set seaborn default style sb.set() # But first, we ensure we have the data or download it if needed dset_path = Path("..") / "data" / "cas_dataset.csv" if not dset_path.exists(): dset_path.parent.mkdir(parents=True, exist_ok=True) cas_data.download(dset_path) # and load it. dset = pd.read_csv(dset_path) dset.head() # The CAS dataset has 4 features that can be associated with the crash severity: # # - `crashSeverity`, severity of a crash, determined by the worst injury # sustained in the crash at time of entry, # - `fatalCount`, count of the number of fatal casualties associated with this # crash, # - `minorInjuryCount`, count of the number of minor injuries associated with # this crash, # - `seriousInjuryCount`, count of the number of serious injuries associated # with this crash. severity_features = [ "fatalCount", "seriousInjuryCount", "minorInjuryCount", "crashSeverity", ] fig, axes = plt.subplots(2, 2, figsize=(15, 12)) for ax, feat in zip(axes.flat, severity_features): counts = dset[feat].value_counts(dropna=False) counts.plot.bar(ylabel="# crashes", title=feat, ax=ax) ax.set(yscale="log") fig.tight_layout() # To check the geographical distribution, we will focus on Auckland and replace # discrete levels of `crashSeverity` with number to ease plotting. dset_auckland = dset[dset["X"].between(174.7, 174.9) & dset["Y"].between(-37, -36.8)] mapping = { "Non-Injury Crash": 1, "Minor Crash": 2, "Serious Crash": 3, "Fatal Crash": 4, } dset_auckland = dset_auckland.replace({"crashSeverity": mapping}) # Given the data set imbalance, we plot the local maxima to better see the # location of more severe car crashes. 
fig, axes = plt.subplots(2, 2, figsize=(15, 15)) for ax, feat in zip(axes.flat, severity_features): dset_auckland.plot.hexbin( "X", "Y", feat, gridsize=500, reduce_C_function=np.max, cmap="BuPu", title=feat, ax=ax, sharex=False, ) ax.set_xticklabels([]) ax.set_yticklabels([]) fig.tight_layout() # Few remarks coming from these plots: # # - fatal counts are (hopefully) very low, # - crashes with serious injuries are also very sparse, # - crashes with minor injuries are denser and seem to follow major axes, # - the crash severity feature looks like the most homogeneous feature, yet # highlighting some roads more than others. # # The crash severity is probably a good go-to target, as it's quite # interpretable and actionable. The corresponding ML problem is a supervised # multi-class prediction problem. # To simplify the problem, we can also just try to predict if a crash is going # to involve an injury (minor, severe or fatal) or none. Here is how it would # look like in Auckland dset_auckland["injuryCrash"] = (dset_auckland["crashSeverity"] > 1) * 1.0 dset_auckland.plot.hexbin( "X", "Y", "injuryCrash", gridsize=500, cmap="BuPu", title="Crash with injury", sharex=False, figsize=(10, 10), ) # Interestingly, the major axes do not pop up as saliently here, as we are # averaging instead of taking the local maxima. # This brings us to to the another question: is the fraction of crash with # injuries constant fraction of the number of crashes in an area? This would # imply that a simple binomial model can model locally binned data. # We first discretize space into 0.01° wide cells and count the total number of # crashes in each cell as well as the number of crashes with injuries. 
# + dset["X_bin"] = pd.cut( dset["X"], pd.interval_range(dset.X.min(), dset.X.max(), freq=0.01) ) dset["Y_bin"] = pd.cut( dset["Y"], pd.interval_range(dset.Y.min(), dset.Y.max(), freq=0.01) ) counts = ( dset.groupby(["X_bin", "Y_bin"], observed=True).size().reset_index(name="crash") ) injury_counts = ( dset.groupby(["X_bin", "Y_bin"], observed=True) .apply(lambda x: (x["crashSeverity"] != "Non-Injury Crash").sum()) .reset_index(name="injury") ) counts = counts.merge(injury_counts) # - # For each number of crashes in cells, we can check the fraction of crashes with # injuries. Here we see that cells with 1 or few crashes have a nearly 50/50 # chance of injuries, compared to cells with a larger number of accidents, where # it goes down to about 20%. injury_fraction = counts.groupby("crash").apply( lambda x: x["injury"].sum() / x["crash"].sum() ) ax = injury_fraction.plot(style=".", ylabel="fraction of injuries", figsize=(10, 7)) ax.set_xscale("log") # Then we can also check how good is a binomial distribution at modeling binned # data, using it to derive a 95% predictive interval. ratio = counts["injury"].sum() / counts["crash"].sum() xs = np.arange(1, counts["crash"].max() + 1) pred_intervals = st.binom(xs, ratio).ppf([[0.025], [0.975]]) # + fig, axes = plt.subplots(1, 2, figsize=(15, 7)) counts.plot.scatter(x="crash", y="injury", alpha=0.3, c="b", s=2, ax=axes[0]) axes[0].fill_between( xs, pred_intervals[0], pred_intervals[1], alpha=0.3, color="r", label="95% equal-tail interval for binomial", ) axes[0].legend() counts.plot.scatter(x="crash", y="injury", alpha=0.3, c="b", s=2, ax=axes[1]) axes[1].fill_between( xs, pred_intervals[0], pred_intervals[1], alpha=0.3, color="r", label="95% equal-tail interval for binomial", ) axes[1].legend() axes[1].set_xscale("log") axes[1].set_yscale("log") # - # The predictive interval seems to have a poor coverage, overshooting the high # counts regions and being to narrow for the regions with hundreds of crashes. 
# We can compute the empirical coverage of these interval to check this. counts["covered"] = counts["injury"].between( pred_intervals[0, counts["crash"] - 1], pred_intervals[1, counts["crash"] - 1] ) print(f"95% predictive interval has {counts['covered'].mean() * 100:.2f}%.") print("95% predictive interval coverage per quartile of crash counts:") mask = counts["crash"] > 1 counts[mask].groupby(pd.qcut(counts.loc[mask, "crash"], 4))["covered"].mean() # So it turns out that on a macro scale, the coverage of this simple model is # quite good, but if we split by number of crashes, the coverage isn't so good # anymore for the cells with higher number of crashes. # # Hence, including the number of crashes in a vicinity could be an relevant # predictor for the probability of crash with injury. # --- # ## Original computing environment # !date -R # !uname -a # !pip freeze
30.896396
85
0.697478
1,064
6,859
4.446429
0.327068
0.018601
0.022194
0.010991
0.146269
0.113295
0.103149
0.103149
0.09089
0.059184
0
0.021118
0.178452
6,859
221
86
31.036199
0.818279
0.460417
0
0.245614
0
0
0.170799
0.006887
0
0
0
0
0
1
0
false
0
0.061404
0
0.061404
0.017544
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
794737a97c176c9f701f94c89a9d3fa6ea1cba13
601
py
Python
python/cartpole1.py
lusing/mljs
4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8
[ "MIT" ]
null
null
null
python/cartpole1.py
lusing/mljs
4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8
[ "MIT" ]
null
null
null
python/cartpole1.py
lusing/mljs
4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8
[ "MIT" ]
null
null
null
import gym def cartpole(): environment = gym.make('CartPole-v1') environment.reset() for i in range(1000): # environment.render() action = environment.action_space.sample() observation, reward, done, info = environment.step(action) print("Step {}:".format(i)) print("action: {}:".format(action)) print('observation: {}'.format(observation)) print('reward: {}'.format(reward)) print('done: {}'.format(done)) print('info: {}'.format(info)) if done: break if __name__ == '__main__': cartpole()
28.619048
66
0.577371
61
601
5.540984
0.459016
0.065089
0
0
0
0
0
0
0
0
0
0.011161
0.254576
601
20
67
30.05
0.743304
0.033278
0
0
0
0
0.136442
0
0
0
0
0
0
1
0.058824
false
0
0.058824
0
0.117647
0.352941
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
794855d07b967464fa463b2ba9dd7683a00f2311
3,466
py
Python
kw3pan/pancakeswap/factory/core/pancakeswap_factory.py
kkristof200/py_web3_pancakeswap
ae9dc7021b7da2365ce675f29f89e103fe44d77f
[ "MIT" ]
6
2021-05-09T12:43:37.000Z
2021-12-07T01:56:02.000Z
kw3pan/pancakeswap/factory/core/pancakeswap_factory.py
kkristof200/py_web3_pancakeswap
ae9dc7021b7da2365ce675f29f89e103fe44d77f
[ "MIT" ]
null
null
null
kw3pan/pancakeswap/factory/core/pancakeswap_factory.py
kkristof200/py_web3_pancakeswap
ae9dc7021b7da2365ce675f29f89e103fe44d77f
[ "MIT" ]
null
null
null
# ------------------------------------------------------------ Imports ----------------------------------------------------------- # # System from typing import Optional # Pip from kw3 import WrappedContract, Web3 from kw3.constants import Constants as KW3Constants # Local from ._abi import pancakeswap_factory_abi from ...liquidity_pool import PancakeswapLiquidityPool, PancakeswapBusdLiquidityPool, PancakeswapWbnbLiquidityPool from ...constants import Constants # -------------------------------------------------------------------------------------------------------------------------------- # # --------------------------------------------------- class: PancakeswapFactory -------------------------------------------------- # class PancakeswapFactory(WrappedContract): # --------------------------------------------------------- Init --------------------------------------------------------- # def __init__( self, web3: Web3 ): super().__init__( web3=web3, address=Constants.ADDRESS_PANCAKESWAP_FACTORY, abi=pancakeswap_factory_abi ) # ---------------------------------------------------- Public methods ---------------------------------------------------- # # Forwarders def liquidityPoolAddressesLength(self) -> int: return self.functions.allPairsLength().call() def liquidityPoolAddressAtIndex( self, index: int ) -> str: return self.functions.allPairs(index).call() def liquidityPoolAtIndex( self, index: int ) -> PancakeswapLiquidityPool: return PancakeswapBusdLiquidityPool( web3=self._web3, address=self.liquidityPoolAddressAtIndex( index=index ) ) # Custom def getPairAddress( self, address0: str, address1: str ) -> Optional[str]: return self.functions.getPair( Web3.toChecksumAddress(address0), Web3.toChecksumAddress(address1) ).call() def getPair( self, address0: str, address1: str ) -> Optional[PancakeswapLiquidityPool]: return self.__getPair( PancakeswapLiquidityPool, address0=address0, address1=address1 ) def getWbnbPair( self, token_address: str ) -> 
Optional[PancakeswapWbnbLiquidityPool]: return self.__getPair( PancakeswapWbnbLiquidityPool, address0=KW3Constants.WBNB.ADDRESS, address1=token_address ) def getBusdPair( self, token_address: str ) -> Optional[PancakeswapBusdLiquidityPool]: return self.__getPair( PancakeswapBusdLiquidityPool, address0=KW3Constants.BUSD.ADDRESS, address1=token_address ) # ---------------------------------------------------- Private methods --------------------------------------------------- # def __getPair( self, _type, address0: str, address1: str ) -> Optional[PancakeswapLiquidityPool]: pair_address = self.getPairAddress(address0, address1) return _type( self._web3, pair_address ) if pair_address else None # -------------------------------------------------------------------------------------------------------------------------------- #
28.409836
132
0.467398
206
3,466
7.694175
0.291262
0.037855
0.039748
0.04164
0.126183
0.092114
0
0
0
0
0
0.012644
0.246971
3,466
122
133
28.409836
0.594636
0.265147
0
0.308642
0
0
0
0
0
0
0
0
0
1
0.111111
false
0
0.074074
0.08642
0.296296
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
794b0eee657db516c725d2d35f15819da5d490ca
17,648
py
Python
functions_for_AirBnB.py
dalpengholic/Udacity_Boston-AirBNB-Data
ef918f4ddf8041a9f646e6fe786730f191746c2b
[ "MIT" ]
null
null
null
functions_for_AirBnB.py
dalpengholic/Udacity_Boston-AirBNB-Data
ef918f4ddf8041a9f646e6fe786730f191746c2b
[ "MIT" ]
null
null
null
functions_for_AirBnB.py
dalpengholic/Udacity_Boston-AirBNB-Data
ef918f4ddf8041a9f646e6fe786730f191746c2b
[ "MIT" ]
null
null
null
# The collection of functions for the Boston AirBnB dataset # import necessary libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from pandas.tseries.holiday import USFederalHolidayCalendar as calendar #To check holidays in the U.S import time import copy def load_bnb_files(): '''Load AirBnB files''' df_listing = pd.read_csv('./data/listings.csv') df_calendar = pd.read_csv('./data/calendar.csv') return df_listing, df_calendar # Modify df_calendar for future work # Special event : marathon, new academic season def modify_calendar(df_calendar): ''' This function creates 'year', 'month', 'day', 'weekday', and 'week_number' columns from 'date' coulmn of df_calendar and remove '$' string from 'price' coulmn. Input : a Pandas dataframe having a date data column Output : a Pandas dataframe having year, month, day, weekday, us_holiday columns ''' # Split date column into year, month,day, weekday columns # The day of the week with Monday=0, Sunday=6 # Set the range of weekends from Friday to Sunday df_calendar['year'] = pd.DatetimeIndex(df_calendar['date']).year df_calendar['month'] = pd.DatetimeIndex(df_calendar['date']).month df_calendar['day'] = pd.DatetimeIndex(df_calendar['date']).day df_calendar['weekday'] = pd.DatetimeIndex(df_calendar['date']).weekday df_calendar['week_number'] = pd.DatetimeIndex(df_calendar['date']).week df_calendar['price']= df_calendar['price'].str.replace('$','') df_calendar['price']=df_calendar['price'].str.replace(',','') df_calendar['price'] = df_calendar['price'].astype(float) # Add us_holiday column cal = calendar() holidays = cal.holidays(start=df_calendar.date.min(), end=df_calendar.date.max()) df_calendar['us_holiday'] = df_calendar.date.astype('datetime64').isin(holidays) # Add weekend column #Friday, Saturday weekend = [4,5] df_calendar['weekend'] = df_calendar.weekday.isin(weekend) # Replace values in weekday column df_calendar['weekday'].replace({0:'Monday', 1:'Tuesday', 
                              2:'Wednesday', 3:'Thursday',4:'Friday', 5:'Saturday', 6:'Sunday'}, inplace=True)
    return df_calendar

def add_availabledays_price(df_listing, df_cal_modified):
    '''
    This function creates the columns of 'unavail_days', 'avail_days_weekends', 'avail_days_weekdays', 'price_weekend',
    and 'price_weekday' where calculated from df_cal_modified on df_listing.

    Input :
    - A Pandas dataframe made from 'listings.csv' : df_listing
    - A pandas dataframe modified by modify_calendar() : df_cal_modified

    Output :
    - The modified df_listing dataframe with new 'unavail_days', 'avail_days_weekends', 'avail_days_weekdays',
      'price_weekend', and 'price_weekday' columns
    '''
    # NOTE(review): columns are accumulated per listing id, in id_list order,
    # and attached positionally via pd.Series below — assumes df_listing has a
    # default RangeIndex; confirm before reusing on a filtered frame.
    id_list = df_listing.id[:]
    unavailable_days_array = np.array([])
    avail_days_weekends_array = np.array([])
    avail_days_weekdays_array = np.array([])
    price_weekend_array = np.array([])
    price_weekday_array = np.array([])

    for i in np.nditer(id_list):
        tmp = df_cal_modified[(df_cal_modified.listing_id == i)] # Make a dataframe coming from df_listing with a certain id
        available_dict = tmp.available.value_counts().to_dict()
        # 'f' rows are unavailable days for this listing
        if 'f' in available_dict:
            unavailable_days = tmp[tmp.available == 'f'].shape[0]
        else:
            unavailable_days = 0

        # 't' rows are available days; split counts and mean prices by weekend flag
        if 't' in available_dict:
            available_weekends = tmp[(tmp.available == 't') & (tmp.weekend == True)].shape[0]
            available_weekdays = tmp[(tmp.available == 't') & (tmp.weekend == False)].shape[0]
            price_weekend = tmp[(tmp.weekend == True) & (tmp.available == 't')].price.astype(float).describe()['mean']
            price_weekday = tmp[(tmp.weekend == False) & (tmp.available == 't')].price.astype(float).describe()['mean']
        else:
            available_weekends = 0
            available_weekdays = 0
            price_weekend = np.nan
            price_weekday = np.nan

        unavailable_days_array = np.append(unavailable_days_array, unavailable_days)
        avail_days_weekends_array = np.append(avail_days_weekends_array, available_weekends)
        avail_days_weekdays_array = np.append(avail_days_weekdays_array, available_weekdays)
        price_weekend_array = np.append(price_weekend_array, price_weekend)
        price_weekday_array = np.append(price_weekday_array, price_weekday)

    df_listing['unavail_days'] = pd.Series(unavailable_days_array)
    df_listing['avail_days_weekends'] = pd.Series(avail_days_weekends_array)
    df_listing['avail_days_weekdays'] = pd.Series(avail_days_weekdays_array)
    df_listing['price_weekend'] = pd.Series(price_weekend_array)
    df_listing['price_weekday'] = pd.Series(price_weekday_array)

    return df_listing

def clean_listing_df(df_listing):
    '''
    This function aims to make the df_listing dataframe for data analysis by
    - removing irrelevant columns
    - changing object type columns to numeric columns or manipulating them using one hot encoding
    - filling NaN values
    - creating an integrated_score_log column by the natural log of the result from
      'review_scores_rating' times 'number_of_reviews' +1

    Input :
    - A Pandas dataframe made from 'listings.csv' : df_listing

    Output :
    - Cleaned df_listing
    '''
    # Drop columns having 50% of nan value. There were reasons that I decided 50% the threshold for dropping columns.
    # 1. Easy to see the dataframe and to check the meaning of the columns.
    # 2. Decide which ones have to be dropped.
    # The candidates columns to be dropped are 'notes', 'neighbourhood_group_cleansed', 'square_feet', 'weekly_price',
    # 'monthly_price', 'security_deposit', 'has_availability', 'license', 'jurisdiction_names'. Most of them are
    # duplicated to other columns or irrelavant except 'security_deposit' column. I didn't do imputing by the mean or
    # mode of the column because it can distort real shape. I didn't do one-hot-encoding to make the dataframe
    # straightforward. 'security_deposit' has 55 unique values.
    df_missing = df_listing.isna().mean()
    df_listing_modi1 = df_listing.drop(df_missing[df_missing>0.5].index.to_list(), axis=1)

    # Drop columns related with urls and other irrelevant columns.
    # url and othe columns are all unique or useless.
    remove_list1 = ['listing_url', 'scrape_id', 'last_scraped', 'thumbnail_url', 'medium_url', 'picture_url',
                    'xl_picture_url', 'host_url', 'host_thumbnail_url', 'host_picture_url', 'country_code', 'country']
    df_listing_modi1.drop(remove_list1, axis=1, inplace=True)

    # Drop the columns because of data overlap [city, smart_location], Only one value [state],
    # Drop the wrong data [market, calendar_last_scraped]
    remove_list2 = ['smart_location', 'state', 'name', 'summary', 'space', 'description','neighborhood_overview',
                    'transit','access','market','calendar_last_scraped']
    df_listing_modi1.drop(remove_list2, axis=1, inplace=True)

    # Modify 'house_rules' column to 'house_rules_exist_tf' having True value if there is a rule.
    # False value, if there is no rule.
    # Houes_rules are different for every host. So it is not practical to use one-hot-encoding. Instead of that,
    # It is changed to binary type, which is there is rule in a house, True, otherwise, False.
    # This can save some information, which is better than just dropping.
    df_listing_modi1['house_rules_exist_tf']= pd.notna(df_listing_modi1.house_rules)
    df_listing_modi1.drop(['house_rules'], axis=1, inplace=True)

    # Remove columns having 1000 unique string valuses and irrelevant data
    remove_list3 = ['interaction', 'host_name', 'host_since', 'host_about', 'street','first_review',
                    'experiences_offered','requires_license', 'last_review','host_location',
                    'neighbourhood_cleansed','experiences_offered','requires_license']
    df_listing_modi2 = df_listing_modi1.drop(remove_list3, axis=1)

    # Change the columns 'host_response_rate', 'host_acceptance_rate' to float type
    # NOTE(review): str.replace with a single-character pattern like '$' has
    # historically been treated as a literal by pandas; on newer pandas the
    # default regex behaviour changed — confirm regex=False is not needed here.
    columns_change_type = ['host_response_rate','host_acceptance_rate', 'price', 'cleaning_fee']
    for i in columns_change_type:
        df_listing_modi2[i] = df_listing_modi2[i].str.replace('%','')
        df_listing_modi2[i] = df_listing_modi2[i].str.replace('$','')
        df_listing_modi2[i] = df_listing_modi2[i].str.replace(',','')
        df_listing_modi2[i] = df_listing_modi2[i].astype(float)

    # Modify and Split values in 'amenities' column
    # Amenities can be one of reason that potential candidate might consider.
    df_listing_modi2.amenities = df_listing_modi2.amenities.str.replace("[{}]", "")
    df_amenities = df_listing_modi2.amenities.str.get_dummies(sep = ",")
    df_amenities = df_amenities.add_prefix('amenities_')
    df_listing_modi2 = pd.concat([df_listing_modi2, df_amenities], axis=1)
    df_listing_modi2 = df_listing_modi2.drop('amenities', axis=1)

    # Use get_dummies for columns having unique values less then 10
    # It is reasonable to use one-hot-encoding if the nunber of unique values are less then 10.
    # It doesn't lose information, and keep the dataframe simple.
    columns_of_object_less10 =[]
    for i,j in zip(df_listing_modi2.columns.to_list(), df_listing_modi2.dtypes.to_list()):
        if j == object and len(df_listing_modi2[i].value_counts()) < 10 :
            columns_of_object_less10.append(i)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_less10,
                                      prefix=columns_of_object_less10, dummy_na=True)

    # Modify 'extra_people' coulmn to get boolean type of 'extra_people_fee_tf'
    # Instead of dropping, I decided to change 'extra_people' coulmn to binary type to save some information
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(str)
    df_listing_modi2['extra_people']= df_listing_modi2['extra_people'].str.replace('$','')
    df_listing_modi2['extra_people']=df_listing_modi2['extra_people'].str.replace(',','')
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(float)
    # 0 means no extra-people fee; map it to NaN so pd.notna yields False below
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].replace(to_replace=0, value=np.nan)
    df_listing_modi2['extra_people_fee_tf']= pd.notna(df_listing_modi2.extra_people)
    df_listing_modi2 = df_listing_modi2.drop('extra_people', axis=1)

    # Modify and Split values in 'host_verifications' column
    df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace("[", "")
    df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace("]", "")
    df_host_verifications = df_listing_modi2.host_verifications.str.get_dummies(sep = ",")
    df_host_verifications = df_host_verifications.add_prefix('host_verification_')
    df_listing_modi2 = pd.concat([df_listing_modi2, df_host_verifications], axis=1)
    df_listing_modi2 = df_listing_modi2.drop(['host_verifications'], axis=1)
    df_listing_modi2 = df_listing_modi2.drop(['host_neighbourhood'], axis=1)

    # Modify 'calendar_updated' column
    # Instead of dropping, I decided to change 'calendar_updated' coulmn to binary type (updated within a week or not)
    # to save some information
    df_listing_modi2["calendar_updated_1weekago"] = np.where(df_listing_modi2['calendar_updated'].str.contains(
        "days|yesterday|today|a week ago")==True, 'yes', 'more_than_1week')
    df_listing_modi2 = df_listing_modi2.drop(['calendar_updated'], axis=1)

    # Use get_dummies for the columns 'neighbourhood', 'city', 'zipcode', 'property_type'
    tmp = df_listing_modi2.columns.to_list()
    tmp1 = df_listing_modi2.dtypes.to_list()
    columns_of_object_over10 =[]
    for i,j in zip(tmp,tmp1):
        if j == object and len(df_listing_modi2[i].value_counts()) > 10 :
            columns_of_object_over10.append(i)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_over10,
                                      prefix=columns_of_object_over10, dummy_na=True)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2,
                                      columns=['calendar_updated_1weekago','house_rules_exist_tf','extra_people_fee_tf'],
                                      prefix=['calendar_updated_1weekago','house_rules_exist_tf','extra_people_fee_tf'],
                                      dummy_na=True)
    # Binary flags for hosts with perfect response / acceptance rates
    df_listing_modi2["host_response_rate_100"] = np.where(df_listing_modi2['host_response_rate'] ==100, True, False)
    df_listing_modi2["host_acceptance_rate_100"] = np.where(df_listing_modi2['host_acceptance_rate'] ==100, True, False)
    df_listing_modi2 = df_listing_modi2.drop(['host_response_rate','host_acceptance_rate','reviews_per_month'], axis=1)

    # bathrooms, bedrooms, beds, cleaning_fee, review_scores_rating, review_... : : fillna with mean value
    # The empty cell are filled with mean values of corresponding columns. Because these are numerical type,
    # I thought imputing with mean values is better than dropping or one-hot-encoding
    columns1 = ['bathrooms','bedrooms','beds','cleaning_fee','review_scores_rating','review_scores_accuracy',
                'review_scores_cleanliness','review_scores_checkin', 'review_scores_communication',
                'review_scores_location','review_scores_value']
    df_listing_modi2[columns1] = df_listing_modi2[columns1].fillna(df_listing_modi2.mean())
    # Listings without weekend/weekday prices fall back to the base price
    df_listing_modi2.price_weekend.fillna(df_listing_modi2.price, inplace=True)
    df_listing_modi2.price_weekday.fillna(df_listing_modi2.price, inplace=True)
    # Popularity-weighted quality score; +1 keeps log defined for zero reviews
    df_listing_modi2['integrated_score_log'] = np.log(df_listing_modi2['review_scores_rating']*df_listing_modi2['number_of_reviews']+1)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['host_response_rate_100','host_acceptance_rate_100'],
                                      prefix=['host_response_rate_100','host_acceptance_rate_100'])
    df_listing_modi2 = df_listing_modi2.drop(['id', 'host_id', 'latitude', 'longitude','price','host_listings_count',
                                              'host_total_listings_count','maximum_nights'], axis=1)

    return df_listing_modi2

def conditioning_listing_df(df_listing_modi2):
    '''
    This function is for conditioning a dataframe returned by the funtion 'clean_listing_df(df_listing)''

    Input :
    - A Pandas dataframe came from the function 'clean_listing_df(df_listing)''

    Output :
    - Cleaned df_listing_modi2 : df_listing_modi3
    '''
    # Label listings: no reviews -> 'poor', top 20% of the score -> 'high', rest -> 'normal'
    threshold_80 = df_listing_modi2.integrated_score_log.quantile(0.8)
    condition = [df_listing_modi2['integrated_score_log'] == 0,
                 df_listing_modi2['integrated_score_log'] >= threshold_80]
    label_list = ['poor','high']
    df_listing_modi2['y_label'] = np.select(condition, label_list, default='normal')
    # Drop columns related to 'y_label' column
    # Without dropping, the remained columns affect model's prediction
    df_listing_modi3 = df_listing_modi2.drop(['integrated_score_log','number_of_reviews','review_scores_rating',
                                              'review_scores_value', 'review_scores_communication',
                                              'review_scores_accuracy','review_scores_checkin',
                                              'review_scores_cleanliness', 'review_scores_location',
                                              'availability_30','availability_60', 'availability_90',
                                              'availability_365','calculated_host_listings_count'], axis=1)

    return df_listing_modi3

def investigate(df_listing_scaled, pca, i):
    '''
    This function checks pca components that which original features are storngly related to a pca component

    Input :
    - Dataframe : df_listing_scaled a dataframe scaled by StandardScaler()
    - pca instance
    - i : The number of pca component

    Output :
    - pos_list : Original features having positive relationship with a corresponding pca component,
      which are sorted in order of importance
    - neg_list : Original features having positive relationship with a corresponding pca component,
      which are sorted in order of importance
    '''
    pos_list =[]
    neg_list =[]
    feature_names = list(df_listing_scaled.columns)
    # deepcopy so sorting/formatting below cannot touch the fitted pca object
    weights_pca = copy.deepcopy(pca.components_[i])
    combined = list(zip(feature_names, weights_pca))
    # Sort features by component weight, most positive first
    combined_sorted= sorted(combined, key=lambda tup: tup[1], reverse=True)
    tmp_list = [list(x) for x in combined_sorted]
    tmp_list = [(x[0],"{0:.3f}".format(x[1])) for x in tmp_list]
    print("positive to pca{}:".format(i), tmp_list[0:10])
    print()
    print("negative to pca{}:".format(i), tmp_list[-1:-11:-1])
    print()
    # Top-10 most positive and top-10 most negative feature names
    for j in range(0,10):
        pos_list.append(tmp_list[j][0])
    for k in range(1,11):
        neg_list.append(tmp_list[-k][0])
    return pos_list, neg_list

def check_difference(pos_list, neg_list, df_listing_poor, df_listing_high):
    '''
    Print original features that are stongly related with a corresponding pca component.
    '''
    # Mean of each selected feature within the 'high' and 'poor' groups
    data_pos = [[df_listing_high[x].mean(), df_listing_poor[x].mean()] for x in pos_list]
    data_neg = [[df_listing_high[x].mean(), df_listing_poor[x].mean()] for x in neg_list]
    tmp_pos = pd.DataFrame(data=data_pos , index=pos_list, columns=['high', 'poor'])
    tmp_neg = pd.DataFrame(data=data_neg , index=neg_list, columns=['high', 'poor'])
    tmp_both = pd.concat([tmp_pos, tmp_neg])
    # Rank features by absolute high-vs-poor gap
    tmp_both["difference"] = tmp_both.high - tmp_both.poor
    tmp_both["difference"] = tmp_both["difference"].abs()
    result = tmp_both.sort_values(by=['difference'], ascending=False)
    return result
54.807453
501
0.716228
2,438
17,648
4.889664
0.183757
0.095126
0.099824
0.019126
0.383022
0.28714
0.233957
0.20812
0.178592
0.159718
0
0.016807
0.174014
17,648
321
502
54.978193
0.800988
0.301111
0
0.023529
0
0
0.189481
0.04878
0
0
0
0
0
1
0.041176
false
0
0.041176
0
0.123529
0.023529
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
794b69e64ae775672890ac0f8ee3c75b24418261
2,898
py
Python
src/junction/markdown/info_panels.py
explody/Junction
700df9385fceda00d6830816606d8854dc9cef7b
[ "MIT" ]
16
2020-04-28T07:03:26.000Z
2022-03-05T14:26:40.000Z
src/junction/markdown/info_panels.py
explody/Junction
700df9385fceda00d6830816606d8854dc9cef7b
[ "MIT" ]
14
2020-03-19T04:32:18.000Z
2021-03-05T23:54:47.000Z
src/junction/markdown/info_panels.py
explody/Junction
700df9385fceda00d6830816606d8854dc9cef7b
[ "MIT" ]
3
2021-01-19T18:39:00.000Z
2022-02-14T23:51:07.000Z
from typing import List, Any

from markdown import Markdown
from markdown.extensions import Extension
from markdown.blockprocessors import BlockProcessor

import re
import xml.etree.ElementTree as etree


class InfoPanelExtension(Extension):
    """Markdown extension for rendering the Confluence info panel macro.

    Only supports the "original" info panels AKA info (blue), success (green),
    warning (yellow), and error (red).

    Example:
    ```
    Normal, introductory paragraph.

    Warning: info panels like this must be isolated into their own blocks
    with surrounding blank lines.

    This will be a plain old paragraph, and not included in the warning above.
    ```
    """

    def extendMarkdown(self, md: Markdown) -> None:
        """Register one block processor per supported panel flavour.

        Each entry maps a "Prefix:" sentinel to a Confluence macro name and a
        fixed macro id. Priority 25 places the processors ahead of the default
        paragraph handling.
        """
        md.registerExtension(self)
        panels = [
            ("Info:", "info", "42afc5c4-fb53-4483-9f1a-a87a7ad033e6", "info-panel"),
            ("Success:", "tip", "d60a142d-bc62-4f37-a091-7254c4472bdf", "success-panel"),
            ("Warning:", "note", "9e14a573-943e-4691-919b-a9f6a389da71", "warning-panel"),
            ("Error:", "warning", "2e759c9c-11f1-4959-82e7-901a2dc737d7", "error-panel"),
        ]
        for prefix, name, macro_id, processor_name in panels:
            md.parser.blockprocessors.register(
                InfoPanelBlockProcessor(prefix, name, macro_id, md.parser),
                processor_name,
                25,
            )


class InfoPanelBlockProcessor(BlockProcessor):
    """Converts a block starting with e.g. "Info:" into a Confluence
    ``ac:structured-macro`` element whose body is parsed as markdown."""

    def __init__(
        self, prefix: str, name: str, macro_id: str, *args: Any, **kwargs: Any
    ):
        """
        Args:
            prefix: sentinel the block must start with, e.g. "Warning:".
            name: Confluence macro name ("info", "tip", "note", "warning").
            macro_id: fixed macro id emitted on the element.
        """
        self._prefix = prefix
        # BUG FIX: escape the prefix so a metacharacter in a future prefix
        # cannot change the match.  re.VERBOSE was dropped: the pattern has no
        # literal whitespace or comments, and VERBOSE would silently ignore
        # spaces inside a prefix.
        self._block_re = re.compile(
            r"\s*{}.*".format(re.escape(prefix)), re.MULTILINE | re.DOTALL
        )
        self._name = name
        self._macro_id = macro_id
        super().__init__(*args, **kwargs)

    def test(self, parent: etree.Element, block: str) -> bool:
        """Return True when the block starts (after whitespace) with the prefix."""
        return bool(self._block_re.match(block))

    def run(self, parent: etree.Element, blocks: List[str]) -> None:
        """Consume the matched block and emit the structured-macro element."""
        # BUG FIX: the original used str.lstrip(self._prefix), which strips a
        # *character set*, not a prefix — e.g. "Info:note" would also lose the
        # leading "no" of "note".  It also never removed leading whitespace
        # before the prefix even though the match regex allows it.
        raw_content = blocks.pop(0).lstrip()
        if raw_content.startswith(self._prefix):
            raw_content = raw_content[len(self._prefix):].lstrip()
        info_panel = etree.SubElement(
            parent,
            "ac:structured-macro",
            {
                "ac:name": self._name,
                "ac:schema-version": "1",
                "ac:macro-id": self._macro_id,
            },
        )
        rich_text_body = etree.SubElement(info_panel, "ac:rich-text-body")
        # Panel bodies may themselves contain markdown; re-enter the parser.
        self.parser.parseChunk(rich_text_body, raw_content)
        info_panel.tail = "\n"


def makeExtension(**kwargs: Any) -> InfoPanelExtension:
    """Entry point used by python-markdown to instantiate the extension."""
    return InfoPanelExtension(**kwargs)
32.931818
103
0.596963
301
2,898
5.644518
0.448505
0.037669
0.05415
0.072984
0.139494
0.10771
0.10771
0
0
0
0
0.046715
0.29089
2,898
87
104
33.310345
0.780049
0.138716
0
0.242424
0
0
0.129019
0.058608
0
0
0
0
0
1
0.075758
false
0
0.090909
0.030303
0.227273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
794c1314bf22e9986c1038e23ccfa6cf2ec03b66
5,096
py
Python
ppo.py
ajleite/basic-ppo
e9d823275dda3c376e3e0f7d66e8dfb815b434d8
[ "MIT" ]
2
2020-06-27T11:44:19.000Z
2022-01-11T21:23:01.000Z
ppo.py
ajleite/basic-ppo
e9d823275dda3c376e3e0f7d66e8dfb815b434d8
[ "MIT" ]
null
null
null
ppo.py
ajleite/basic-ppo
e9d823275dda3c376e3e0f7d66e8dfb815b434d8
[ "MIT" ]
null
null
null
#!/usr/bin/python3
# Copyright 2019 Abe Leite
# Based on "Proximal Policy Optimization Algorithms", Schulman et al 2017
# For the benefit of my fellow CSCI-B 659 students
# While I hope that this code is helpful I will not vouch for its total accuracy;
# my primary aim here is to elucidate the ideas from the paper.

import sys

import tensorflow as tf
import gym

# Hyperparameters (roughly the paper's Atari settings).
ACTORS = 8
N_CYCLES = 10000
LEARNING_RATE = 0.00025
CYCLE_LENGTH = 128
BATCH_SIZE = CYCLE_LENGTH*ACTORS
CYCLE_EPOCHS = 3
MINIBATCH = 32*ACTORS
GAMMA = 0.99
EPSILON = 0.1

class DiscretePPO:
    def __init__(self, V, pi):
        ''' V and pi are both keras (Sequential)s.
            V maps state to single scalar value;
            pi maps state to discrete probability distribution on actions. '''
        self.V = V
        self.pi = pi
        # Frozen snapshot of pi; serves as the "old" policy in the PPO ratio
        # and is re-synced at the end of every train() cycle.
        self.old_pi = tf.keras.models.clone_model(self.pi)
        self.optimizer = tf.keras.optimizers.Adam(LEARNING_RATE)
    @tf.function
    def pick_action(self, S):
        """Sample one action for a single (unbatched) state S."""
        return tf.random.categorical(self.pi(tf.expand_dims(S,axis=0)), 1)[0,0]
    @tf.function
    def train_minibatch(self, SARTS_minibatch):
        """One gradient step for V and pi on a (S, A, R, T, S2) minibatch."""
        S, A, R, T, S2 = SARTS_minibatch
        # Bootstrapped one-step value target; terminal states contribute no
        # future value.
        next_V = tf.where(T, tf.zeros((MINIBATCH,)), self.V(S2))
        next_V = tf.stop_gradient(next_V)
        advantage = R + GAMMA * next_V - self.V(S)
        V_loss = tf.reduce_sum(advantage ** 2)
        V_gradient = tf.gradients(V_loss, self.V.weights)
        self.optimizer.apply_gradients(zip(V_gradient, self.V.weights))
        # NOTE(review): tf.gather(..., axis=1) without batch_dims produces an
        # (MINIBATCH, MINIBATCH) matrix rather than one probability per
        # sample; confirm whether batch_dims=1 was intended before relying on
        # this implementation.
        ratio = tf.gather(self.pi(S), A, axis=1) / tf.gather(self.old_pi(S), A, axis=1)
        confident_ratio = tf.clip_by_value(ratio, 1-EPSILON, 1+EPSILON)
        current_objective = ratio * advantage
        confident_objective = confident_ratio * advantage
        # Clipped surrogate objective: take the pessimistic (smaller) value.
        PPO_objective = tf.where(current_objective < confident_objective, current_objective, confident_objective)
        PPO_objective = tf.reduce_mean(PPO_objective)
        pi_gradient = tf.gradients(-PPO_objective, self.pi.weights)
        self.optimizer.apply_gradients(zip(pi_gradient, self.pi.weights))
    @tf.function
    def train(self, SARTS_batch):
        """Run CYCLE_EPOCHS of shuffled minibatch updates, then sync old_pi."""
        S, A, R, T, S2 = SARTS_batch
        for _ in range(CYCLE_EPOCHS):
            # shuffle and split into minibatches!
            shuffled_indices = tf.random.shuffle(tf.range(BATCH_SIZE))
            num_mb = BATCH_SIZE // MINIBATCH
            for minibatch_indices in tf.split(shuffled_indices, num_mb):
                mb_SARTS = (tf.gather(S, minibatch_indices),
                            tf.gather(A, minibatch_indices),
                            tf.gather(R, minibatch_indices),
                            tf.gather(T, minibatch_indices),
                            tf.gather(S2, minibatch_indices))
                self.train_minibatch(mb_SARTS)
        # The freshly-updated policy becomes the next cycle's "old" policy.
        for old_pi_w, pi_w in zip(self.old_pi.weights, self.pi.weights):
            old_pi_w.assign(pi_w)

def train_PPO(agent, envs, render=False):
    """Run the PPO outer loop: collect CYCLE_LENGTH steps per actor env,
    then train the agent on the combined batch, for N_CYCLES cycles."""
    episode_returns = []
    current_episode_returns = [0 for env in envs]
    last_s = [env.reset() for env in envs]
    for _ in range(N_CYCLES):
        SARTS_samples = []
        next_last_s = []
        next_current_episode_returns = []
        for env, s, episode_return in zip(envs, last_s, current_episode_returns):
            for _ in range(CYCLE_LENGTH):
                a = agent.pick_action(s).numpy()
                s2, r, t, _ = env.step(a)
                if render:
                    env.render()
                episode_return += r
                SARTS_samples.append((s,a,r,t,s2))
                if t:
                    episode_returns.append(episode_return)
                    print(f'Episode {len(episode_returns):3d}: {episode_return}')
                    episode_return = 0
                    s = env.reset()
                else:
                    s = s2
            # Carry per-env state and running return over to the next cycle.
            next_last_s.append(s)
            next_current_episode_returns.append(episode_return)
        last_s = next_last_s
        current_episode_returns = next_current_episode_returns
        SARTS_batch = [tf.stack(X, axis=0) for X in zip(*SARTS_samples)]
        agent.train(SARTS_batch)

def make_agent(env):
    """Build V and pi MLPs sized to the env's observation/action spaces."""
    obs_shape = env.observation_space.shape
    n_actions = env.action_space.n
    V = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=obs_shape),
                             tf.keras.layers.Dense(400, activation='relu'),
                             tf.keras.layers.Dense(300, activation='relu'),
                             tf.keras.layers.Dense(1)])
    pi = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=obs_shape),
                              tf.keras.layers.Dense(400, activation='relu'),
                              tf.keras.layers.Dense(300, activation='sigmoid'),
                              tf.keras.layers.Dense(n_actions, activation='softmax')])
    return DiscretePPO(V, pi)

if __name__ == '__main__':
    if len(sys.argv) < 2:
        # BUG FIX: the original printed the usage message and then fell
        # through to gym.make(sys.argv[1]), crashing with IndexError.
        print('Usage: python ppo.py <Env-V*> (--render)')
        sys.exit(1)
    envs = [gym.make(sys.argv[1]) for _ in range(ACTORS)]
    agent = make_agent(envs[0])
    train_PPO(agent, envs, '--render' in sys.argv)
40.768
113
0.615385
687
5,096
4.36099
0.28821
0.028037
0.034713
0.036048
0.197597
0.122163
0.082777
0.082777
0.082777
0.082777
0
0.019325
0.279042
5,096
124
114
41.096774
0.796135
0.094388
0
0.05
0
0
0.029052
0.005679
0
0
0
0
0
1
0.06
false
0
0.03
0.01
0.12
0.02
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
794c7683b545a543ae42b9c3d18137a15b824634
2,620
py
Python
youtube_dl/views.py
Shovon588/api_collection
f348ffa8dc5c4dc69ba4c2a7d145c71e8273e0a2
[ "MIT" ]
null
null
null
youtube_dl/views.py
Shovon588/api_collection
f348ffa8dc5c4dc69ba4c2a7d145c71e8273e0a2
[ "MIT" ]
null
null
null
youtube_dl/views.py
Shovon588/api_collection
f348ffa8dc5c4dc69ba4c2a7d145c71e8273e0a2
[ "MIT" ]
null
null
null
from pytube import YouTube
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView

from .serializers import YoutubeDLSerializer
from .utils import make_time, make_size


class YoutubeDL(APIView):
    """Resolve a YouTube URL into downloadable video/audio stream metadata."""

    serializer_class = YoutubeDLSerializer

    def post(self, request):
        """Validate the posted URL and return stream info for the video.

        Responses:
            200: title, duration, thumbnail and per-resolution stream data.
            404: the URL could not be resolved by pytube.
            400: the request payload failed serializer validation.
        """
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            url = serializer.validated_data.get("url")
            # BUG FIX: narrowed the bare `except:` — it also swallowed
            # KeyboardInterrupt/SystemExit; pytube failures are Exceptions.
            try:
                file = YouTube(url)
            except Exception:
                return Response({
                    "status": "failed",
                    "message": "Invalid url",
                }, status=status.HTTP_404_NOT_FOUND)
            videos = file.streams
            thumbnail = file.thumbnail_url
            title = file.title
            duration = make_time(file.length)
            # Keep only the first stream seen for each target resolution.
            video_res = {
                "1080p": None,
                "720p": None,
                "480p": None,
                "360p": None,
                "240p": None,
                "144p": None,
            }
            aud_size = 0
            audio = None
            for video in videos:
                if video.resolution in video_res and video_res[video.resolution] is None:
                    video_res[video.resolution] = {"resolution": video.resolution,
                                                   "video_type": video.subtype,
                                                   "size": make_size(video.filesize),
                                                   "url": video.url}
                if video.type == "audio":
                    # Track the largest (best quality) audio stream.
                    if video.filesize > aud_size:
                        audio = video
                        aud_size = video.filesize
            video_data = [value for key, value in video_res.items() if value is not None]
            audio_data = None
            if audio is not None:
                audio_type = audio.subtype
                size = make_size(audio.filesize)
                url = audio.url
                audio_data = {"audio_type": audio_type, "size": size, "url": url}
            # BUG FIX: audio_data was computed but never returned; include it
            # in the payload (additive, so existing clients are unaffected).
            return Response({
                "status": "success",
                "message": "Got some data.",
                "title": title,
                "duration": duration,
                "thumbnail": thumbnail,
                "video_data": video_data,
                "audio_data": audio_data,
            }, status=status.HTTP_200_OK)
        return Response({"status": "failed",
                         "message": "Something went wrong.",
                         "error": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
34.933333
111
0.500763
247
2,620
5.157895
0.327935
0.031397
0.040031
0.040816
0.051805
0
0
0
0
0
0
0.018831
0.412214
2,620
74
112
35.405405
0.808442
0
0
0.032787
0
0
0.083206
0
0
0
0
0
0
1
0.016393
false
0
0.098361
0
0.196721
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
794d1639433e46b61c5a932940ac1015c7fa9ee0
593
py
Python
tomolab/DataSources/PET/__init__.py
TomographyLab/TomoLab
86b9a5894ef1660d7f4de39f560f1f92024b40f8
[ "Apache-2.0" ]
5
2019-06-01T13:16:00.000Z
2022-03-02T10:21:59.000Z
tomolab/DataSources/PET/__init__.py
TomographyLab/TomoLab
86b9a5894ef1660d7f4de39f560f1f92024b40f8
[ "Apache-2.0" ]
null
null
null
tomolab/DataSources/PET/__init__.py
TomographyLab/TomoLab
86b9a5894ef1660d7f4de39f560f1f92024b40f8
[ "Apache-2.0" ]
1
2019-06-01T13:19:18.000Z
2019-06-01T13:19:18.000Z
# -*- coding: utf-8 -*- # tomolab # Michele Scipioni # Harvard University, Martinos Center for Biomedical Imaging # University of Pisa __all__ = ['convert_listmode_dicom_to_interfile', 'import_interfile_projection', 'export_interfile_projection', 'import_h5f_projection', 'import_interfile_volume', 'export_interfile_volume'] from .PET_listmode import convert_listmode_dicom_to_interfile from .PET_sinogram import import_interfile_projection, export_interfile_projection, import_h5f_projection from .PET_volume import import_interfile_volume, export_interfile_volume
37.0625
105
0.812816
69
593
6.478261
0.42029
0.134228
0.089485
0.098434
0.635347
0.496644
0.308725
0.308725
0.308725
0
0
0.005747
0.11973
593
15
106
39.533333
0.850575
0.209106
0
0
0
0
0.337662
0.337662
0
0
0
0
0
1
0
false
0
0.833333
0
0.833333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
794d44a2cc74842f8b8d00f81d2ce675f076304a
5,043
py
Python
coot/data/ht100m_dataset.py
Jabb0/coot-videotext
2da20a3f3a50b69677e59869b02cbd72945913d9
[ "Apache-2.0" ]
null
null
null
coot/data/ht100m_dataset.py
Jabb0/coot-videotext
2da20a3f3a50b69677e59869b02cbd72945913d9
[ "Apache-2.0" ]
null
null
null
coot/data/ht100m_dataset.py
Jabb0/coot-videotext
2da20a3f3a50b69677e59869b02cbd72945913d9
[ "Apache-2.0" ]
null
null
null
import json

import pandas as pd
import numpy as np

from typing import Union, List, Tuple
from pathlib import Path
from timeit import default_timer as timer

from nntrainer import data as nn_data


def _time_to_seconds(time_column):
    """Convert a column of timedelta-parsable strings to float seconds."""
    return pd.to_timedelta(time_column).dt.total_seconds()


class HT100MBaseDataset:
    """
    Dataloader for HowTo100M dataset.

    Based on the index csv file of the HT100M dataset this builds a wrapper around the file structure
    to return individual files.
    """

    def __init__(self, dataset_root: Union[str, Path], metadata_name: str, split=None):
        """
        Setup the dataset

        Args:
            dataset_root: path to the dataset folder
            metadata_name: identifier of the metadata to use. Will select the files we want to use.
            split: identifier of the split to use or "ALL"/None to use all data
        """
        dataset_root = Path(dataset_root)

        # Read the CSV file containing information about the videos
        # Format is:
        # video_id, category_1, category_2, rank, task_id
        # This is used as lookup table of the existing videos
        csv = dataset_root.joinpath(f"meta_{metadata_name}.csv")
        self._metadata_csv = pd.read_csv(csv, usecols=["video_id", "split"], index_col="video_id")
        if split is not None and split != nn_data.DataSplitConst.ALL:
            self._metadata_csv = self._metadata_csv[self._metadata_csv["split"] == split]

        metadata_path = dataset_root.joinpath("metadata.json")
        if not metadata_path.exists():
            raise RuntimeError(f"metadata.json for HT100M dataset not found! Path: {dataset_root}")
        self._metadata = json.load(metadata_path.open("rt", encoding="utf8"))
        self._fps = self._metadata["fps"]

        self._caption_root = dataset_root.joinpath("captions")
        # Get all available caption files
        self._keys = self._metadata_csv.index.to_list()

        # Check the dataset integrity, i.e. that a caption csv file exists
        # for every index.
        if not self.check_integrity():
            raise RuntimeError("HT100MDataset: There are data_keys for which the features are not available!")

    def check_integrity(self) -> bool:
        """
        Checks if caption files for all keys exist. This is crucial for the integrity of the dataset.

        Returns:
            True if dataset integrity is correct.
        """
        timer_start = timer()
        # set comprehension instead of set([...]) — avoids the throwaway list
        available_keys = {x.stem for x in self._caption_root.glob("*.csv")}
        print(f"Took {timer() - timer_start:.1f} seconds for scanning caption directory. "
              f"Found {len(self._keys)} videos.")
        missing_keys = set(self._keys).difference(available_keys)
        keys_are_missing = len(missing_keys) != 0
        if keys_are_missing:
            print(f"There are {len(missing_keys)} missing keys. First 10: {list(missing_keys)[:10]}")
        return not keys_are_missing

    def _read_caption_csv(self, video_id: str) -> Tuple[List[str], List[float], List[float]]:
        """Read one caption csv and return (texts, start_secs, end_secs).

        BUG FIX: the return annotation was the tuple literal
        ``(List[str], List[float], List[float])``, which is not a valid typing
        construct; ``Tuple[...]`` expresses the intended type.
        """
        cap_csv = pd.read_csv(self._caption_root.joinpath(video_id + ".csv"),
                              usecols=["start", "end", "text"], keep_default_na=False)
        cap_csv = cap_csv[
            # Drop clips that have no subtitles/captions
            (cap_csv["text"].str.len() > 0)
        ]
        return (cap_csv['text'].tolist(),
                _time_to_seconds(cap_csv["start"]).tolist(),
                _time_to_seconds(cap_csv["end"]).tolist())

    def __getitem__(self, video_id: str) -> List[str]:
        raise NotImplementedError("GetItem cannot be called on BaseDataset")

    def __len__(self):
        """
        Returns len of dataset. I.e. number of videos.
        """
        return len(self._keys)

    def keys(self):
        return self._keys

    def data_keys(self):
        return self._keys


class HT100MCaptionDataset(HT100MBaseDataset):
    """Dataset variant that yields only the caption sentences per video."""

    def __getitem__(self, video_id: str) -> List[str]:
        sentences, _, _ = self._read_caption_csv(video_id)
        return sentences


class HT100MDataset(HT100MBaseDataset):
    """Dataset variant that yields caption text with segment timestamps."""

    def __init__(self, dataset_root: Union[str, Path], metadata_name: str, split: str, max_datapoints: int = -1):
        super(HT100MDataset, self).__init__(dataset_root, metadata_name, split=split)
        # reduce dataset size if requested
        if max_datapoints > -1:
            self._keys = self._keys[:max_datapoints]
            print(f"Reduced number of datapoints to {len(self._keys)}")

    def __getitem__(self, key: str):
        sentences, starts, stops = self._read_caption_csv(key)
        # Empty captions were already dropped in _read_caption_csv.
        return {
            "fps": self._fps,
            "data_key": key,
            "segments": [
                {
                    "text": text,
                    "start_sec": start,
                    "stop_sec": end
                } for (text, start, end) in zip(sentences, starts, stops)
            ]
        }
37.917293
113
0.628594
646
5,043
4.673375
0.289474
0.036436
0.024843
0.017887
0.109308
0.094733
0.05631
0.05631
0.035773
0.035773
0
0.011463
0.273448
5,043
132
114
38.204545
0.8125
0.197303
0
0.051948
0
0
0.145417
0.012545
0
0
0
0
0
1
0.142857
false
0
0.090909
0.038961
0.376623
0.038961
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
794d94442dfccd9fb0860ed1722ed3107bbed462
1,244
py
Python
qiime_16s/combine_collapsed_otu_tables.py
lotrus28/TaboCom
b67d66e4c410375a9efa08c5e637301e78e9204b
[ "Apache-2.0" ]
null
null
null
qiime_16s/combine_collapsed_otu_tables.py
lotrus28/TaboCom
b67d66e4c410375a9efa08c5e637301e78e9204b
[ "Apache-2.0" ]
null
null
null
qiime_16s/combine_collapsed_otu_tables.py
lotrus28/TaboCom
b67d66e4c410375a9efa08c5e637301e78e9204b
[ "Apache-2.0" ]
null
null
null
import sys import re import pandas as pd def combine_otu_tables(path_to_files): with open(path_to_files) as a: filenames = a.read().splitlines() separated = {re.search(r'ERR\d+?(?=_)',x).group(0):pd.read_table(x, sep = '\t', index_col = 1, header = None,engine='python') for x in filenames} indices = [list(x.index) for x in list(separated.values())] all_taxa = sum(indices,[]) all_taxa = list(set(all_taxa)) altogether = pd.DataFrame(None, columns = list(separated.keys()), index = all_taxa) for pat in separated: altogether[pat] = separated[pat][0] altogether = altogether.fillna(0) altogether['Mean'] = altogether.mean(axis = 1) if float(pd.__version__[:4]) >= 0.17: altogether = altogether.sort_values('Mean', axis = 0, ascending=False) else: altogether = altogether.sort('Mean', axis = 0, ascending=False) return(altogether.ix[:,:-1]) def main(): # list_of_files = 'temp2.txt' # output = 'combined.txt' list_of_files = sys.argv[1] output = sys.argv[2] combined = combine_otu_tables(list_of_files) print('Combining all OTU-tables') combined.to_csv(output, sep = '\t') if __name__ == "__main__": main()
30.341463
129
0.639871
173
1,244
4.398844
0.445087
0.036794
0.043364
0.047306
0.060447
0
0
0
0
0
0
0.015228
0.208199
1,244
40
130
31.1
0.75736
0.040997
0
0
0
0
0.055462
0
0
0
0
0
0
1
0.068966
false
0
0.103448
0
0.172414
0.034483
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
794e02a9abb8bfa43732422f20b7e7e3ffdaefeb
10,950
py
Python
bambinocampones/src/bambinocampones/website/migrations/0002_auto__add_field_galeria_foi_importante.py
ProfessionalIT/customers
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
[ "MIT" ]
null
null
null
bambinocampones/src/bambinocampones/website/migrations/0002_auto__add_field_galeria_foi_importante.py
ProfessionalIT/customers
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
[ "MIT" ]
1
2015-11-08T11:49:35.000Z
2015-11-08T11:49:43.000Z
bambinocampones/src/bambinocampones/website/migrations/0002_auto__add_field_galeria_foi_importante.py
ProfessionalIT/customers
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Galeria.foi_importante' db.add_column(u'website_galeria', 'foi_importante', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): # Deleting field 'Galeria.foi_importante' db.delete_column(u'website_galeria', 'foi_importante') models = { u'website.calendario': { 'Meta': {'object_name': 'Calendario'}, 'data_agendamento': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 4, 0, 0)'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}), 'tipo': ('django.db.models.fields.CharField', [], {'default': "u'E'", 'max_length': '1'}), 'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'website.cardapio': { 'Meta': {'object_name': 'Cardapio'}, 'ano': ('django.db.models.fields.CharField', [], {'default': "'2013'", 'max_length': '4'}), 'cardapio_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mes': ('django.db.models.fields.CharField', [], {'default': "'12'", 'max_length': '2'}), 'tipo': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'}) }, u'website.conteudodownload': { 'Meta': {'object_name': 'ConteudoDownload'}, 'conteudo_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'descricao': ('django.db.models.fields.CharField', [], {'max_length': '200'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'miniatura': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'slug': 
('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}), 'tipo': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'}), 'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'website.depoimento': { 'Meta': {'object_name': 'Depoimento'}, 'autor': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'conteudo': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'website.galeria': { 'Meta': {'object_name': 'Galeria'}, 'ano': ('django.db.models.fields.CharField', [], {'default': "'2013'", 'max_length': '4'}), 'descricao': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'destaque': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'foi_importante': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mes': ('django.db.models.fields.CharField', [], {'default': "'12'", 'max_length': '2'}), 'permite_comentario': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}), 'tipo': ('django.db.models.fields.CharField', [], {'default': "u'F'", 'max_length': '1'}), 'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'website.galeriaresource': { 'Meta': {'object_name': 'GaleriaResource'}, 'action_resource': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'galeria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Galeria']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'upload_resource': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'url_resource': ('django.db.models.fields.CharField', [], 
{'max_length': '200', 'null': 'True', 'blank': 'True'}) }, u'website.materialescolar': { 'Meta': {'object_name': 'MaterialEscolar'}, 'anexo_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'ano': ('django.db.models.fields.CharField', [], {'default': "'2013'", 'max_length': '4'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'miniatura': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'servico': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Servico']"}) }, u'website.menu': { 'Meta': {'object_name': 'Menu'}, 'endereco': ('django.db.models.fields.CharField', [], {'max_length': '200'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'menu_pai': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'Menu Pai'", 'null': 'True', 'to': u"orm['website.Menu']"}), 'nivel': ('django.db.models.fields.IntegerField', [], {}), 'ordem': ('django.db.models.fields.IntegerField', [], {}), 'pagina': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Pagina']", 'null': 'True'}), 'palavras_chaves': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'rascunho': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}), 'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'website.pagina': { 'Meta': {'object_name': 'Pagina'}, 'conteudo': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'palavras_chaves': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'permite_comentario': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'rascunho': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], 
{'unique': 'True', 'max_length': '200'}), 'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'website.parametro': { 'Meta': {'object_name': 'Parametro'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'valor': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'website.professor': { 'Meta': {'object_name': 'Professor'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nome': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}) }, u'website.publicacao': { 'Meta': {'object_name': 'Publicacao'}, 'completa': ('django.db.models.fields.TextField', [], {}), 'data_hora': ('django.db.models.fields.DateTimeField', [], {}), 'data_publicacao': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'destaque': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'galeria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Galeria']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'introducao': ('django.db.models.fields.TextField', [], {}), 'miniatura_publicacao': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'palavras_chaves': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'permite_comentario': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'rascunho': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}), 'tipos': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'}), 'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, u'website.recomendacao': { 'Meta': {'object_name': 'Recomendacao'}, 'acao_link': 
('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'descricao': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'destaque': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'miniatura': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'tipo': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'}) }, u'website.servico': { 'Meta': {'object_name': 'Servico'}, 'atividades_extras': ('django.db.models.fields.TextField', [], {}), 'atividades_incluidas': ('django.db.models.fields.TextField', [], {}), 'conteudo_programatico': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'observacoes': ('django.db.models.fields.TextField', [], {}), 'professor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Professor']"}), 'rotina_diaria': ('django.db.models.fields.TextField', [], {}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}), 'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'}) } } complete_apps = ['website']
69.303797
172
0.541918
1,096
10,950
5.312044
0.119526
0.127791
0.22123
0.316043
0.798179
0.721745
0.70371
0.696153
0.673136
0.645139
0
0.017536
0.218813
10,950
158
173
69.303797
0.663082
0.009041
0
0.367347
0
0
0.562903
0.319708
0
0
0
0
0
1
0.013605
false
0
0.047619
0
0.081633
0
0
0
0
null
0
1
1
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
794e90ebf4066373e9e80503d5223bdfcb0a3273
580
py
Python
lab8/point.py
kuzkov/computational-geometry
4411231a8097e618e03b3ef0ad5836e49e837216
[ "MIT" ]
1
2021-04-04T07:34:14.000Z
2021-04-04T07:34:14.000Z
lab8/point.py
kuzkov/computational-geometry
4411231a8097e618e03b3ef0ad5836e49e837216
[ "MIT" ]
null
null
null
lab8/point.py
kuzkov/computational-geometry
4411231a8097e618e03b3ef0ad5836e49e837216
[ "MIT" ]
1
2021-02-18T09:50:10.000Z
2021-02-18T09:50:10.000Z
import math import numpy as np from vector import Vector import segment as segment_lib class Point(Vector): def direction(self, segment): det = np.linalg.det([ segment.as_vector().as_array(), segment_lib.Segment(segment.p1, self).as_vector().as_array() ]) return 1 if det > 0 else 0 if math.isclose(det, 0) else -1 # 1 left, -1 right, 0 on def inside_segment(self, segment): pass def tolist(self): return (self.x, self.y) def within_polygon(self, polygon): return polygon.contains(self)
25.217391
91
0.631034
84
580
4.261905
0.416667
0.067039
0.055866
0.083799
0
0
0
0
0
0
0
0.021077
0.263793
580
22
92
26.363636
0.81733
0.037931
0
0
0
0
0
0
0
0
0
0
0
1
0.235294
false
0.058824
0.235294
0.117647
0.705882
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
4
794ebda12bc12785e1d67a516bc488f2cac0229b
424
py
Python
setup_s3_bucket.py
chambridge/aws-cost-mgmt-access
994d3ed62601469d2d68a67e6806a3601178328a
[ "MIT" ]
null
null
null
setup_s3_bucket.py
chambridge/aws-cost-mgmt-access
994d3ed62601469d2d68a67e6806a3601178328a
[ "MIT" ]
null
null
null
setup_s3_bucket.py
chambridge/aws-cost-mgmt-access
994d3ed62601469d2d68a67e6806a3601178328a
[ "MIT" ]
null
null
null
import os from cloud.aws_service import AwsService def main(): """Execute script.""" region = os.environ.get('REGION', 'us-east-1') s3_bucket = os.environ.get('S3_BUCKET', 'costmgmtacct1234') aws = AwsService() result = aws.create_bucket(s3_bucket, region) if result: print(f'S3 bucket {s3_bucket} was created.') else: print(f'Failed creating S3 bucket {s3_bucket}.') main()
22.315789
63
0.653302
57
424
4.736842
0.526316
0.207407
0.155556
0.118519
0
0
0
0
0
0
0
0.035714
0.207547
424
18
64
23.555556
0.767857
0.035377
0
0
0
0
0.277916
0
0
0
0
0
0
1
0.083333
false
0
0.166667
0
0.25
0.166667
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
794f5243f54f0804ec162bec691a557c23883c30
773
py
Python
shared/charge_controller_tcp_driver/exemple_driver.py
EDF-Lab/EDF
3ab2d9e1820dfb713bbd54c91ba72d7d32d998f9
[ "MIT" ]
16
2022-02-11T14:49:04.000Z
2022-03-30T07:33:45.000Z
shared/charge_controller_tcp_driver/exemple_driver.py
EDF-Lab/EDF
3ab2d9e1820dfb713bbd54c91ba72d7d32d998f9
[ "MIT" ]
1
2022-02-16T15:23:50.000Z
2022-02-21T15:30:21.000Z
shared/charge_controller_tcp_driver/exemple_driver.py
EDF-Lab/EDF
3ab2d9e1820dfb713bbd54c91ba72d7d32d998f9
[ "MIT" ]
1
2022-03-24T10:52:28.000Z
2022-03-24T10:52:28.000Z
import sys sys.path.append("..") import time from charge_controller_tcp_driver.charge_controller_tcp_client_helper import * if __name__ == '__main__': helper = ChargeControllerTCPClientHelper("169.254.43.3", 12500) time.sleep(3) helper.set_pwm(100) print("PWM:", helper.get_pwm()) #time.sleep(10) #helper.set_ev_state("A") #print("EV State: ", helper.get_ev_state()) time.sleep(10) helper.set_pwm(50) time.sleep(2) print("PWM:", helper.get_pwm()) #print("EV State: ", helper.get_ev_state()) time.sleep(1) #helper.set_pwm(50) #print("PWM:", helper.get_pwm()) time.sleep(10) helper.set_pwm(30) time.sleep(2) print("PWM:", helper.get_pwm()) # print("EV State: ", helper.get_ev_state())
24.15625
78
0.648124
111
773
4.234234
0.315315
0.134043
0.102128
0.144681
0.551064
0.551064
0.514894
0.514894
0.514894
0.417021
0
0.052133
0.181113
773
31
79
24.935484
0.690363
0.276843
0
0.388889
0
0
0.061818
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
794f8be8a7920197768cc08897059ca509f8735d
5,312
py
Python
tests/test_intent_classification.py
BatsResearch/zsl-kg
9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572
[ "Apache-2.0" ]
83
2021-08-30T02:50:37.000Z
2022-02-22T09:37:36.000Z
tests/test_intent_classification.py
BatsResearch/zsl-kg
9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572
[ "Apache-2.0" ]
2
2021-09-10T08:44:13.000Z
2022-01-23T17:33:35.000Z
tests/test_intent_classification.py
BatsResearch/zsl-kg
9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572
[ "Apache-2.0" ]
6
2021-09-10T07:09:41.000Z
2021-11-07T14:31:33.000Z
import os from typing import Text import torch import unittest import torch.nn as nn import torch.optim as optim from allennlp.models import Model from allennlp.data.vocabulary import Vocabulary from zsl_kg.class_encoders.auto_gnn import AutoGNN from zsl_kg.example_encoders.text_encoder import TextEncoder from zsl_kg.data.snips import SnipsDataset from allennlp.data.iterators import BasicIterator from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder from zsl_kg.common.graph import NeighSampler from zsl_kg.knowledge_graph.conceptnet import ConceptNetKG from allennlp.common.tqdm import Tqdm class BiLinearModel(Model): def __init__( self, vocab: Vocabulary, example_encoder: object, class_encoder: object, joint_dim: int, bias: bool = False, ): super().__init__(vocab) self.example_encoder = example_encoder self.class_encoder = class_encoder self.text_joint = nn.Linear( self.example_encoder.output_dim, joint_dim, bias=bias ) self.class_joint = nn.Linear( self.class_encoder.output_dim, joint_dim, bias=bias ) def forward(self, batch, node_idx, kg): encoder_out = self.example_encoder(batch) text_rep = self.text_joint(encoder_out) # get label representation class_out = self.class_encoder(node_idx, kg) class_rep = self.class_joint(class_out) logits = torch.matmul(text_rep, class_rep.t()) return logits class TestIntentClassification(unittest.TestCase): def setUp( self, ): label_maps = { "train": ["weather", "music", "restaurant"], "dev": ["search", "movie"], "test": ["book", "playlist"], } data_path = "tests/test_data/datasets/snips/" datasets = [] for split in ["train", "dev", "test"]: labels = label_maps[split] label_to_idx = dict( [(label, idx) for idx, label in enumerate(labels)] ) reader = SnipsDataset(label_to_idx) path = os.path.join(data_path, f"{split}.txt") _dataset = reader.read(path) datasets.append(_dataset) self.train_dataset, self.dev_dataset, self.test_dataset = datasets vocab = Vocabulary.from_instances( self.train_dataset + self.dev_dataset 
+ self.test_dataset ) # create the iterator self.iterator = BasicIterator(batch_size=32) self.iterator.index_with(vocab) print("Loading GloVe...") # token embed token_embed_path = os.path.join(data_path, "word_emb.pt") token_embedding = torch.load(token_embed_path) print("word embeddings created...") word_embeddings = BasicTextFieldEmbedder({"tokens": token_embedding}) # create the text encoder print("Loading the text encoder...") self.example_encoder = TextEncoder(word_embeddings, 300, 32, 20) trgcn = { "input_dim": 300, "output_dim": 64, "type": "trgcn", "gnn": [ { "input_dim": 300, "output_dim": 64, "activation": nn.ReLU(), "normalize": True, "sampler": NeighSampler(100, mode="topk"), "fh": 100, }, { "input_dim": 64, "output_dim": 64, "activation": nn.ReLU(), "normalize": True, "sampler": NeighSampler(50, mode="topk"), }, ], } self.class_encoder = AutoGNN(trgcn) self.train_graph = ConceptNetKG.load_from_disk( "tests/test_data/subgraphs/snips/train_graph" ) node_to_idx = dict( [(node, idx) for idx, node in enumerate(self.train_graph.nodes)] ) # self.train_nodes = torch.tensor( [ node_to_idx[node] for node in [ "/c/en/weather", "/c/en/music", "/c/en/restaurant", ] ] ) self.model = BiLinearModel( vocab, self.example_encoder, self.class_encoder, joint_dim=20 ) self.optimizer = optim.Adam( self.model.parameters(), lr=1e-03, weight_decay=5e-04 ) self.loss_function = nn.CrossEntropyLoss() def test_intent_classification_train(self): self.model.train() total_batch_loss = 0.0 generator_tqdm = Tqdm.tqdm( self.iterator(self.train_dataset, num_epochs=1, shuffle=False), total=self.iterator.get_num_batches(self.train_dataset), ) for batch in generator_tqdm: self.optimizer.zero_grad() logits = self.model( batch["sentence"], self.train_nodes, self.train_graph ) loss = self.loss_function(logits, batch["labels"]) total_batch_loss += loss.item() loss.backward() self.optimizer.step() self.assertLessEqual(total_batch_loss, 100.0)
31.247059
77
0.573419
570
5,312
5.12807
0.301754
0.027711
0.015395
0.015737
0.139925
0.119398
0.093055
0.07116
0.07116
0.040369
0
0.012864
0.326807
5,312
169
78
31.431953
0.80453
0.01506
0
0.094891
0
0
0.081324
0.01416
0
0
0
0
0.007299
1
0.029197
false
0
0.116788
0
0.167883
0.021898
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79509ae0de663c69b13b3aa40296a01c2a31c785
5,077
py
Python
chase/simulation.py
Motwg/WolfAndSheep-2019
d6c50660368661fddf88dc860caac7236a791beb
[ "MIT" ]
null
null
null
chase/simulation.py
Motwg/WolfAndSheep-2019
d6c50660368661fddf88dc860caac7236a791beb
[ "MIT" ]
null
null
null
chase/simulation.py
Motwg/WolfAndSheep-2019
d6c50660368661fddf88dc860caac7236a791beb
[ "MIT" ]
null
null
null
import csv import json import logging import math import random as ran def distance(point1, point2): logging.debug("Args: {0}".format(locals())) if type(point1) != type(point2): logging.warning("Types of given arguments are different: {0} != {1}".format(point1, point2)) logging.debug("Returns: {0}".format(((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2) ** 0.5)) return ((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2) ** 0.5 class Animal: def __init__(self, id, x, y, move_dist): logging.info("{0}:[{1}, {2}]".format(id, x, y)) self.id = id self.x = x self.y = y self.move_dist = move_dist def __lt__(self, other): return self.id < other.id def move(self, x, y): logging.info("{0}:[{1}, {2}] => [{3}, {4}]".format(self.id, self.x, self.y, self.x+x, self.y+y)) self.x += x self.y += y def move_in_direction(self, direction): if direction == 0: self.move(0, self.move_dist) elif direction == 1: self.move(0, -self.move_dist) elif direction == 2: self.move(self.move_dist, 0) elif direction == 3: self.move(-self.move_dist, 0) elif type(direction) == Animal: degrees = math.atan2(direction.y-self.y, direction.x-self.x) self.move( self.move_dist * math.cos(degrees), self.move_dist * math.sin(degrees) ) def move_in_random_direction(self): self.move_in_direction(ran.randint(0, 3)) def distance(self, animal): return distance([self.x, self.y], [animal.x, animal.y]) def find_the_closest_animal(self, animals): dist = self.distance(animals[0]) closest = animals[0] for animal in animals: new_dist = distance([self.x, self.y], [animal.x, animal.y]) if dist > new_dist: dist = new_dist closest = animal return closest def eaten(self): logging.info("Eaten: {0}:[{1}, {2}]".format(self.id, self.x, self.y)) self.x = None self.y = None def get_pos(self): return [self.x, self.y] @staticmethod def generate_animals(animals_number, move_range, spawn_range=10.0): logging.debug("Args: {0}".format(locals())) new_animals = [] for s in range(animals_number): new_animals.append(Animal( s + 1, 
ran.random() * spawn_range * 2 - spawn_range, ran.random() * spawn_range * 2 - spawn_range, move_range)) logging.debug("Returns: {0}".format(new_animals)) return new_animals def save_json(json_data, filename='pos.json', save_dir='.'): logging.debug("Args: {0}".format(locals())) with open(save_dir+"/"+filename, 'w') as json_file: json.dump(json_data, json_file) def save_csv(csv_data=None, filename='alive.csv', opening_parameter='a', save_dir='.'): logging.debug("Args: {0}".format(locals())) with open(save_dir+"/"+filename, opening_parameter, newline='') as csv_file: writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL) if csv_data is not None: writer.writerow(csv_data) def simulate(wolves_sim, sheep_sim, turns_number=50, save_dir='.', wait=False): logging.debug("Args: {0}".format(locals())) sheep_eaten = [] save_csv(None, 'alive.csv', 'w', save_dir) # nadpisuje plik for t in range(turns_number): for s in sheep_sim: s.move_in_random_direction() for w in wolves_sim: closest = w.find_the_closest_animal(sheep_sim) if w.distance(closest) <= w.move_dist: w.x = closest.x w.y = closest.y closest.eaten() sheep_index = closest.id sheep_eaten.append(closest) sheep_sim.remove(closest) else: w.move_in_direction(closest) sheep_index = None print("Turn: {0}\n" "Wolf position: {1}\n" "Sheep alive: {2}\n" "Eaten sheep: {3}".format(t + 1, wolves_sim[0].get_pos(), len(sheep_sim), sheep_index)) # zapis json i csv pos = { 'round_no': t + 1, 'wolf_pos': wolves_sim[0].get_pos(), 'sheep_pos': list(map(Animal.get_pos, sorted(sheep_sim+sheep_eaten))) } save_json(pos, 'pos.json', save_dir) save_csv([t+1, len(sheep_sim)], 'alive.csv', 'a', save_dir) # oczekiwanie na klawisz if wait: input("Press Enter to continue...") # populacja owiec spadnie do 0 => koniec symulacji if len(sheep_sim) == 0: logging.info("Wolf ate every sheep. End of simulation.") break logging.debug("Returns: {0}".format(sheep_eaten)) return sheep_eaten
32.33758
104
0.554855
670
5,077
4.044776
0.208955
0.038376
0.017712
0.031365
0.279705
0.221402
0.189299
0.135793
0.110701
0.067159
0
0.0234
0.301359
5,077
156
105
32.544872
0.740626
0.020288
0
0.058333
0
0
0.081304
0
0
0
0
0
0
1
0.116667
false
0
0.041667
0.025
0.225
0.008333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79509e0da59087724c7ad32862f4a10871238e6b
4,518
py
Python
anchorgql/runlocal.py
vybenetwork/anchorgql
d8a8a3fa332e0076f20061689951645c0dae1642
[ "MIT" ]
1
2022-02-20T22:05:26.000Z
2022-02-20T22:05:26.000Z
anchorgql/runlocal.py
vybenetwork/anchorgql
d8a8a3fa332e0076f20061689951645c0dae1642
[ "MIT" ]
null
null
null
anchorgql/runlocal.py
vybenetwork/anchorgql
d8a8a3fa332e0076f20061689951645c0dae1642
[ "MIT" ]
null
null
null
import json import subprocess import asyncio from solana.rpc.async_api import AsyncClient from solana.publickey import PublicKey from anchorpy import Program, Provider, Wallet class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKCYAN = '\033[96m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' def build_and_start_server(project_name, prd_mode): print(f'{bcolors.OKCYAN}INFO: Starting test for {project_name}') completed_process_result = subprocess.run( "npm run prod", shell=True) if completed_process_result.returncode != 0: print( f'{bcolors.FAIL}ERROR: Failed to generate Apollo GraphQL project for project: {project_name}{bcolors.ENDC}') return False print(f'{bcolors.OKGREEN}DONE: Project creation successful for project: {project_name}{bcolors.ENDC}') server_directory = "./src/server" new_process = subprocess.run( "npm start", cwd=server_directory, shell=True) if new_process.returncode != 0: print( f'{bcolors.FAIL}ERROR: Failed to start newly generated Apollo GraphQL server for project: {project_name}{bcolors.ENDC}') return False print(f'{bcolors.OKGREEN}DONE: Project startup successful for project: {project_name}{bcolors.ENDC}') return True def create_project_config(path, content): with open(path, 'w') as f: f.write(json.dumps(content)) return async def check_and_replace_with_new_idl(program_id, idl_path, anchor_provider_url): try: client = AsyncClient(anchor_provider_url) provider = Provider(client, Wallet.local()) program_id = PublicKey(program_id) idl = await Program.fetch_raw_idl( program_id, provider ) except: await client.close() return if idl is not None: with open(idl_path, 'w') as file: json.dump(idl, file) await client.close() return def main(): # On Windows, if an error happens where the channels file isn't found, you probably opened the project # from the wrong directory. Either try reopening the project from the correct directory or play with the # line below. 
# os.chdir('./anchorgql') config = json.load(open('channels.json')) channels_config = config['channels'] results = [] for channel in channels_config: project_name = channel['PROJECT_NAME'] program_id = channel['PROGRAM_ID'] anchor_provider_url = channel['ANCHOR_PROVIDER_URL'] idl_path = channel['IDL_PATH'] asyncio.run(check_and_replace_with_new_idl( program_id, idl_path, anchor_provider_url)) content = { "projectName": project_name, "protocol": channel["PROTOCOL"], "network": channel["NETWORK"], "programID": program_id, "anchorProviderURL": anchor_provider_url, "idlPath": idl_path, "anchorVersion": config['anchorVersion'], "idl": config['idl'], "port": config['port'], "packageJsonTemplateFile": config['packageJsonTemplateFile'], "indexTemplateFile": config['indexTemplateFile'], "typeDefTemplateFile": config['typeDefTemplateFile'], "configFile": config['configFile'], "testMode": config["testMode"], "prdMode": config["prdMode"] } create_project_config('./src/config.json', content) passed = build_and_start_server(project_name, config["prdMode"]) results.append({ "projectName": project_name, "passed": passed }) print() print("===================================================") print("===================================================") print("===================================================") print() print(f'{bcolors.OKBLUE}INFO: Test results:{bcolors.ENDC}') for result in results: if result['passed']: print( f'{bcolors.OKGREEN}{result["projectName"]}: Passed{bcolors.ENDC}') else: print( f'{bcolors.FAIL}{result["projectName"]}: Failed{bcolors.ENDC}') print() print("===================================================") print("=================== End of Run ====================") print("===================================================") if __name__ == '__main__': main()
36.144
132
0.588092
477
4,518
5.404612
0.32914
0.046936
0.040341
0.032583
0.204034
0.192397
0.169123
0.134213
0.134213
0.102405
0
0.01275
0.236166
4,518
124
133
36.435484
0.73428
0.0529
0
0.196262
0
0.018692
0.337857
0.140384
0
0
0
0
0
1
0.028037
false
0.037383
0.056075
0
0.233645
0.158879
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7950faaf8969b4bfd67614a72bff42f402a632e6
357
py
Python
bookwyrm/activitypub/image.py
mouse-reeve/fedireads
e3471fcc3500747a1b1deaaca662021aae5b08d4
[ "CC0-1.0" ]
270
2020-01-27T06:06:07.000Z
2020-06-21T00:28:18.000Z
bookwyrm/activitypub/image.py
mouse-reeve/fedireads
e3471fcc3500747a1b1deaaca662021aae5b08d4
[ "CC0-1.0" ]
158
2020-02-10T20:36:54.000Z
2020-06-26T17:12:54.000Z
bookwyrm/activitypub/image.py
mouse-reeve/fedireads
e3471fcc3500747a1b1deaaca662021aae5b08d4
[ "CC0-1.0" ]
15
2020-02-13T21:53:33.000Z
2020-06-17T16:52:46.000Z
""" an image, nothing fancy """ from dataclasses import dataclass from .base_activity import ActivityObject @dataclass(init=False) class Document(ActivityObject): """a document""" url: str name: str = "" type: str = "Document" id: str = None @dataclass(init=False) class Image(Document): """an image""" type: str = "Image"
17
41
0.64986
42
357
5.5
0.52381
0.060606
0.155844
0.199134
0
0
0
0
0
0
0
0
0.212885
357
20
42
17.85
0.822064
0.120448
0
0.181818
0
0
0.043919
0
0
0
0
0
0
1
0
true
0
0.181818
0
0.818182
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
3
795109620dee96ad8eef48181ff1ae3077d016d2
477
py
Python
TradzQAI/tools/indicators/moving_average_convergence_divergence.py
kkuette/AI_project
1f46cb2536b24cb3716250f1e9705daa76af4f60
[ "Apache-2.0" ]
164
2017-11-24T13:07:04.000Z
2022-03-10T04:54:46.000Z
TradzQAI/tools/indicators/moving_average_convergence_divergence.py
kkuette/AI_project
1f46cb2536b24cb3716250f1e9705daa76af4f60
[ "Apache-2.0" ]
21
2018-09-29T10:27:10.000Z
2019-06-12T07:01:58.000Z
TradzQAI/tools/indicators/moving_average_convergence_divergence.py
kkuette/AI_project
1f46cb2536b24cb3716250f1e9705daa76af4f60
[ "Apache-2.0" ]
49
2018-05-09T17:28:52.000Z
2022-02-27T04:50:45.000Z
from .catch_errors import check_for_period_error
from .exponential_moving_average import exponential_moving_average as ema


def moving_average_convergence_divergence(data, short_period, long_period):
    """
    Moving Average Convergence Divergence.

    Formula:
    EMA(DATA, P1) - EMA(DATA, P2)
    """
    # Validate each look-back window against the data before computing;
    # short is checked first, matching the argument order.
    for period in (short_period, long_period):
        check_for_period_error(data, period)

    # The MACD line is the gap between the fast and slow EMAs.
    return ema(data, short_period) - ema(data, long_period)
29.8125
75
0.761006
64
477
5.3125
0.375
0.152941
0.123529
0.167647
0.135294
0
0
0
0
0
0
0.005013
0.163522
477
15
76
31.8
0.847118
0.163522
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0.285714
0
0.571429
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
795164e9b019d5e0233e60502428b4c2cb401ddf
4,647
py
Python
scripts/scrape_cgc.py
eklipse2009/ZX-Pokemaster
113bf2e242347b475cca9eadbae4f1b67f498466
[ "MIT" ]
8
2018-11-18T00:37:25.000Z
2020-12-06T13:17:53.000Z
scripts/scrape_cgc.py
eklipse2009/ZX-Pokemaster
113bf2e242347b475cca9eadbae4f1b67f498466
[ "MIT" ]
8
2017-08-21T10:07:58.000Z
2020-03-29T18:23:37.000Z
scripts/scrape_cgc.py
eklipse2009/ZX-Pokemaster
113bf2e242347b475cca9eadbae4f1b67f498466
[ "MIT" ]
1
2021-03-04T17:43:36.000Z
2021-03-04T17:43:36.000Z
import os
import glob
import shutil
import zipfile

from functions.game_name_functions import *

# The script assumes it runs from the repository root; hop up one level if
# it was launched from inside scripts/.
if (os.getcwd().endswith('scripts')):
    os.chdir('..')

from classes.scraper import *


def scrape_csscgc():
    """Scrape the CSSCGC competition archive (1996-2016) and file each game
    under tosec/CSSCGC Games/<year>/ with a TOSEC-style filename.

    Sources: the yoursinclair.co.uk per-year result pages for metadata, and a
    local mirror under tosec/csscgc scrape/CSSCGC<year>/ for the actual files.
    Files referenced by a page are copied (or extracted from .zip archives);
    leftovers in the local mirror are copied to an 'unsorted' subfolder.
    """
    # if os.path.exists('tosec\\CSSCGC Games'):
    #     shutil.rmtree('tosec\\CSSCGC Games')
    s = Scraper()
    template = 'https://www.yoursinclair.co.uk/csscgc/csscgc.cgi?year='
    for year in range(1996, 2017):
        files_extracted = []  # sources already placed this year (skip later)
        page = template + str(year)
        selector = s.loadUrl(page)
        # Each game on the page sits in its own border="1" table.
        games_tables = selector.xpath('//table[@border="1"]').extract_all()
        for game_table in games_tables:
            cells = Selector(game_table).xpath('//td//text()').extract_all()
            game_name = cells[0]
            author = cells[2]
            if not author.startswith('Mr'):
                author = putInitialsToEnd(author)
            # cells[4] may hold several space-separated names; also keep the
            # raw cell in case the filename itself contains a space.
            filenames = list(set(cells[4].split(' ')+[cells[4]]))
            format = cells[10]
            game_represented = False  # did any file end up on disk?
            for filename in filenames:
                if not filename:
                    continue
                filename = os.path.basename(filename)
                ext = os.path.splitext(filename)[-1].lower()
                # Build the TOSEC-style name, then normalise the noisy
                # format strings coming from the page.
                tosec_name = '{} ({})({})({})[CSSCGC]{}'.format(game_name, str(year), author, format, ext)
                tosec_name = tosec_name.replace('(Spectrum)', '').replace('ZX Spectrum ', '').replace('(48K)', '')
                tosec_name = tosec_name.replace('(128K Spectrum)', '(128K)')
                tosec_name = tosec_name.replace('(128K-+2)', '(+2)')
                tosec_name = tosec_name.replace('(unknown)', '(-)')
                tosec_name = getFileSystemFriendlyName(tosec_name)
                src = os.path.join('tosec', 'csscgc scrape', 'CSSCGC' + str(year), filename)
                dest = os.path.join('tosec', 'CSSCGC Games', str(year), tosec_name)
                # print(src, dest)
                if not os.path.exists(src):
                    # print('File does not exist:', filename, 'Year:', year)
                    continue
                if os.path.exists(dest):
                    print('Conflict:', tosec_name, filename, 'Year:', year)
                    continue
                os.makedirs(os.path.dirname(dest), exist_ok=True)
                if ext == '.zip':
                    with zipfile.ZipFile(src, 'r') as zf:
                        # Collect archive members that look like game images.
                        files_to_extract = []
                        conflict = False
                        for zfname in zf.namelist():
                            zfname_ext = zfname.split('.')[-1].lower()
                            if zfname_ext in GAME_EXTENSIONS:
                                files_to_extract.append(zfname)
                        # Two members with the same extension are ambiguous.
                        for each in GAME_EXTENSIONS:
                            if len([x for x in files_to_extract if x.endswith(each)])>1:
                                print('Conflict:', tosec_name, src, files_to_extract, 'Year:', year)
                                conflict = True
                                break
                        if not conflict and files_to_extract:
                            for file in files_to_extract:
                                # NOTE(review): reads/renames from
                                # files_to_extract[0] every iteration, not
                                # `file` — looks like a bug; confirm intent.
                                data = zf.read(files_to_extract[0])
                                ext = os.path.splitext(files_to_extract[0])[-1].lower()
                                dest = dest.replace('.zip', ext)
                                with open(dest, 'wb+') as output:
                                    output.write(data)
                                game_represented = True
                            files_extracted.append(src)
                else:
                    shutil.copy(src, dest)
                    files_extracted.append(src)
                    game_represented = True
            if not game_represented:
                # NOTE(review): tosec_name may be unbound here if every
                # candidate filename was empty — confirm.
                print('Game not represented:', tosec_name, cells[4], 'Year:', year)
        # Anything in the local mirror that no page referenced goes to
        # an 'unsorted' folder with a minimal TOSEC-style name.
        for src in glob.glob(os.path.join('tosec', 'csscgc scrape', 'CSSCGC'+str(year), '*')):
            filename, ext = os.path.splitext(os.path.basename(src))
            if ext[1:] not in GAME_EXTENSIONS+['zip']:
                continue
            if src in files_extracted:
                continue
            else:
                tosec_name = '{} ({})(-)[CSSCGC]{}'.format(filename.title(), str(year), ext)
                dest = os.path.join('tosec', 'CSSCGC Games', str(year), 'unsorted', tosec_name)
                os.makedirs(os.path.dirname(dest), exist_ok=True)
                shutil.copy(src, dest)
                print('Copied: ', src, 'to:', dest, 'Year:', year)


if __name__=='__main__':
    scrape_csscgc()
49.967742
114
0.497095
483
4,647
4.641822
0.252588
0.068243
0.049955
0.032114
0.176628
0.135593
0.109723
0.109723
0.109723
0.042819
0
0.012375
0.374005
4,647
93
115
49.967742
0.758336
0.03314
0
0.174419
0
0
0.089329
0.004901
0
0
0
0
0
1
0.011628
false
0
0.069767
0
0.081395
0.046512
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7951a7941dd6bc9bef1b322015def6a9a9e06453
328
py
Python
app/pit/forms.py
edynox/iis
594200506b641cbac249dc6e95d229bea1edeb28
[ "MIT" ]
null
null
null
app/pit/forms.py
edynox/iis
594200506b641cbac249dc6e95d229bea1edeb28
[ "MIT" ]
null
null
null
app/pit/forms.py
edynox/iis
594200506b641cbac249dc6e95d229bea1edeb28
[ "MIT" ]
null
null
null
from django.forms import ModelForm

from ..models import Pit


class PitForm(ModelForm):
    """ModelForm for a Pit, exposing only its location field.

    Adds the Bootstrap 'form-control' CSS class to the location widget.
    """

    class Meta:
        model = Pit
        fields = ['location']

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original called super(ModelForm, self).__init__, which
        # starts MRO lookup *after* ModelForm and so would skip any __init__
        # defined on ModelForm itself. Zero-argument super() dispatches from
        # PitForm correctly.
        super().__init__(*args, **kwargs)
        self.fields['location'].widget.attrs['class'] = 'form-control'
25.230769
70
0.631098
37
328
5.378378
0.621622
0.140704
0
0
0
0
0
0
0
0
0
0
0.228659
328
12
71
27.333333
0.786561
0
0
0
0
0
0.100917
0
0
0
0
0
0
1
0.111111
false
0
0.222222
0
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
7952609e9f72568f6207d4ca79537c3b28a117d4
203
py
Python
classy_start/paths.py
mfonism/django-classy-start
70b73e1a836c2ae4c3bae6f53846b07a30a81ac0
[ "MIT" ]
1
2022-01-20T18:48:46.000Z
2022-01-20T18:48:46.000Z
classy_start/paths.py
mfonism/django-classy-start
70b73e1a836c2ae4c3bae6f53846b07a30a81ac0
[ "MIT" ]
8
2020-10-05T15:27:07.000Z
2021-02-16T17:17:54.000Z
classy_start/paths.py
mfonism/django-classy-start
70b73e1a836c2ae4c3bae6f53846b07a30a81ac0
[ "MIT" ]
5
2020-10-05T18:11:44.000Z
2022-01-21T18:33:13.000Z
import pathlib

# Anchor every template location on this module's own directory;
# resolve(strict=True) raises immediately if the package layout is broken.
_PACKAGE_DIR = pathlib.Path(__file__).resolve(strict=True).parent

# Bundled scaffolding templates live under conf/.
TEMPLATES_DIR = _PACKAGE_DIR / 'conf'
APP_TEMPLATES_DIR = TEMPLATES_DIR / 'app_template'
PROJECT_TEMPLATES_DIR = TEMPLATES_DIR / 'project_template'
29
75
0.807882
26
203
5.807692
0.538462
0.397351
0.278146
0.317881
0
0
0
0
0
0
0
0
0.093596
203
6
76
33.833333
0.820652
0
0
0
0
0
0.157635
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
79526a360c29da4c2b5320e1dc30a9a350d4bff9
5,249
py
Python
molar/backend/database/query.py
aspuru-guzik-group/molar
a3e0c337bd8a41c94b2c25831c95048cc7614f04
[ "BSD-3-Clause" ]
4
2021-07-20T18:49:44.000Z
2021-10-15T00:58:12.000Z
molar/backend/database/query.py
aspuru-guzik-group/molar
a3e0c337bd8a41c94b2c25831c95048cc7614f04
[ "BSD-3-Clause" ]
null
null
null
molar/backend/database/query.py
aspuru-guzik-group/molar
a3e0c337bd8a41c94b2c25831c95048cc7614f04
[ "BSD-3-Clause" ]
2
2022-01-07T17:57:42.000Z
2022-01-13T21:00:20.000Z
# std
from typing import Any, Dict, List, Optional, Union

# external
import pkg_resources
import sqlalchemy
from sqlalchemy.orm import aliased, Session

# molar
from molar.backend import schemas
from molar.backend.database.utils import sqlalchemy_to_dict

# FIX: read the bundled SQL through a context manager so the file handle is
# closed instead of leaking at import time (the original used bare open()).
with open(
    pkg_resources.resource_filename("molar", "sql/information_query.sql"), "r"
) as _information_query_file:
    INFORMATION_QUERY = _information_query_file.read()


def resolve_type(type: str, models, alias_registry=None):
    """Resolve a dotted type path (e.g. "molecule.smiles") to a SQLAlchemy
    model, column, or registered alias.

    Walks one dot-segment at a time through `models`; single-segment names
    fall back to the alias registry. When `models` is an InstrumentedAttribute
    (a JSON column), the segment is treated as a JSON key and read as text.

    Raises:
        ValueError: when the path cannot be resolved.
    """
    if alias_registry is None:
        alias_registry = {}
    types = type.split(".")
    if len(types) == 1:
        if isinstance(models, sqlalchemy.orm.attributes.InstrumentedAttribute):
            return models[types[0]].astext
        type_ = getattr(models, types[0], None)
        if type_ is not None:
            return type_
        if types[0] in alias_registry.keys():
            return alias_registry[types[0]]
        raise ValueError(f"Type {type} not found in database!")
    submodel = getattr(models, types[0], None)
    if submodel is None and types[0] in alias_registry.keys():
        submodel = alias_registry[types[0]]
    if submodel is not None:
        return resolve_type(".".join(types[1:]), submodel, alias_registry)
    raise ValueError(f"Type {type} not found in database!")


def query_builder(
    db: Session,
    models,
    types: schemas.QueryTypes,
    limit: int,
    offset: int,
    joins: Optional[schemas.QueryJoins] = None,
    filters: Optional[schemas.QueryFilters] = None,
    order_by: Optional[schemas.QueryOrderBys] = None,
    aliases: Optional[schemas.QueryAliases] = None,
):
    """Build a SQLAlchemy query from the request schemas.

    Returns a tuple (query, db_objs, types): the composed query with joins,
    filters, ordering and pagination applied, the resolved selectables, and
    the (list-normalized) requested type names.
    """
    alias_registry: Dict[str, Any] = {}

    # Resolving aliases first so later type lookups can reference them.
    if aliases is not None:
        if not isinstance(aliases, list):
            aliases = [aliases]
        for alias in aliases:
            alias_registry[alias.alias] = aliased(
                resolve_type(alias.type, models), name=alias.alias
            )

    # Resolving main types
    if not isinstance(types, list):
        types = [types]
    db_objs = []
    for type_ in types:
        db_obj = resolve_type(type_, models, alias_registry)
        db_objs.append(db_obj)

    query = db.query(*db_objs)

    if joins is not None:
        if not isinstance(joins, list):
            joins = [joins]
        for join in joins:
            joined_table = resolve_type(
                join.type,
                models,
                alias_registry,
            )
            # Explicit ON clause only when the request specifies one.
            onclause = None
            if join.on is not None:
                onclause = resolve_type(
                    join.on.column1, models, alias_registry
                ) == resolve_type(join.on.column2, models, alias_registry)
            query = query.join(
                joined_table,
                onclause,
                isouter=True if join.join_type == "outer" else False,
                full=True if join.join_type == "full" else False,
            )

    if filters is not None:
        filters = expand_filters(filters, models, alias_registry)
        query = query.filter(filters)

    if order_by is not None:
        if not isinstance(order_by, list):
            order_by = [order_by]
        order_bys = []
        for ob in order_by:
            t = resolve_type(ob.type, models, alias_registry)
            if ob.order == "asc":
                order_bys.append(t.asc())
            else:
                order_bys.append(t.desc())
        query = query.order_by(*order_bys)

    query = query.offset(offset).limit(limit)
    return query, db_objs, types


def process_query_output(db_objs, query_results, types):
    """Convert raw query rows into plain dicts via sqlalchemy_to_dict.

    Single-selectable queries map 1:1; multi-selectable rows are merged into
    one dict per row with table names prefixed to disambiguate keys.
    """
    if len(db_objs) == 1:
        return [sqlalchemy_to_dict(db_objs[0], r, types[0]) for r in query_results]

    results = []
    for result in query_results:
        result_dict = {}
        for res, db_obj, t in zip(result, db_objs, types):
            result_dict.update(sqlalchemy_to_dict(db_obj, res, t, add_table_name=True))
        results.append(result_dict)
    return results


def expand_filters(filters, models, alias_registry):
    """Recursively translate QueryFilter / QueryFilterList schemas into a
    SQLAlchemy filter expression.

    Raises:
        ValueError: for an unsupported boolean operator on a filter list.
    """
    if isinstance(filters, schemas.QueryFilterList):
        # BUG FIX: the original rebound `filters` to the expanded list and then
        # read `filters.op` off that list (AttributeError), and it also called
        # expand_filters() without its required `models`/`alias_registry`
        # arguments (TypeError). Read the operator first, then recurse with
        # all arguments.
        op = filters.op
        expanded = [expand_filters(f, models, alias_registry) for f in filters.filters]
        if op == "and":
            return sqlalchemy.and_(*expanded)
        elif op == "or":
            return sqlalchemy.or_(*expanded)
        else:
            raise ValueError(f"Filter operator not supported: {op}")
    elif isinstance(filters, schemas.QueryFilter):
        type = resolve_type(filters.type, models, alias_registry)
        # Map the request's comparison symbol onto the column's dunder method;
        # anything not listed (e.g. "like") is passed through verbatim.
        operator = filters.op
        if filters.op == "==":
            operator = "__eq__"
        elif filters.op == "!=":
            operator = "__ne__"
        elif filters.op == ">":
            operator = "__gt__"
        elif filters.op == "<":
            operator = "__lt__"
        elif filters.op == ">=":
            operator = "__ge__"
        elif filters.op == "<=":
            operator = "__le__"
        # If value is another column, compare column-to-column; otherwise
        # fall back to the literal value.
        value = filters.value
        if isinstance(value, str):
            try:
                value_type = resolve_type(value, models, alias_registry)
            except ValueError:
                pass
            else:
                value = value_type
        return getattr(type, operator)(value)
30.34104
87
0.594589
615
5,249
4.889431
0.209756
0.082142
0.063186
0.034919
0.136016
0.111074
0.027935
0.027935
0.027935
0
0
0.003838
0.30501
5,249
172
88
30.517442
0.82045
0.016003
0
0.052632
0
0
0.040132
0.004847
0
0
0
0
0
1
0.030075
false
0.007519
0.045113
0
0.150376
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7952969e6d0b68c2f855a88488afaf384b30fc3f
2,377
py
Python
days/day_04/main.py
gkpotter/advent-of-code-2020
76ed77719a8f6396511dabce99d46995946edb01
[ "MIT" ]
null
null
null
days/day_04/main.py
gkpotter/advent-of-code-2020
76ed77719a8f6396511dabce99d46995946edb01
[ "MIT" ]
null
null
null
days/day_04/main.py
gkpotter/advent-of-code-2020
76ed77719a8f6396511dabce99d46995946edb01
[ "MIT" ]
null
null
null
import time
import os


def all_fields_present(passport):
    """Return True if the passport dict has all seven required keys (cid is optional)."""
    fields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']
    for field in fields:
        if field not in passport:
            return False
    return True


def is_valid(passport):
    """Return True if every required field is present AND passes its rule.

    Rules (AoC 2020 day 4 part 2): byr 1920-2002, iyr 2010-2020,
    eyr 2020-2030, hgt 59-76in or 150-193cm, hcl '#' + 6 hex digits,
    ecl one of seven colors, pid exactly 9 digits.
    """
    if not all_fields_present(passport):
        return False

    byr = passport['byr']
    if not (is_year(byr) and int(byr) in range(1920, 2003)):
        return False

    iyr = passport['iyr']
    if not (is_year(iyr) and int(iyr) in range(2010, 2021)):
        return False

    eyr = passport['eyr']
    if not (is_year(eyr) and int(eyr) in range(2020, 2031)):
        return False

    hgt = passport['hgt']
    if not valid_height(hgt):
        return False

    hcl = passport['hcl']
    if not (hcl[0] == '#' and len(hcl) == 7
            and all(is_digit(x) or x in ['a', 'b', 'c', 'd', 'e', 'f'] for x in hcl[1:])):
        return False

    ecl = passport['ecl']
    if ecl not in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']:
        return False

    pid = passport['pid']
    if not (len(pid) == 9 and all(is_digit(x) for x in pid)):
        return False

    return True


def is_year(y):
    """Return True if y is a 4-character string of decimal digits."""
    return len(y) == 4 and all(is_digit(x) for x in y)


def is_digit(x):
    """Return True if x parses as a single decimal digit (0-9)."""
    try:
        return int(x) in range(0, 10)
    # BUG FIX: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; only conversion failures should yield False.
    except (TypeError, ValueError):
        return False


def valid_height(hgt):
    """Return True for heights of 59-76 inches or 150-193 centimeters."""
    try:
        if hgt[2:] == 'in' and int(hgt[:2]) in range(59, 77):
            return True
        if hgt[3:] == 'cm' and int(hgt[:3]) in range(150, 194):
            return True
        return False
    # BUG FIX: was a bare `except:`; only the int() conversion can fail here.
    except (TypeError, ValueError):
        return False


def str_to_passport(s):
    """Parse 'key:value key:value ...' into a dict."""
    passport = {}
    items = [x.split(':') for x in s.strip().split(' ')]
    for item in items:
        passport[item[0]] = item[1]
    return passport


def part_one(passports):
    """Count passports with all required fields present."""
    total_valid = 0
    for passport in passports:
        total_valid += all_fields_present(passport)
    return total_valid


def part_two(passports):
    """Count passports whose fields are present AND valid."""
    total_valid = 0
    for passport in passports:
        total_valid += is_valid(passport)
    return total_valid


def main():
    """Read input.txt (blank-line-separated passports) and print both answers."""
    start_time = time.time()

    with open(os.path.dirname(__file__) + '/input.txt', 'r') as data:
        passports = []
        s = ''
        for line in data.readlines():
            if line == '\n':
                passports.append(str_to_passport(s))
                s = ''
            else:
                s += line.strip() + ' '
        # ROBUSTNESS FIX: only parse the trailing buffer if it is non-empty;
        # the original crashed (IndexError) when the file ended with a blank
        # line because str_to_passport('') sees no 'key:value' pair.
        if s.strip():
            passports.append(str_to_passport(s))

    part_one_ans = part_one(passports)
    part_two_ans = part_two(passports)

    print('Day 4 ({:,.3f}s)'.format(time.time() - start_time))
    print('  Part 1: {}'.format(part_one_ans))
    print('  Part 2: {}'.format(part_two_ans))


if __name__ == "__main__":
    main()
20.491379
76
0.641565
391
2,377
3.757033
0.265985
0.089857
0.021784
0.049013
0.250511
0.172907
0.098026
0.098026
0.070796
0.070796
0
0.02814
0.19268
2,377
116
77
20.491379
0.737363
0
0
0.352941
0
0
0.058452
0
0
0
0
0
0
1
0.105882
false
0.305882
0.023529
0.011765
0.376471
0.035294
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
795299febd0881f339bf75a4c01b525d81a4103e
1,089
py
Python
fa_management_server/models/role.py
Msms-NJ/fa_management_server
6787e35a5ac27c27c61fcaa0f508a78f4dc6e8f9
[ "MIT" ]
null
null
null
fa_management_server/models/role.py
Msms-NJ/fa_management_server
6787e35a5ac27c27c61fcaa0f508a78f4dc6e8f9
[ "MIT" ]
null
null
null
fa_management_server/models/role.py
Msms-NJ/fa_management_server
6787e35a5ac27c27c61fcaa0f508a78f4dc6e8f9
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Role models.""" from dataclasses import dataclass from array import array from .database import Column, Model, SurrogatePK, db, reference_col, relationship from sqlalchemy.dialects.postgresql import ARRAY @dataclass class Role(SurrogatePK, Model): """用户角色信息表""" __tablename__ = "roles" # 配置JSON返回字段信息 name: str id: str remarks: str web_menus: array update_date: str # role 角色数据权限 data_scope # 0 默认值 1 只能看到自己数据 2 能看到当前所在机构下的数据 3 能看到系统中的所有数据 DATA_SCOPE_DEFAULT = 0 DATA_SCOPE_SELF = 1 DATA_SCOPE_OFFICE = 2 DATA_SCOPE_ALL = 3 # 配置数据库字段信息 name = Column(db.String(80), unique=True, nullable=False) users = relationship("UserRole", back_populates="role") data_scope = Column(db.SmallInteger, nullable=False) web_menus = Column(ARRAY(db.String)) def __init__(self, **kwargs): """Create instance.""" db.Model.__init__(self, **kwargs) def __repr__(self): """Represent instance as a unique string.""" return "<Role({name})>".format(name=self.name)
26.560976
81
0.673095
134
1,089
5.238806
0.537313
0.076923
0.039886
0
0
0
0
0
0
0
0
0.012821
0.212121
1,089
40
82
27.225
0.805361
0.176309
0
0
0
0
0.03555
0
0
0
0
0
0
1
0.083333
false
0
0.166667
0
0.916667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79539f171b11d067d372dd7f773b3f28ca1c0d21
5,696
py
Python
caller.py
xopherw/algotrader
6daafe165d7eb4d5d34b2a7051e102f15bcb71dd
[ "MIT" ]
null
null
null
caller.py
xopherw/algotrader
6daafe165d7eb4d5d34b2a7051e102f15bcb71dd
[ "MIT" ]
null
null
null
caller.py
xopherw/algotrader
6daafe165d7eb4d5d34b2a7051e102f15bcb71dd
[ "MIT" ]
1
2022-01-19T14:49:42.000Z
2022-01-19T14:49:42.000Z
import requests, datetime as dt, numpy as np, pandas as pd, pytz
from dateutil.relativedelta import relativedelta


# Call for raw data (NASDAQ)
def nsdq_data(ticker):
    """Fetch ~5 years of daily closes for `ticker` from the NASDAQ API.

    Returns a pandas DataFrame with added ema3/ema10/slope columns, or
    None (implicit) after printing the error if anything fails.
    """
    try:
        today = dt.datetime.now(pytz.timezone('US/Eastern')).date()
        past = today - relativedelta(years=5)
        price = current_price(ticker.upper())
        # Row for today's price, appended after the historical download.
        new_data = {"date": today.strftime("%m/%d/%Y"), "close": price}
        headers = {'user-agent': "-"}
        url = "https://api.nasdaq.com/api"
        post = f"/quote/{ticker.upper()}/historical"
        params = {
            "assetclass": "stocks",
            "fromdate": past,
            "limit": '100000',
        }
        r = requests.get(url + post, headers=headers, params=params).json()
        # data cleaning and formatting
        # Remove unnecessary data and reverse order
        data = pd.DataFrame(r["data"]["tradesTable"]["rows"][::-1])
        data[['close']] = data[['close']].replace('\$|,', '', regex=True).astype(float)  # Convert 'close' to float type
        data = data.append(new_data, ignore_index=True)  # Append latest data (aproaching closing time)

        # Calculate and add ema3, ema10, and slope to data
        ema3 = data['close'].ewm(span=3, adjust=False).mean()
        ema10 = data['close'].ewm(span=10, adjust=False).mean()
        slope = np.gradient(data['close'])
        data['ema3'] = ema3
        data['ema10'] = ema10
        data['slope'] = slope
        return data
    except Exception as e:
        print("NSDQ Data Error: ", e)
        pass


# Call for current price
def current_price(ticker):
    """Return the last sale price for `ticker` rounded to 2 decimals, or None on failure."""
    try:
        url = f"https://api.nasdaq.com/api/quote/{ticker}/info?assetclass=stocks"
        headers = {'user-agent': "-"}
        r = requests.get(url, headers=headers).json()['data']
        return round(float(r['primaryData']['lastSalePrice'].strip('$')), 2)
    except Exception as e:
        print("Current Price Error:", e)
        pass


# Call for order
def order(ticker, qty, order, api):
    """Submit a market day-order to Alpaca paper trading.

    `order` is truthy for buy, falsy for sell; `api` carries the
    alpaca_api / alpaca_secret credentials.
    """
    try:
        side = "buy" if order else "sell"
        url = "https://paper-api.alpaca.markets"
        post = "/v2/orders"
        headers = {
            "APCA-API-KEY-ID": api.alpaca_api,
            "APCA-API-SECRET-KEY": api.alpaca_secret,
        }
        params = {
            "symbol": ticker.upper(),
            "qty": str(qty),
            "side": side,
            "type": "market",
            "time_in_force": "day"
        }
        r = requests.post(url + post, headers=headers, json=params)
        print("Status Code:", r.status_code)
    except Exception as e:
        print("Order Error:", e)
        pass


# Call to list bought stocks
def stock_list(api):
    """Return the list of open Alpaca positions (raw JSON), or None on failure."""
    try:
        url = "https://paper-api.alpaca.markets"
        post = "/v2/positions"
        headers = {
            "APCA-API-KEY-ID": api.alpaca_api,
            "APCA-API-SECRET-KEY": api.alpaca_secret,
        }
        r = requests.get(url + post, headers=headers).json()
        return r
    except Exception as e:
        print("Stock List Error:", e)
        pass


# Call for stock quantity bought
def qty(ticker, api):
    """Return the held quantity for `ticker`, or None if there is no position."""
    try:
        url = "https://paper-api.alpaca.markets"
        post = "/v2/positions/" + ticker.upper()
        headers = {
            "APCA-API-KEY-ID": api.alpaca_api,
            "APCA-API-SECRET-KEY": api.alpaca_secret,
        }
        r = requests.get(url + post, headers=headers)
        # 404 means no open position for this symbol.
        return r.json()["qty"] if(r.status_code == 200) else None
    except Exception as e:
        print("Quantity Error:", e)
        pass


# Call for buying power
def money(api):
    """Return the account's buying power rounded to 2 decimals, or None on failure."""
    try:
        url = "https://paper-api.alpaca.markets"
        post = "/v2/account"
        headers = {
            "APCA-API-KEY-ID": api.alpaca_api,
            "APCA-API-SECRET-KEY": api.alpaca_secret,
        }
        r = requests.get(url + post, headers=headers).json()["buying_power"]
        money = round(float(r), 2)
        return money
    except Exception as e:
        print("Buying Power Error:", e)
        pass


# Call for calendar (check if holiday)
def calendar(date, api):
    """Return the trading-calendar date matching `date` (None on failure/holiday lookup error)."""
    try:
        url = "https://paper-api.alpaca.markets"
        post = f"/v2/calendar"
        headers = {
            "APCA-API-KEY-ID": api.alpaca_api,
            "APCA-API-SECRET-KEY": api.alpaca_secret,
        }
        params = {
            "start": date,
            "end": date,
        }
        r = requests.get(url + post, headers=headers, params=params).json()
        d = r[0]["date"]
        return d
    except Exception as e:
        print("Calendar Error:", e)
        pass


# Call for open/close time (params: "Open" or "Clos" only, case senstive and no 'e' for "Clos")
def market_hour(market_time):
    """Return today's market open/close time as a naive datetime.

    `market_time` must be "Open" or "Clos" — it is interpolated into the
    NASDAQ field name 'market{...}ingTime'.
    """
    try:
        url = "https://api.nasdaq.com/api/market-info"
        headers = {'user-agent': "-"}
        r = requests.get(url, headers=headers).json()['data']
        hour = dt.datetime.strptime(r[f'market{market_time}ingTime'].strip(' ET'), "%b %d, %Y %I:%M %p")
        return hour
    except Exception as e:
        print("Market time Error:", e)
        pass


# Call for next open time
def next_open_time(api):
    """Return the next market-open time from the Alpaca clock endpoint (timezone offset dropped)."""
    try:
        url = "https://paper-api.alpaca.markets"
        post = f"/v2/clock"
        headers = {
            "APCA-API-KEY-ID": api.alpaca_api,
            "APCA-API-SECRET-KEY": api.alpaca_secret,
        }
        r = requests.get(url + post, headers=headers).json()
        # [:-6] strips the UTC-offset suffix so strptime can parse it.
        next_open = dt.datetime.strptime(r['next_open'][:-6], "%Y-%m-%dT%H:%M:%S")
        return next_open
    except Exception as e:
        print("Next open time Error:", e)
        pass
34.107784
119
0.548631
703
5,696
4.399716
0.243243
0.052376
0.049467
0.052376
0.455545
0.335597
0.320724
0.320724
0.309408
0.309408
0
0.009059
0.302317
5,696
167
120
34.107784
0.76925
0.086728
0
0.443662
0
0
0.217618
0.011565
0
0
0
0
0
1
0.06338
false
0.06338
0.014085
0
0.133803
0.070423
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
79540db7343cd37c04169f2c2a9534f0c0ea7d5c
1,187
py
Python
code/math_examples.py
rustam-fork/ml-course-uz
e1554d4c69bf0e421aa596d77aab65639df1ff73
[ "MIT" ]
21
2018-01-05T09:24:49.000Z
2021-04-24T03:25:25.000Z
code/math_examples.py
rustam-fork/ml-course-uz
e1554d4c69bf0e421aa596d77aab65639df1ff73
[ "MIT" ]
1
2019-11-11T18:34:53.000Z
2019-11-13T15:56:10.000Z
code/math_examples.py
rustam-fork/ml-course-uz
e1554d4c69bf0e421aa596d77aab65639df1ff73
[ "MIT" ]
13
2018-01-05T10:26:47.000Z
2022-01-25T07:48:33.000Z
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm


def draw_parabola(steps=50):
    """Plot y = x^2 on [-4, 4] with a dashed vertical line at x = 0."""
    x = np.linspace(-4, 4, steps)
    plt.plot(x, x ** 2)
    plt.axvline(x=0, color='b', linestyle='dashed')


def draw_paraboloid(steps=50):
    """Plot the paraboloid z = x^2 + y^2 over [-1, 1]^2 as a 3-D surface."""
    fig = plt.figure(figsize=(10, 10))
    ax = fig.gca(projection='3d')
    x = np.linspace(-1, 1, steps)
    y = np.linspace(-1, 1, steps)
    X, Y = np.meshgrid(x, y)
    Z = X ** 2 + Y ** 2
    ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)


def draw_mishra_bird():
    """Plot a Mishra Bird-style test function as a 3-D surface.

    NOTE(review): the standard Mishra Bird function is
    sin(y)*e^((1-cos x)^2) + cos(x)*e^((1-sin y)^2) + (x - y)^2; this
    implementation has an extra cos(X) factor in the middle term — confirm
    whether that is intentional.
    """
    fig = plt.figure(figsize=(14, 10))
    x = np.arange(-10, 1, 0.1)
    y = np.arange(-6, 0.5, 0.1)
    X, Y = np.meshgrid(x, y)
    ax = plt.gca(projection='3d')
    Z = np.sin(Y) * np.exp((1 - np.cos(X)) ** 2) + np.cos(X) * np.cos(X) * np.exp((1 - np.sin(Y)) ** 2) + (X - Y) ** 2
    ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
    ax.view_init(20, -60)  # tilt and rotate the default camera angle


def draw_hyperbolic_paraboloid():
    """Plot the saddle z = x^2 - y^2 over [-1, 1]^2 as a 3-D surface."""
    fig = plt.figure(figsize=(10, 10))
    ax = fig.gca(projection='3d')
    x = np.linspace(-1, 1, 50)
    y = np.linspace(-1, 1, 50)
    X, Y = np.meshgrid(x, y)
    Z = X ** 2 - Y ** 2
    ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
27.604651
118
0.57877
215
1,187
3.144186
0.274419
0.029586
0.022189
0.071006
0.43787
0.378698
0.357988
0.357988
0.357988
0.357988
0
0.065288
0.225779
1,187
43
119
27.604651
0.670294
0
0
0.30303
0
0
0.010943
0
0
0
0
0
0
1
0.121212
false
0
0.121212
0
0.242424
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79549336a4241631b02e785750cc140cfc8710c5
15,643
py
Python
crawler/crawler_2.py
marxlee/py-tools
4c3699b2a5dd5cb4477a4e339b8f91161cbe3bef
[ "Apache-2.0" ]
null
null
null
crawler/crawler_2.py
marxlee/py-tools
4c3699b2a5dd5cb4477a4e339b8f91161cbe3bef
[ "Apache-2.0" ]
null
null
null
crawler/crawler_2.py
marxlee/py-tools
4c3699b2a5dd5cb4477a4e339b8f91161cbe3bef
[ "Apache-2.0" ]
null
null
null
from urllib import request import random import json # 摸你请求头 url = r'https://www.baidu.com/s?cl=3&tn=baidutop10&fr=top1000&wd=%E7%9F%B3%E7%94%B0%E7%BA%AF%E4%B8%80%E6%84%9F%E6%9F%93%E6%96%B0%E5%86%A0&rsv_idx=2&rsv_dl=fyb_n_homepage&hisfilter=1' # 代理列表 agent_list = [ 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36', 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1', ] #头信息 headers = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', # 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36' } # 随机代理 agent = random.choice(agent_list) headers['User-Agent'] = agent # 方法体 def print_url(url, header): # 设置超时时间处理 time_out = 1 req_str = request.Request(url=url, headers=header) try: resp = request.urlopen(req_str, timeout=time_out) data = resp.read().decode() print(data) except: print("超时") finally: request.urlcleanup() def print_url_http(url, header): ''' GET POST PUT DELETE UPDATE HEAD OPTIONS ''' json.loads() pass def get_json_val(): str = data_json_str() jd = json.loads(str) print(jd['sodar_query_id']) # json = data_json() # print(json['sodar_query_id']) def data_json(): data = 
{"sodar_query_id":"YcqaXvPrIMSW2QTPjZeQAQ","injector_basename":"sodar2","bg_hash_basename":"r_kJ4x66L0q9ptqPN1EZdQZJVGt7LCWecB4z-4tOz0Y","bg_binary":"ALzbBj814lyYEaftZLAVu8KNpcS+Et40flMgUba+katdDRF9kHyC5ekeOn+SnF/oOv/75lAHEFYOblxjV5F4SQhJh/HX5oNaB6yQEscwY+2xY7zf1AOQAdXlwstcQsfcf91ydo9bJs3/nAnh41iqmA3KkV9TfstrgriG5sc8NSoUWQywuHf7ZDZeun3Y92u01kXYPGO8rYRMrwOmuOuo1G4VKz01yCxYiTBspPgxnf7FUa45yXGKR151XIRz4IxwZBgy/9IfJW7j0hUjlY/0miYrdQDTKGXvXdhU+YZvQF9FqLDIrYhg5FTB7SlWwIxZrImc8w8pALEU2idJLMue130yPHz7GfnNs6cIoIb8v+Y5v78QUCPflrJP6GxBEej+a3Fmb2hm7pk2iK4hbMb3guNpMSIou8PIP4nd5KQrpDzuG/WOiaSZIuMfkYYifAhSdi6nam3SMto07vPYW4L1XOy4QCvmkbrMwE8A8FLNrC6IzhIPi3cURKXSE6sI/UFoo8jBYaD/961bsfjDRip/stsq5XCf+P2EhgLW9Yl95ddjtReaObOpV5Di5pMhexp0DaCjfmXZyOrZ+LA3UYcOarlSsAIEJZ85HTn7EiJl+DVPSXPmQSy8LAywMyAVuPtKwanswYNiqlYtayDAlPJI26Om2TOeZzO0lRASIyxK6zkms+YajVYJ1z2wNvnv81D1PzH5N9YbWjImivcqNOHZxF/88olXY6oHG+zBqOVTOLyFahFjD7ftMXKFncA9mnEKC/UNXEkdClNu8B63x/aUHyb4u398Eru3PAupW6gnasf404viputMyvkrGgr7AhTRVJNK4Zt5GoQ8znxJCJZ0TRrGH4XgKFIkcgYopx4fmYGc5hP4q4mqFDouvH/Q0NGjx2YpICYE5CSfG1iIV76XO6nTrZ7Fn4zfE+mkgmm7LU/yAGXu2mjeTL0K2nEyOtgcuxq5POsRRtyN3BpNFRZDG06NxTEVZPbbRnm6aEaL4dntcmYsrLu2bFw2nMywczkpyV3ld+jeItdjeLaeRMjEqxhfR21xsMg3AenilDzpPaYlBCosMK3h/MA1nCwLxGENmjHp4lFYPHJohRnMj2Bbs4ROeG7uZoVg/NTmNiagecZC3+xy7+e+hNSS1Dmdq/lSpYLwJPsgrRRutCBRY/Ie2rfToKEt5juHeg9ExyWA8QJpHOPmIwgvoTXlTjWnQoObJuvlwVlJiT3fFDhmox/tAtiy4HzzQeIXekN8mZu1Lee6qlJ0HFE5jP6FVfDZsdn1VPKe8l01YpktU107evEA8rzrdoTnpPAj+d0IRwTh0HylyKHuulw6RD1MOJxPHTY06aGf5IRjpsz+YOKLR/+UPGiTZq4fc12OXYI/rHZTEfcSQu+lkh2zi2q8NAcRBrexYG6WN9UQ7+q5bPxAOEKxtB265eA1JQVd13LIPlBEJEbNCcvBiQiAzA2wDEqR793VpC0EuCDXuCuHwYGuF23YaKqhOaapZS9xVT8aDwKpdo005BdGvyu5Bux2q23npsv3xDE++5F/ny3z57M1cbpfLJQ4YzMVFyNisvqR5rdY71Ms2mTXy/DyoS022LI21D1RMsc16qKD7oCm00M/ggQVC1X7tJDwl0oe/3iisPHUJRiI79FkGbazm9AbQQKUH2LnMPjZ6GEMLkVpQGhglE/yYwVVpsP/PRdK1Cdftg7OADzPty8G1Q5uFyvdmWmIuR5nbW9bebKvhYFCJZHm2DcWgu8tN5NG5/5lrGpqxoNqxaxPwzAocDdU0xwMajHidsg0nkMruMNd997EUOEIdHPvZZFbBG+4ZDZgaYLGRuxGF2lOYNNxMG7qZfoXV5
Vw4h/G0Iy7hy6DXRnZCQWOXuGM6wGqwdG3yy085+gqnOyEclnbgsaVo7Ohz4P1u34rFRoSd+yoHs5Cy4iqCBZtu1o71jKxP+/yVbb+UGMNOOnSnrTO1Qs6MHYnQ+7yrN1AVKKwaNFFtsVKp4dW5vv0+6CJ1TmiEuVekSTR6pQ7FYjjvdAXwob0OZDFoxXY7kAFxrIuHXqgzJ0cG1DjxFJtV1JGCAU2vPtS6iYoNbpQX2GRMQx31yWVG4CO0IYJWjraUwvswrtIFbxkJMP2H8GF1AaV4gLV10ZbNsX8V1m0SwPsburH/3ECRLu3IpU6VLdP53WrtBxF4cidDtgaBin9NuQp0bP9wC3TIR0nZ2OD5yDRPw//pGAzZqIMLhvB2AbrLt4qCFvOWKDxJ39Thy9HOyqJh2DEZ/oWUr496RdSvmYqH5yn/pmYFN+gAqgB33wIsbYJxQtGfT2NsIS8yVka1031cP0azO43smM9dXbkU6HVaxOS5Y1U5PR9pjxAilePqS+PUVOIegsGpLfR4rfjXFQt72kpCTNKG+y8/XWH6Brb2THTzGEF1UrNUZfc6+jJ5fflgGAOuECRgzJwr9x0bToMdomF5vrbaLcGbX+Rqw7+ob7GQ5/E9UmFaAOOeDIGd0eX0hwLP1ZEKnkW+4LHFY5h1L51tUIZVPFnsJ1dxEeGXU7zp2SIJ8nbdcXO3WP6o9Q38Hrrw6udiFNZT9lhKujoBYgUZ/d0EDZCS0JuB/vR4u9uHKic0PBVeZpiUtjlaPJbrdHJK5J+JycwsifHqXKeMUDPOkNdPptuif8vsrXnpTgIqVEXFwYI1SCXr/0/hWhm3kz8ZVMPoPyPSehNFvD5/heLy4BCxaW60SjKfDMWiyliTQRDFsnFJZ+CguIE9tYjkwkdtv6yRQI70ltEWhYEHsX0+uZdixmo3wMPT7xjT6wL7891UFDJIFy8WtwTj5VzdN5nSgwlh+yGF9Djn9ihLSN5EebavuLDiJYNlvVOA2mMKSdeB8jvFcwyH5Q8opwQZUWrdrahdkTRK98S3HoGlyMx2u5x+YUgNxrKUJZxfbI/53aDuS2BV2LY2jtVnXQohEll0afDuVvmWNfJ8SQ2tHwX/YWuYYFKUg05ZF8yfxBdn9oezJMLorAa4wyomHtoowUL2j1ITOYZaG46V+sC6Uwf1T9VCDA3Dyugwz34e+NErKouptm99HeY22BzpTvUutUGo4/0m5Wt5CvbX1fEBeTWMb6BZ4sdP/PxJpR+vxBIFStciwLqBYIlVF/TKzKK0OR4gZp/QF4Z2GZPQUSQ4ZMQST3zhcMIsxNnzThwhDQifjvlTBhfM5bNtV6mNtPzQ9UbY5Qk6/88YFt5jJPaVhnfnaZtC9D7WlO3aNSIJ8QmNhg3J3dp6BiCjKMzBjCkXmlOcWGjTO5oQ1p2HKUubHNxQDpmmLthX8n15qLusnaQUeKSf+vFxcneT4DicqBNpECnPSfbwcIZqbDpwGLjNsRNebJwEI2xdbX+MBOPVQ303ptQHEMychPD+tbi7SCTIgJcHfAfRYAW5/AxbzelIwrk/6PC+a60CSW3OOLuOAoP5CLpeg+zRWW6CL5k9DdFDf4ve2vGu+k9V+2JagU56Ea8YCHOQ5VIzqkF3jIh6LkhCYmCyjFBGQLz4Cvu5OGI7TLC9v5/LQhshoqrEcc/JexcJzbx1i/l7In6HW5Zp1BpJvtruexwzKsbZKclmaG4HzPEGUKHgzwDDkMTFYSU2qPpncqPw6NtBp8og4n1KjyAXpfYecFU5tQVDyeUc7tMUgV0BE/WsXoheOKx7Cvo3bRuySPhSih+PBGp6FzP/S/rLxPOmZ/Lcf2F0IXXtR0Cj4gHXhigNou+PrhTgmeW1ayRnYYJ8Ps5JCP5nW5i2EAlH5SvcyAaoXIb2T3l1z7TmEEVLMRC3k5d+fqxB1AEIYZLvLMoCO2tFBh6L7u3Vyh/k0SchaqKKI9U/JVG/l4QwFqpZ8E+C/
p15UVgwMwHaAFBKULWncbwNiSk0R2H46n5Ol7+2kv2yfkFvdYrf7VsKD76/6JOCQydMM2BKmL1NL91N+Yd0hmaYBrrFIxVzxkjP8VULgCRwylKpsTBdYp0nvfVeWU+vq1CXy2hhOxzWMVRmMAE9FO6Fux0fprVdrkxDgLk50mhP7Eq8kfnzpXc3ItSgAddB1JCvUdYzhnsQh+F/viDl5iub0LIeF+Kp+HyemXDTkf9OVM1DGwp3CxgNIam2Z1/UxTVC76H8cKhjeo8yOhzoVF0p46N/o2eOmhB55ZcWKvFESKuRMbV+MjcSAhWE+76v8VgxrfwoIfhg2YlwLfMTiapbfMZ5tSh5rutxOuReIAbh8Mo/IYBesQQ2SybvA2GFg7Mcfe2rC+LEIhwXkm4GZkFahH9UWw4m1VUBmty2V9GcIUwp1/vUNfBCvDA8zyM7+r6P1SHjU4DkKVa0qIqF7AEwqASIbg2gjDMuxHyZ+c1izFQLu/8Nf3WFZUNcpMy92jd+wjICK0HzTKJYUVmraEPAQ96bvuibSo9COX9jAhC0xiG6AXurIm+bExk7Bq49uzkDe2AuK8xc3/ygHsr1pqCP/W99SKv2pds52hZb+ezghamFhznJ67EZIWawes9YJ1khIX6i2/N5qTvgFjv4C7d5IQVuMJgY9On9IbwuLJXnr8Shmy7vcc57b2irRiuKmDW4Vc4SBpRwW7wgvjpeuTwvsZyQgDrWFpKvY8PgrOK9MkXdnLPg3kkgFZF7CVHsogJZa3CVoA9uS4D7RT5hm9gsdVkxMkop+//w5bg1+fm/hrGD8wSmYNzLvld6IJOZxQWhE5JPe+WNzC5zEITxZGomzdKYDHRqp+0tQF8xVyHyZPuWPSgqAE/e5jyJ5m/sBa5Vl5oyKxajcv+gKZJhPiOfMLvgX7/+I8mFVccLz4kljK0KUhIScmYQBjWpAlN8JE2yzh2KmEhiTGqNsA9D9MbsRxZ3O3v9GauT2TYcH/EQCLvqftFn05a4Asz/car34eE7UcMcYvUvn0FYiIpHWmxHXAVCxZQ7+u4XQr/ulMxjKgOOeVFBfYcYl5uBc+U/UWM2nimDDF8q3Ugyybv6lTTke31qSGAqYvZLfHCV2CGK/Z2a83Fq6QOROsSdL1pntMU2jNLt6hC3XXzzeATmGTWPxuJXikRvueMc097kOn6G0NyU0qK4HDvymMcPhlibsSiBIPnoUzv6Had7ED6A7ccKy8hzk9ZZx0BGMoZjnAlpJJGK7HC57yTzsg05tX7NRcP5r9MNN/uBF9nJzY5ggZaQIETXUhfoxCfwY/Ce6nP0iHFHdPlsCbydHefp1dgyjPzQMvI6l9OG9n3OSLh9+rKmYQMyz1pi4aHcvt8CzqYhRKlPQEP1xNchQ0IXBhrm2Mi7SER0nimnz07nF1Ki9mPGk757hCsQz+xGwOj7oz1YeCtFT7vISs/kX9zeOtcpnfUlS0roQkwz1tQU2aTsZ5A42vyFRKRE0rv1KASXsiDNZd0/jkhmcneYQxD3L0ttYjsUg2BP/clXNyVWEoTsPs17xtZb+zZ0bAo29G0CEmFlx9n7PewUJOEqzv0s/W9jP0iIBNEsQ9mWQr6Brar3wQRrfjLk6ip8HUNh+YhhSjW0eSA9NsgQE6GaPKaGe03dNQOk8Yu5O1WrNOP+/Wjn2vWTb8TMbusjEgGG7BjGM5YlchUSurpXob/EPZAaR9gbMPt4CtHKUhB87t256CPGqoYxAVNcEhglUOM/p9hEjwkKZ3dB0AOqKswNtb+Nja9vgMFFCte6dOTXDRuHlyKL6IenAIo+5JBYX15WlGhCHiiWXQpbJoFbjeie3fxjGDjRzr8us5tvKUHXQJQCVW6SlKk1uFImLIdngwkXUpv2hypJX8KRtf4uLPu3+x50HIS5g38o9wdVgPjcPxAIEB3fcyEl0IWAx1eUm1LU8h11yx+gzQ/snBaV2vt1VEvLtNtPFZVYvIDuSpsWY8bv8
owdZd4wHB1lJZgAp9bBiSTGGEJMlCOuu4lQDOL/Aj3XMW8SSg5zTZblxdxayss3hIkrtoct1YVxe0itQSpG/OR+m3ZNOLr43J2gFN3MagHZwPuGBZC0kW+7nyZM7Sp7FZA/1+A08ddSL3luh/dCaPTVtk6tY1q1t9JH6dcsl77+Kh4nslE0YRA0qQQQIsqz75n7Bu05aFw+g6oYBgqAs4p0uVoWSKtTtfucPHy8gwCn8lh8jeIpk0mWS64OXXPWqyPptuCOZvJPemmP5uYB9MWLrf1QZmZMWgVZHuMmQXXobMTjGz+Dsw/eEVP+nVL8ftDDxwEDT0XpUckl0v3Qt3Np44jFKNLIcm6CIobyN0QQuouOZEmAVVXcJP6NYclNMd3zdKoVVGzFZS0GqX1Qmw+U4rlS0Knl9p2vDtP/HMWcCtnTNP9KZjRF6sJr2Vu+/4oi4f0JwvbUrHdkcED64VFA53ZxvqAKIPE1ebZjFq6SH6BXXl+CkWGqBUAe4HGh+u1QEKNPGA4ETZV4GNTOKbCP98CEmzf7Vo2nxTZ+0F34OUgMtQgrLTYcy0yZLB/Dk7nCgFO3zRLsNZUpX+KQRkSZ/aqiXJpwDRDh4aL2e40ENPHVI5nbWvuQaT44TG8WMIL60jr5WKgj921RMDAeCWipSP6LLtCHwZrTc2UiJugF/AC2WgY4L3/T0MTIK2"} return data def data_json_str(): data = {"sodar_query_id": "YcqaXvPrIMSW2QTPjZeQAQ", "injector_basename": "sodar2", "bg_hash_basename": "r_kJ4x66L0q9ptqPN1EZdQZJVGt7LCWecB4z-4tOz0Y", "bg_binary": "ALzbBj814lyYEaftZLAVu8KNpcS+Et40flMgUba+katdDRF9kHyC5ekeOn+SnF/oOv/75lAHEFYOblxjV5F4SQhJh/HX5oNaB6yQEscwY+2xY7zf1AOQAdXlwstcQsfcf91ydo9bJs3/nAnh41iqmA3KkV9TfstrgriG5sc8NSoUWQywuHf7ZDZeun3Y92u01kXYPGO8rYRMrwOmuOuo1G4VKz01yCxYiTBspPgxnf7FUa45yXGKR151XIRz4IxwZBgy/9IfJW7j0hUjlY/0miYrdQDTKGXvXdhU+YZvQF9FqLDIrYhg5FTB7SlWwIxZrImc8w8pALEU2idJLMue130yPHz7GfnNs6cIoIb8v+Y5v78QUCPflrJP6GxBEej+a3Fmb2hm7pk2iK4hbMb3guNpMSIou8PIP4nd5KQrpDzuG/WOiaSZIuMfkYYifAhSdi6nam3SMto07vPYW4L1XOy4QCvmkbrMwE8A8FLNrC6IzhIPi3cURKXSE6sI/UFoo8jBYaD/961bsfjDRip/stsq5XCf+P2EhgLW9Yl95ddjtReaObOpV5Di5pMhexp0DaCjfmXZyOrZ+LA3UYcOarlSsAIEJZ85HTn7EiJl+DVPSXPmQSy8LAywMyAVuPtKwanswYNiqlYtayDAlPJI26Om2TOeZzO0lRASIyxK6zkms+YajVYJ1z2wNvnv81D1PzH5N9YbWjImivcqNOHZxF/88olXY6oHG+zBqOVTOLyFahFjD7ftMXKFncA9mnEKC/UNXEkdClNu8B63x/aUHyb4u398Eru3PAupW6gnasf404viputMyvkrGgr7AhTRVJNK4Zt5GoQ8znxJCJZ0TRrGH4XgKFIkcgYopx4fmYGc5hP4q4mqFDouvH/Q0NGjx2YpICYE5CSfG1iIV76XO6nTrZ7Fn4zfE+mkgmm7LU/yAGXu2mjeTL0K2nEyOtgcuxq5POsRRtyN3BpNFRZDG06NxTEVZPbbRnm6aEaL4dntcmYsrLu2bFw2nMywczkpyV3ld+jeItdjeLaeRMjEqxhfR21xsMg3AenilDzpPaYlBCosMK3h/MA1nCwLxGENmjHp4lFYPHJ
ohRnMj2Bbs4ROeG7uZoVg/NTmNiagecZC3+xy7+e+hNSS1Dmdq/lSpYLwJPsgrRRutCBRY/Ie2rfToKEt5juHeg9ExyWA8QJpHOPmIwgvoTXlTjWnQoObJuvlwVlJiT3fFDhmox/tAtiy4HzzQeIXekN8mZu1Lee6qlJ0HFE5jP6FVfDZsdn1VPKe8l01YpktU107evEA8rzrdoTnpPAj+d0IRwTh0HylyKHuulw6RD1MOJxPHTY06aGf5IRjpsz+YOKLR/+UPGiTZq4fc12OXYI/rHZTEfcSQu+lkh2zi2q8NAcRBrexYG6WN9UQ7+q5bPxAOEKxtB265eA1JQVd13LIPlBEJEbNCcvBiQiAzA2wDEqR793VpC0EuCDXuCuHwYGuF23YaKqhOaapZS9xVT8aDwKpdo005BdGvyu5Bux2q23npsv3xDE++5F/ny3z57M1cbpfLJQ4YzMVFyNisvqR5rdY71Ms2mTXy/DyoS022LI21D1RMsc16qKD7oCm00M/ggQVC1X7tJDwl0oe/3iisPHUJRiI79FkGbazm9AbQQKUH2LnMPjZ6GEMLkVpQGhglE/yYwVVpsP/PRdK1Cdftg7OADzPty8G1Q5uFyvdmWmIuR5nbW9bebKvhYFCJZHm2DcWgu8tN5NG5/5lrGpqxoNqxaxPwzAocDdU0xwMajHidsg0nkMruMNd997EUOEIdHPvZZFbBG+4ZDZgaYLGRuxGF2lOYNNxMG7qZfoXV5Vw4h/G0Iy7hy6DXRnZCQWOXuGM6wGqwdG3yy085+gqnOyEclnbgsaVo7Ohz4P1u34rFRoSd+yoHs5Cy4iqCBZtu1o71jKxP+/yVbb+UGMNOOnSnrTO1Qs6MHYnQ+7yrN1AVKKwaNFFtsVKp4dW5vv0+6CJ1TmiEuVekSTR6pQ7FYjjvdAXwob0OZDFoxXY7kAFxrIuHXqgzJ0cG1DjxFJtV1JGCAU2vPtS6iYoNbpQX2GRMQx31yWVG4CO0IYJWjraUwvswrtIFbxkJMP2H8GF1AaV4gLV10ZbNsX8V1m0SwPsburH/3ECRLu3IpU6VLdP53WrtBxF4cidDtgaBin9NuQp0bP9wC3TIR0nZ2OD5yDRPw//pGAzZqIMLhvB2AbrLt4qCFvOWKDxJ39Thy9HOyqJh2DEZ/oWUr496RdSvmYqH5yn/pmYFN+gAqgB33wIsbYJxQtGfT2NsIS8yVka1031cP0azO43smM9dXbkU6HVaxOS5Y1U5PR9pjxAilePqS+PUVOIegsGpLfR4rfjXFQt72kpCTNKG+y8/XWH6Brb2THTzGEF1UrNUZfc6+jJ5fflgGAOuECRgzJwr9x0bToMdomF5vrbaLcGbX+Rqw7+ob7GQ5/E9UmFaAOOeDIGd0eX0hwLP1ZEKnkW+4LHFY5h1L51tUIZVPFnsJ1dxEeGXU7zp2SIJ8nbdcXO3WP6o9Q38Hrrw6udiFNZT9lhKujoBYgUZ/d0EDZCS0JuB/vR4u9uHKic0PBVeZpiUtjlaPJbrdHJK5J+JycwsifHqXKeMUDPOkNdPptuif8vsrXnpTgIqVEXFwYI1SCXr/0/hWhm3kz8ZVMPoPyPSehNFvD5/heLy4BCxaW60SjKfDMWiyliTQRDFsnFJZ+CguIE9tYjkwkdtv6yRQI70ltEWhYEHsX0+uZdixmo3wMPT7xjT6wL7891UFDJIFy8WtwTj5VzdN5nSgwlh+yGF9Djn9ihLSN5EebavuLDiJYNlvVOA2mMKSdeB8jvFcwyH5Q8opwQZUWrdrahdkTRK98S3HoGlyMx2u5x+YUgNxrKUJZxfbI/53aDuS2BV2LY2jtVnXQohEll0afDuVvmWNfJ8SQ2tHwX/YWuYYFKUg05ZF8yfxBdn9oezJMLorAa4wyomHtoowUL2j1ITOYZaG46V+sC6Uwf1T9VCDA3Dyugwz34e+NErKouptm99HeY22BzpTvUutUGo4/0m5Wt5CvbX1fEBeTWMb6
BZ4sdP/PxJpR+vxBIFStciwLqBYIlVF/TKzKK0OR4gZp/QF4Z2GZPQUSQ4ZMQST3zhcMIsxNnzThwhDQifjvlTBhfM5bNtV6mNtPzQ9UbY5Qk6/88YFt5jJPaVhnfnaZtC9D7WlO3aNSIJ8QmNhg3J3dp6BiCjKMzBjCkXmlOcWGjTO5oQ1p2HKUubHNxQDpmmLthX8n15qLusnaQUeKSf+vFxcneT4DicqBNpECnPSfbwcIZqbDpwGLjNsRNebJwEI2xdbX+MBOPVQ303ptQHEMychPD+tbi7SCTIgJcHfAfRYAW5/AxbzelIwrk/6PC+a60CSW3OOLuOAoP5CLpeg+zRWW6CL5k9DdFDf4ve2vGu+k9V+2JagU56Ea8YCHOQ5VIzqkF3jIh6LkhCYmCyjFBGQLz4Cvu5OGI7TLC9v5/LQhshoqrEcc/JexcJzbx1i/l7In6HW5Zp1BpJvtruexwzKsbZKclmaG4HzPEGUKHgzwDDkMTFYSU2qPpncqPw6NtBp8og4n1KjyAXpfYecFU5tQVDyeUc7tMUgV0BE/WsXoheOKx7Cvo3bRuySPhSih+PBGp6FzP/S/rLxPOmZ/Lcf2F0IXXtR0Cj4gHXhigNou+PrhTgmeW1ayRnYYJ8Ps5JCP5nW5i2EAlH5SvcyAaoXIb2T3l1z7TmEEVLMRC3k5d+fqxB1AEIYZLvLMoCO2tFBh6L7u3Vyh/k0SchaqKKI9U/JVG/l4QwFqpZ8E+C/p15UVgwMwHaAFBKULWncbwNiSk0R2H46n5Ol7+2kv2yfkFvdYrf7VsKD76/6JOCQydMM2BKmL1NL91N+Yd0hmaYBrrFIxVzxkjP8VULgCRwylKpsTBdYp0nvfVeWU+vq1CXy2hhOxzWMVRmMAE9FO6Fux0fprVdrkxDgLk50mhP7Eq8kfnzpXc3ItSgAddB1JCvUdYzhnsQh+F/viDl5iub0LIeF+Kp+HyemXDTkf9OVM1DGwp3CxgNIam2Z1/UxTVC76H8cKhjeo8yOhzoVF0p46N/o2eOmhB55ZcWKvFESKuRMbV+MjcSAhWE+76v8VgxrfwoIfhg2YlwLfMTiapbfMZ5tSh5rutxOuReIAbh8Mo/IYBesQQ2SybvA2GFg7Mcfe2rC+LEIhwXkm4GZkFahH9UWw4m1VUBmty2V9GcIUwp1/vUNfBCvDA8zyM7+r6P1SHjU4DkKVa0qIqF7AEwqASIbg2gjDMuxHyZ+c1izFQLu/8Nf3WFZUNcpMy92jd+wjICK0HzTKJYUVmraEPAQ96bvuibSo9COX9jAhC0xiG6AXurIm+bExk7Bq49uzkDe2AuK8xc3/ygHsr1pqCP/W99SKv2pds52hZb+ezghamFhznJ67EZIWawes9YJ1khIX6i2/N5qTvgFjv4C7d5IQVuMJgY9On9IbwuLJXnr8Shmy7vcc57b2irRiuKmDW4Vc4SBpRwW7wgvjpeuTwvsZyQgDrWFpKvY8PgrOK9MkXdnLPg3kkgFZF7CVHsogJZa3CVoA9uS4D7RT5hm9gsdVkxMkop+//w5bg1+fm/hrGD8wSmYNzLvld6IJOZxQWhE5JPe+WNzC5zEITxZGomzdKYDHRqp+0tQF8xVyHyZPuWPSgqAE/e5jyJ5m/sBa5Vl5oyKxajcv+gKZJhPiOfMLvgX7/+I8mFVccLz4kljK0KUhIScmYQBjWpAlN8JE2yzh2KmEhiTGqNsA9D9MbsRxZ3O3v9GauT2TYcH/EQCLvqftFn05a4Asz/car34eE7UcMcYvUvn0FYiIpHWmxHXAVCxZQ7+u4XQr/ulMxjKgOOeVFBfYcYl5uBc+U/UWM2nimDDF8q3Ugyybv6lTTke31qSGAqYvZLfHCV2CGK/Z2a83Fq6QOROsSdL1pntMU2jNLt6hC3XXzzeATmGTWPxuJXikRvueMc097kOn6G0NyU0qK4HDvymMcPhlibsSiBIPnoUzv6Had7ED6A7ccKy8hzk9ZZx0
BGMoZjnAlpJJGK7HC57yTzsg05tX7NRcP5r9MNN/uBF9nJzY5ggZaQIETXUhfoxCfwY/Ce6nP0iHFHdPlsCbydHefp1dgyjPzQMvI6l9OG9n3OSLh9+rKmYQMyz1pi4aHcvt8CzqYhRKlPQEP1xNchQ0IXBhrm2Mi7SER0nimnz07nF1Ki9mPGk757hCsQz+xGwOj7oz1YeCtFT7vISs/kX9zeOtcpnfUlS0roQkwz1tQU2aTsZ5A42vyFRKRE0rv1KASXsiDNZd0/jkhmcneYQxD3L0ttYjsUg2BP/clXNyVWEoTsPs17xtZb+zZ0bAo29G0CEmFlx9n7PewUJOEqzv0s/W9jP0iIBNEsQ9mWQr6Brar3wQRrfjLk6ip8HUNh+YhhSjW0eSA9NsgQE6GaPKaGe03dNQOk8Yu5O1WrNOP+/Wjn2vWTb8TMbusjEgGG7BjGM5YlchUSurpXob/EPZAaR9gbMPt4CtHKUhB87t256CPGqoYxAVNcEhglUOM/p9hEjwkKZ3dB0AOqKswNtb+Nja9vgMFFCte6dOTXDRuHlyKL6IenAIo+5JBYX15WlGhCHiiWXQpbJoFbjeie3fxjGDjRzr8us5tvKUHXQJQCVW6SlKk1uFImLIdngwkXUpv2hypJX8KRtf4uLPu3+x50HIS5g38o9wdVgPjcPxAIEB3fcyEl0IWAx1eUm1LU8h11yx+gzQ/snBaV2vt1VEvLtNtPFZVYvIDuSpsWY8bv8owdZd4wHB1lJZgAp9bBiSTGGEJMlCOuu4lQDOL/Aj3XMW8SSg5zTZblxdxayss3hIkrtoct1YVxe0itQSpG/OR+m3ZNOLr43J2gFN3MagHZwPuGBZC0kW+7nyZM7Sp7FZA/1+A08ddSL3luh/dCaPTVtk6tY1q1t9JH6dcsl77+Kh4nslE0YRA0qQQQIsqz75n7Bu05aFw+g6oYBgqAs4p0uVoWSKtTtfucPHy8gwCn8lh8jeIpk0mWS64OXXPWqyPptuCOZvJPemmP5uYB9MWLrf1QZmZMWgVZHuMmQXXobMTjGz+Dsw/eEVP+nVL8ftDDxwEDT0XpUckl0v3Qt3Np44jFKNLIcm6CIobyN0QQuouOZEmAVVXcJP6NYclNMd3zdKoVVGzFZS0GqX1Qmw+U4rlS0Knl9p2vDtP/HMWcCtnTNP9KZjRF6sJr2Vu+/4oi4f0JwvbUrHdkcED64VFA53ZxvqAKIPE1ebZjFq6SH6BXXl+CkWGqBUAe4HGh+u1QEKNPGA4ETZV4GNTOKbCP98CEmzf7Vo2nxTZ+0F34OUgMtQgrLTYcy0yZLB/Dk7nCgFO3zRLsNZUpX+KQRkSZ/aqiXJpwDRDh4aL2e40ENPHVI5nbWvuQaT44TG8WMIL60jr5WKgj921RMDAeCWipSP6LLtCHwZrTc2UiJugF/AC2WgY4L3/T0MTIK2"} data = json.dumps(data) return data # 读取本地 def load_location(): with open('../files/json.txt', 'rt') as f: text = f.read() print(text) print(type(text)) js = json.loads(text) print(js['sodar_query_id']) pass # 写入本地 def write_location(): with open('../files/json.txt', 'rt') as f: text = f.read() with open('../files/json1.txt', 'w') as f1: f1.write(text) if __name__ == '__main__': # print_url(url=url, header=headers) # load_location() write_location() pass
140.927928
6,714
0.915681
773
15,643
18.452781
0.463131
0.002804
0.004206
0.002524
0.925547
0.925547
0.925547
0.925547
0.925547
0.925547
0
0.144264
0.032666
15,643
111
6,715
140.927928
0.798374
0.020456
0
0.160714
0
0.107143
0.918402
0.874656
0
1
0
0
0
1
0.125
false
0.053571
0.053571
0
0.214286
0.142857
0
0
1
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
1
0
0
0
0
0
13
7954a7bbe8ccac9a9d76513832ed91b4c1c715ad
3,075
py
Python
tests/onegov/town6/test_views.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
tests/onegov/town6/test_views.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
tests/onegov/town6/test_views.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
import onegov.core import onegov.org from tests.shared import utils def test_view_permissions(): utils.assert_explicit_permissions(onegov.org, onegov.org.OrgApp) def test_notfound(client): notfound_page = client.get('/foobar', expect_errors=True) assert "Seite nicht gefunden" in notfound_page assert notfound_page.status_code == 404 def test_links(client): root_url = client.get('/').pyquery('.side-navigation a').attr('href') client.login_admin() root_page = client.get(root_url) new_link = root_page.click("Verknüpfung") assert "Neue Verknüpfung" in new_link new_link.form['title'] = 'Google' new_link.form['url'] = 'https://www.google.ch' link = new_link.form.submit().follow() assert "Sie wurden nicht automatisch weitergeleitet" in link assert 'https://www.google.ch' in link client.get('/auth/logout') root_page = client.get(root_url) assert "Google" in root_page google = root_page.click("Google", index=0) assert google.status_code == 302 assert google.location == 'https://www.google.ch' def test_clipboard(client): client.login_admin() page = client.get('/topics/organisation') assert 'paste-link' not in page page = page.click( 'Kopieren', extra_environ={'HTTP_REFERER': page.request.url} ).follow() assert 'paste-link' in page page = page.click('Einf').form.submit().follow() assert '/organisation/organisation' in page.request.url def test_clipboard_separation(client): client.login_admin() page = client.get('/topics/organisation') page = page.click('Kopieren') assert 'paste-link' in client.get('/topics/organisation') # new client (browser) -> new clipboard client = client.spawn() client.login_admin() assert 'paste-link' not in client.get('/topics/organisation') def test_gobal_tools(client): links = client.get('/').pyquery('.globals a') assert links == [] client.login_admin() links = client.get('/').pyquery('.globals a') assert links != [] def test_top_navigation(client): links = client.get('/').pyquery('.side-navigation a span') assert links.text() == 'Organisation Themen 
Kontakt Aktuelles' def test_announcement(client): client.login_admin() color = '#006fbb' bg_color = '#008263' text = 'This is an announcement which appears on top of the page' settings = client.get('/header-settings') # test default not giving the color assert settings.form['left_header_announcement_bg_color'].value == ( '#FBBC05' ) assert settings.form['left_header_announcement_font_color'].value == ( '#000000' ) settings.form['left_header_announcement'] = text settings.form['left_header_announcement_bg_color'] = bg_color settings.form['left_header_announcement_font_color'] = color page = settings.form.submit().follow() assert text in page assert ( f'<div id="announcement" style="color: {color}; ' f'background-color: {bg_color};">' ) in page
27.212389
74
0.67935
392
3,075
5.170918
0.293367
0.057721
0.047361
0.054267
0.325111
0.23483
0.174642
0.091761
0.052294
0
0
0.009592
0.186341
3,075
112
75
27.455357
0.80056
0.023089
0
0.16
0
0
0.272909
0.061979
0
0
0
0
0.28
1
0.106667
false
0
0.04
0
0.146667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
795502273dc48fdf684fe2e0b8c17dbaab75cc3f
8,530
pyw
Python
main.pyw
Niyco/Cipher-tool
a0689daf8e8a087571d447efe6e98c206364316f
[ "MIT" ]
null
null
null
main.pyw
Niyco/Cipher-tool
a0689daf8e8a087571d447efe6e98c206364316f
[ "MIT" ]
null
null
null
main.pyw
Niyco/Cipher-tool
a0689daf8e8a087571d447efe6e98c206364316f
[ "MIT" ]
null
null
null
import tkinter as tk from tkinter import filedialog from Solve_stages import * from Text_stages import * from Analysis_stages import * from Output import * root = tk.Tk() root.title("Cipher program") root.geometry("1500x500") root.state("zoomed") #apparently windows only def getOutputText(): text = "" for stage in stages: if stage.check_var.get(): if decode_var.get() == 1: #encode is selected text = stage.encode(text) else: #decode is selected text = stage.decode(text) return text def updateOutputText(): text = getOutputText() right_text.delete(1.0, tk.END) right_text.insert(tk.END,text) for stage in stages: if stage.check_var.get(): stage.updateOutputWidget(text, right_text) def updateStageEditor(): for child in stage_editor.winfo_children(): child.grid_forget() stages[selected_stage.get()].display() root.focus_set() stage_editor = tk.Frame(root, width=10, height=10)#Size is the same as right_text, they will expand equally to fill the space stage_editor.grid(row=0, column=0, rowspan=4, sticky="NESW") stage_editor.grid_propagate(0) #stops the contents of the window affecting the size stages = [] def addStage(stage): stages.append(stage) updateStagesFrame() stages[len(stages)-1].button.select() #select the newly added stage updateStageEditor() updateOutputText() selected_stage = tk.IntVar() stages_frame = tk.Frame(root) stages_frame.grid(row=0, column=1, sticky="NS", columnspan=3) #Radiobuttons to select between encode and decode decode_var = tk.IntVar() decodeBox = tk.Radiobutton(root, text="Decode", variable=decode_var,value=-1,command=updateOutputText) encodeBox = tk.Radiobutton(root, text="Encode", variable=decode_var,value=1,command=updateOutputText) decode_var.set(-1) #set to decode as default decodeBox.grid(row=1,column=1,columnspan=3) encodeBox.grid(row=2,column=1,columnspan=3) #Up, Delete, and Down buttons def stageUp(): if len(stages) > 1 and selected_stage.get() > 1: stages.insert(selected_stage.get()-1, stages.pop(selected_stage.get())) 
selected_stage.set(selected_stage.get()-1) updateStagesFrame() updateOutputText() def stageDown(): if len(stages) > 1 and selected_stage.get() < len(stages)-1 and selected_stage.get() != 0: stages.insert(selected_stage.get()+1, stages.pop(selected_stage.get())) selected_stage.set(selected_stage.get()+1) updateStagesFrame() updateOutputText() def deleteStage(): if len(stages) > 1 and selected_stage.get() != 0: stages.pop(selected_stage.get()) selected_stage.set(selected_stage.get()-1) updateStagesFrame() updateStageEditor() updateOutputText() stage_up_button = tk.Button(root, text = "↑",command=stageUp,takefocus=0) stage_delete_button = tk.Button(root, text = "×",command=deleteStage,takefocus=0) stage_down_button = tk.Button(root, text = "↓",command=stageDown,takefocus=0) stage_up_button.grid(row=3, column=1, sticky="ESW") stage_delete_button.grid(row=3,column=2, sticky="ESW") stage_down_button.grid(row=3, column=3, sticky="ESW") #Shortcuts for selecting the next and previous stage def stageSelectUp(event): if selected_stage.get() > 0: selected_stage.set(selected_stage.get()-1) updateStagesFrame() updateStageEditor() def stageSelectDown(event): if selected_stage.get() < len(stages) - 1: selected_stage.set(selected_stage.get()+1) updateStagesFrame() updateStageEditor() root.bind("<Control-Tab>", stageSelectUp) root.bind("<Control-Shift-Tab>", stageSelectDown) root.bind("<Control-Prior>", stageSelectUp) #Control + page up root.bind("<Control-Next>", stageSelectDown) #Control + page down def updateStagesFrame(): for button in stages_frame.winfo_children(): button.destroy() for stage_index in range(len(stages)): stage = stages[stage_index] stage.button = tk.Radiobutton(stages_frame, text=stage.name, variable = selected_stage, value = stage_index, command=updateStageEditor, indicatoron = 0, width = 20, takefocus=0) stage.check_var = tk.BooleanVar() stage.check_var.set(True) stage.checkbox = tk.Checkbutton(stages_frame, variable = stage.check_var, 
command=updateOutputText, takefocus=0) if stage_index == 0: #Input cannot be disabled, so don't show the checkbox stage.checkbox.config(state="disabled") stage.button.grid(column=1, row=stage_index) stage.checkbox.grid(column=0, row=stage_index) updateStagesFrame() right_text = tk.Text(root, takefocus=0, width=10, height=10, font=("Courier", 10)) right_text.grid(row=0, column=4, rowspan=4, sticky="NESW") right_text.grid_propagate(0) tk.Grid.columnconfigure(root, 0, weight=1) tk.Grid.columnconfigure(root, 1, weight=0) tk.Grid.columnconfigure(root, 2, weight=0) tk.Grid.columnconfigure(root, 3, weight=0) tk.Grid.columnconfigure(root, 4, weight=1) tk.Grid.rowconfigure(root, 0, weight=1) tk.Grid.rowconfigure(root, 1, weight=0) tk.Grid.columnconfigure(stage_editor, 0, weight=1) tk.Grid.rowconfigure(stage_editor, 0, weight=1) #========== def add(menu, StageClass): #Helper function to make adding stages neater menu.add_command(label= StageClass.name,#Takes the name from the class command=lambda:addStage(StageClass(stage_editor, #passes the stage editor frame to draw to updateOutputText))) #and a callback for when things change and the output text needs updating #Functions for file menu operations: def openCom(): text = "" try: with filedialog.askopenfile() as file: for line in file: text += line stages[0].textbox.delete(1.0, tk.END) stages[0].textbox.insert(tk.END,text) except AttributeError:#Catch error if the user cancels the dialog pass def clearCom(): global stages stages[0].textbox.delete(1.0, tk.END) stages = [stages[0]] selected_stage.set(0) updateStageEditor() updateStagesFrame() def saveCom(): text = getOutputText() try: with filedialog.asksaveasfile() as file: file.write(text) except AttributeError: pass def copyCom(): text = "" for stage in stages: text = stage.process(text) root.clipboard_clear() root.clipboard_append(text) root.update() menu = tk.Menu(root) file_menu = tk.Menu(menu, tearoff=0) file_menu.add_command(label="Open", command=openCom) 
file_menu.add_command(label="Clear", command = clearCom) file_menu.add_command(label="Save", command=saveCom) file_menu.add_command(label="Copy output", command=copyCom) menu.add_cascade(label="File", menu = file_menu) ana_menu = tk.Menu(menu, tearoff=0) add(ana_menu, Length) add(ana_menu, PlayfairDetect) add(ana_menu, FrequencyAnalyse) add(ana_menu, Doubles) add(ana_menu, Triples) add(ana_menu, IoC) add(ana_menu, WordFinder) add(ana_menu, VigenereKeyword) add(ana_menu, ColumnarKeyword) menu.add_cascade(label="Analyse", menu=ana_menu) text_menu = tk.Menu(menu, tearoff=0) add(text_menu, Capitalise) add(text_menu, Lowercase) add(text_menu, Swapcase) add(text_menu, Strip) add(text_menu, RemoveSpaces) add(text_menu, Reverse) add(text_menu, Block) menu.add_cascade(label="Text stage", menu=text_menu) solve_menu = tk.Menu(menu, tearoff=0) add(solve_menu, CaesarShift) add(solve_menu, Substitution) add(solve_menu, Affine) add(solve_menu, Vigenere) #add(solve_menu, Transposition) #this one doesn't work add(solve_menu, RailFence) add(solve_menu, Scytale) add(solve_menu, Morse) menu.add_cascade(label="Solve stage", menu=solve_menu) #Functions for the output menu operations def changeFontSize(change): currentSize = int(right_text.cget("font").split(" ")[1]) right_text.config(font=("Courier", currentSize + change)) stages[0].textbox.config(font=("Courier", currentSize + change)) output_menu = tk.Menu(menu, tearoff=0) add(output_menu, OutputHighlight) add(output_menu, Blank) output_menu.add_command(label="Increase font size", command=lambda:changeFontSize(1)) output_menu.add_command(label="Decrease font size", command=lambda:changeFontSize(-1)) right_text.tag_configure("highlight", foreground = "red") menu.add_cascade(label="Output", menu=output_menu) root.config(menu=menu) addStage(Input(stage_editor, updateOutputText)) root.mainloop()
36.609442
149
0.710785
1,164
8,530
5.089347
0.215636
0.054862
0.045915
0.022957
0.292876
0.216577
0.16813
0.124578
0.113774
0.067016
0
0.015551
0.163189
8,530
232
150
36.767241
0.813953
0.095545
0
0.199005
0
0
0.034711
0
0
0
0
0
0
1
0.079602
false
0.00995
0.029851
0
0.114428
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7956dd9954a869adae25776f34d9cfad6f7f2ede
1,912
py
Python
mp/data/pytorch/domain_prediction_dataset_wrapper.py
MECLabTUDA/OOD-Gen
f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e
[ "MIT" ]
null
null
null
mp/data/pytorch/domain_prediction_dataset_wrapper.py
MECLabTUDA/OOD-Gen
f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e
[ "MIT" ]
null
null
null
mp/data/pytorch/domain_prediction_dataset_wrapper.py
MECLabTUDA/OOD-Gen
f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e
[ "MIT" ]
null
null
null
from mp.data.pytorch.pytorch_dataset import PytorchDataset from mp.data.datasets.dataset import Instance import copy import torch class DomainPredictionDatasetWrapper(PytorchDataset): r"""Wraps a PytorchDataset to reuse its instances.x and replacing the labels""" def __init__(self, pytorch_ds, target_idx): """ Args: pytorch_ds (PytorchSegmentationDataset): the Dataset that need to be wrapped target_idx (int): the target idx for domain prediction, corresponding to this dataset """ class Dummy: def __init__(self): self.instances = pytorch_ds.instances self.hold_out_ixs = [] self.original_ds = pytorch_ds # Ugly # noinspection PyTypeChecker super().__init__(dataset=Dummy(), size=pytorch_ds.size) # Copy the predictor, but prevent it from reshaping the prediction self.predictor = copy.copy(pytorch_ds.predictor) self.predictor.reshape_pred = False # Create new target as one hot encoded # self.target = torch.zeros((1, target_cnt), dtype=self.instances[0].y.tensor.dtype) # self.target[:, target_idx] = 1 self.target = torch.tensor([target_idx], dtype=self.instances[0].y.tensor.dtype) # Modify instances self.instances = [Instance(inst.x, self.target, inst.name, inst.class_ix, inst.group_id) for inst in self.instances] def get_subject_dataloader(self, subject_ix): r"""Get a list of input/target pairs equivalent to those if the dataset was only of subject with index subject_ix. For evaluation purposes. """ # Generate the original subject dataloader and replace the target subject_dataloader = self.original_ds.get_subject_dataloader(subject_ix) return [(x, self.target) for x, _ in subject_dataloader]
40.680851
97
0.671548
239
1,912
5.200837
0.41841
0.043443
0.01609
0.030571
0.049879
0.049879
0.049879
0
0
0
0
0.00278
0.247385
1,912
46
98
41.565217
0.861015
0.373954
0
0
0
0
0
0
0
0
0
0
0
1
0.136364
false
0
0.181818
0
0.454545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7956e8f8d0ec0fef2a695bd6195a66b5f1e4e0e9
420
py
Python
docs/python/attachments/animals.py
Benbinbin/blog-data
e98b6560253bb6a1aa35e08b4ba36d03194920d1
[ "MIT" ]
null
null
null
docs/python/attachments/animals.py
Benbinbin/blog-data
e98b6560253bb6a1aa35e08b4ba36d03194920d1
[ "MIT" ]
null
null
null
docs/python/attachments/animals.py
Benbinbin/blog-data
e98b6560253bb6a1aa35e08b4ba36d03194920d1
[ "MIT" ]
null
null
null
class Dog: def speak(self): print("Woof!") def __init__(self, name): self.name = name def hear(self, words): if self.name in words: self.speak() class Husky(Dog): origin = "Siberia" def speak(self): print("Awoo!") class Chihuahua(Dog): origin = "Mexico" def speak(self): print("Yip!") class Labrador(Dog): origin = "Canada"
14
30
0.538095
50
420
4.44
0.44
0.108108
0.162162
0.22973
0
0
0
0
0
0
0
0
0.32381
420
29
31
14.482759
0.78169
0
0
0.166667
0
0
0.078759
0
0
0
0
0
0
1
0.277778
false
0
0
0
0.666667
0.166667
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
3
795999b8a086d2a92c7c0d0019a508d781dcdb36
4,889
py
Python
code/visualization/2020/04/0_0_compression_tucker_sparse_facto_select_lr.py
lucgiffon/psm-nets
dec43c26281febf6e5c8b8f42bfb78098ae7101d
[ "MIT" ]
1
2021-07-15T07:05:18.000Z
2021-07-15T07:05:18.000Z
code/visualization/2020/04/0_0_compression_tucker_sparse_facto_select_lr.py
lucgiffon/psm-nets
dec43c26281febf6e5c8b8f42bfb78098ae7101d
[ "MIT" ]
2
2021-07-15T06:12:47.000Z
2021-07-16T10:05:36.000Z
code/visualization/2020/04/0_0_compression_tucker_sparse_facto_select_lr.py
lucgiffon/psm-nets
dec43c26281febf6e5c8b8f42bfb78098ae7101d
[ "MIT" ]
null
null
null
"""Select a learning rate per tucker+sparse-facto configuration by locating the
LR at which the smoothed loss decreases fastest (LR range test style)."""
import pathlib
import pandas as pd
from palmnet.visualization.utils import get_palminized_model_and_df, get_df
import matplotlib.pyplot as plt
import numpy as np
import logging
import plotly.graph_objects as go
import plotly.express as px
from pprint import pprint as pprint

# Silence matplotlib's noisy DEBUG/INFO logging.
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.ERROR)

# Maps display name -> the boolean CLI-flag column marking rows of that dataset.
dataset = {
    "Cifar10": "--cifar10",
    "Cifar100": "--cifar100",
    "SVHN": "--svhn",
    "MNIST": "--mnist"
}

# Base-model flag columns available for each dataset.
basemodels = {
    "Cifar100": ["--cifar100-vgg19", "--cifar100-resnet20", "--cifar100-resnet50"],
    "Cifar10": ["--cifar10-vgg19"],
    "SVHN": ["--svhn-vgg19"],
    "MNIST": ["--mnist-lenet"]
}


def show_for_tucker():
    """For every (dataset, base model, experiment row): smooth the recorded
    lr/loss curves, find the lr minimizing the smoothed loss delta, round it to
    the nearest power of ten, and collect it in a dict keyed by a trace name.

    Reads the module-level ``df`` built in the __main__ block below.
    """
    # compression_method = ["tucker", "tensortrain"]
    # df = df.apply(pd.to_numeric, errors='coerce')
    dct_config_lr = dict()       # trace name -> selected lr (power of 10)
    lst_name_trace_low = list()  # traces with too few smoothed points to trust
    for dataname in dataset:
        df_data = df[df[dataset[dataname]] == 1]
        for base_model_name in basemodels[dataname]:
            df_model = df_data[df_data[base_model_name] == 1]
            for index, row in df_model.iterrows():
                fig = go.Figure()
                csv_file = pathlib.Path(row["results_dir"]) / row["output_file_csvcbprinter"]
                df_csv = pd.read_csv(csv_file)
                win_size = 5
                # Smooth lr in log-space (rolling mean drops the first
                # win_size-1 NaN entries via iloc[win_size - 1:]).
                lr_values = df_csv["lr"].values
                lr_values_log = np.log10(lr_values)
                lr_rolling_mean = pd.Series(lr_values_log).rolling(window=win_size).mean().iloc[win_size - 1:].values
                loss_rolling_mean = df_csv["loss"].rolling(window=win_size).mean().iloc[win_size - 1:].values
                if all(np.isnan(loss_rolling_mean)):
                    continue
                # First difference of the smoothed loss (drops the padded ends).
                delta_loss = (np.hstack([loss_rolling_mean, [0]]) - np.hstack([[0], loss_rolling_mean]))[1:-1]
                delta_loss_rolling_mean = pd.Series(delta_loss).rolling(window=win_size).mean().iloc[win_size - 1:].values
                # Second smoothing pass on lr, then back to linear scale.
                lr_rolling_mean_2x = pd.Series(lr_rolling_mean).rolling(window=win_size).mean().iloc[win_size - 1:].values
                lr_rolling_mean_2x_exp = 10 ** lr_rolling_mean_2x
                # fig.add_trace(go.Scatter(x=lr_rolling_mean_exp, y=loss_rolling_mean, name="sp_fac {} - hiearchical {}".format(row["--sparsity-factor"], row["--hierarchical"])))
                fig.add_trace(go.Scatter(x=lr_rolling_mean_2x_exp[:-1], y=delta_loss_rolling_mean, name=""))
                # lr where the loss drops fastest, rounded to a power of ten.
                argmin_loss = np.argmin(delta_loss_rolling_mean)
                val = lr_rolling_mean_2x_exp[:-1][argmin_loss]
                log_val = np.log10(val)
                approx = 10 ** np.around(log_val, decimals=0)
                sparsity = int(row["--sparsity-factor"])
                hierarchical = bool(row["--hierarchical"])
                str_hierarchical = " H" if hierarchical else ""
                try:
                    nb_fac = int(row["--nb-factor"])
                except ValueError:
                    # "--nb-factor" not an int (e.g. "None"): treat as unset.
                    nb_fac = None
                name_trace = f"tucker_sparse_facto-{dataset[dataname]}-{base_model_name}-Q={nb_fac}-K={sparsity}{str_hierarchical}"
                print(len(delta_loss_rolling_mean), name_trace)
                if len(delta_loss_rolling_mean) < 10:
                    # Too few smoothed points: record the trace, skip selection.
                    lst_name_trace_low.append(name_trace)
                    continue
                dct_config_lr[name_trace] = approx
                # title_str = "{}:{} - {} - keep first :{}".format(dataname, base_model_name, "tucker", keep_first)
                fig.update_layout(barmode='group',
                                  title=name_trace,
                                  xaxis_title="lr",
                                  yaxis_title="loss",
                                  xaxis_type="log",
                                  xaxis={'type': 'category'},
                                  )
                # fig.show()
    pprint(dct_config_lr)
    pprint(lst_name_trace_low)


if __name__ == "__main__":
    root_source_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/results/")
    expe_path = "2020/04/0_0_compression_tucker_sparse_facto_select_lr"
    expe_path_errors = "2020/04/0_0_compression_tucker_sparse_facto_select_lr_errors"
    src_results_dir = root_source_dir / expe_path
    src_results_dir_errors = root_source_dir / expe_path_errors

    # Load results and remember where each row came from.
    get_df_and_assign = lambda x: get_df(x).assign(results_dir=str(x))
    df = get_df_and_assign(src_results_dir)
    df_errors = get_df_and_assign(src_results_dir_errors)
    df = pd.concat([df, df_errors])
    # Keep only runs that completed (failure flag present and equal to 0).
    df = df.dropna(subset=["failure"])
    df = df[df["failure"] == 0]
    df = df.drop(columns="oar_id").drop_duplicates()

    root_output_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/reports/figures/")
    output_dir = root_output_dir / expe_path / "line_plots"
    output_dir.mkdir(parents=True, exist_ok=True)

    show_for_tucker()
40.07377
178
0.607895
617
4,889
4.473258
0.290113
0.071739
0.054348
0.036232
0.243478
0.190942
0.181884
0.131159
0.131159
0.106522
0
0.022867
0.266517
4,889
122
179
40.07377
0.746793
0.074044
0
0.022472
0
0
0.140867
0.072534
0
0
0
0
0
1
0.011236
false
0
0.101124
0
0.11236
0.05618
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
795b1f096f5aa18037e59346d95e4b832947c2de
8,209
py
Python
spectrocrunch/sources/tests/test_polarization.py
woutdenolf/spectrocrunch
fde4b6e0f462f464ce7af6a942b355d3d8f39f77
[ "MIT" ]
3
2018-04-16T15:51:36.000Z
2019-12-16T11:21:05.000Z
spectrocrunch/sources/tests/test_polarization.py
woutdenolf/spectrocrunch
fde4b6e0f462f464ce7af6a942b355d3d8f39f77
[ "MIT" ]
null
null
null
spectrocrunch/sources/tests/test_polarization.py
woutdenolf/spectrocrunch
fde4b6e0f462f464ce7af6a942b355d3d8f39f77
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

import unittest
import cmath
import numpy as np
from scipy import integrate

from .. import polarization
from ...utils import instance
from ...patch import jsonpickle


class test_polarization(unittest.TestCase):
    """Round-trip and physics tests for Jones/Stokes polarization states."""

    def _equal_params(self, params1, params2):
        # Compare parameter dicts: exact equality for strings, allclose for numerics.
        for k, v in params1.items():
            if instance.isstring(v):
                self.assertEqual(v, params2[k])
            else:
                np.testing.assert_allclose(v, params2[k])

    def _gen_jones(self, n=20):
        # Yield n random fully-polarized Jones vectors with complex components.
        x = np.random.uniform(low=-10, high=10, size=4 * n).reshape((n, 4))
        for xi in x:
            yield polarization.Jones(xi[0] + xi[1] * 1j, xi[2] + xi[3] * 1j)

    def _gen_stokes(self, n=20):
        # Yield n random (partially polarized) Stokes vectors: S0 is scaled
        # above sqrt(S1^2+S2^2+S3^2) so the degree of polarization is < 1.
        x = np.random.uniform(low=-10, high=10, size=3 * n).reshape((n, 3))
        for xi in x:
            S0 = np.sqrt(sum(xi[1:] ** 2)) * np.random.uniform(low=1, high=1.5)
            yield polarization.Stokes(S0, *xi)

    def test_convert_representation(self):
        # Jones <-> Stokes conversions must preserve all derived quantities.
        def f1(x, attr):
            return getattr(x, attr)

        def f2(x, attr):
            # Angles are compared modulo 360 degrees.
            return getattr(x, attr) % 360

        attrs = {
            "coherency_matrix": f1,
            "dop": f1,
            "dolp": f1,
            "docp": f1,
            "hdolp": f1,
            "polangle": f2,
        }
        for J1 in self._gen_jones():
            S1 = J1.to_stokes()
            J2 = S1.to_jones()
            S2 = J2.to_stokes()
            J3 = S2.to_jones()
            self._equal_params(J2.to_params(), J3.to_params())
            self._equal_params(S1.to_params(), S2.to_params())
            # A Jones vector is fully polarized by construction.
            self.assertEqual(J1.dop, 1)
            for attr, f in attrs.items():
                a = f(J1, attr)
                np.testing.assert_allclose(a, f(S1, attr))
                np.testing.assert_allclose(a, f(J2, attr))
                np.testing.assert_allclose(a, f(S2, attr))
                np.testing.assert_allclose(a, f(J3, attr))
            np.testing.assert_allclose(J1.norm, J2.norm)
            np.testing.assert_allclose(
                J1.phase_difference % 360, J2.phase_difference % 360
            )
            np.testing.assert_allclose(J2.to_numpy(), J3.to_numpy())
            np.testing.assert_allclose(S1.to_numpy(), S2.to_numpy())
            np.testing.assert_allclose(S1.to_numpy(), S2.to_numpy())

    def test_stokes(self):
        # Decomposition into polarized + unpolarized parts must conserve
        # intensity and the coherency matrix.
        for S in self._gen_stokes():
            tmp = S.decompose()
            Spol, Sunpol = tmp["pol"], tmp["unpol"]
            np.testing.assert_allclose(
                S.intensity, S.intensity_polarized + S.intensity_unpolarized
            )
            np.testing.assert_allclose(S.intensity_polarized, Spol.intensity)
            np.testing.assert_allclose(S.intensity_unpolarized, Sunpol.intensity)
            np.testing.assert_allclose(S.dop, S.intensity_polarized / S.intensity)
            np.testing.assert_allclose(
                S.coherency_matrix, Spol.coherency_matrix + Sunpol.coherency_matrix
            )
            # Converting a partially polarized state to Jones loses the
            # unpolarized part (hence allowloss=True).
            J = S.to_jones(allowloss=True)
            np.testing.assert_allclose(J.intensity, Spol.intensity)
            S2 = polarization.Stokes.from_params(**S.to_params())
            np.testing.assert_allclose(S.to_numpy(), S2.to_numpy())

    def test_jones(self):
        for J in self._gen_jones():
            # Round trip through Stokes with the original global phase restored.
            np.testing.assert_allclose(
                J.to_numpy(), J.to_stokes().to_jones(phase0=J.phase0).to_numpy()
            )
            # Trace of the coherency matrix equals the squared norm.
            np.testing.assert_allclose(J.coherency_matrix.trace(), J.norm ** 2)
            J2 = polarization.Jones.from_params(**J.to_params())
            np.testing.assert_allclose(J.to_numpy(), J2.to_numpy())
            J.plot_efield(animate=True)

    def test_intensity(self):
        # Setting .intensity must only change the "intensity" parameter.
        for J in self._gen_jones():
            S = J.to_stokes()
            Jparams = J.to_params()
            Sparams = S.to_params()
            IJ, IS = np.random.uniform(low=1, high=10, size=2)
            J.intensity = IJ
            S.intensity = IS
            Jparams["intensity"] = IJ
            Sparams["intensity"] = IS
            self._equal_params(J.to_params(), Jparams)
            self._equal_params(S.to_params(), Sparams)
        for S in self._gen_stokes():
            Sparams = S.to_params()
            IS = np.random.uniform(low=1, high=10)
            S.intensity = IS
            Sparams["intensity"] = IS
            self._equal_params(S.to_params(), Sparams)

    def test_rotate(self):
        for J1 in self._gen_jones():
            S1 = J1.to_stokes()
            azimuth = np.random.uniform(low=0, high=2 * np.pi)
            # change-of-frame
            J2 = J1.rotate(azimuth)
            S2 = S1.rotate(azimuth)
            self._equal_params(S2.to_params(), J2.to_stokes().to_params())
            # Coherency matrix transforms as R . C . R^-1.
            R = polarization.JonesMatrixRotation(-azimuth)
            Ri = polarization.JonesMatrixRotation(azimuth)
            np.testing.assert_allclose(
                R.dot(J1.coherency_matrix).dot(Ri), J2.coherency_matrix
            )
            np.testing.assert_allclose(
                R.dot(S1.coherency_matrix).dot(Ri), S2.coherency_matrix
            )

    def test_thomson(self):
        for J1 in self._gen_jones():
            S1 = J1.to_stokes()
            azimuth = np.random.uniform(low=0, high=2 * np.pi)
            polar = np.random.uniform(low=0, high=np.pi)
            J2 = J1.thomson_scattering(azimuth, polar)
            S2 = S1.thomson_scattering(azimuth, polar)
            self._equal_params(S2.to_params(), J2.to_stokes().to_params())
            angle = polarization.ThomsonRotationAngle(azimuth)
            # change-of-frame
            R = polarization.JonesMatrixRotation(-angle)
            Ri = polarization.JonesMatrixRotation(angle)
            Mth = polarization.JonesMatrixThomson(polar)
            Mthi = Mth
            np.testing.assert_allclose(
                Mth.dot(R).dot(J1.coherency_matrix).dot(Ri).dot(Mthi),
                J2.coherency_matrix,
            )
            np.testing.assert_allclose(
                Mth.dot(R).dot(S1.coherency_matrix).dot(Ri).dot(Mthi),
                S2.coherency_matrix,
            )
            np.testing.assert_allclose(
                S2.intensity, S1.thomson_intensity(azimuth, polar)
            )

            def integrand(azimuth, polar):
                # thomson_intensity takes degrees; integration runs in radians.
                return S1.thomson_intensity(
                    np.degrees(azimuth), np.degrees(polar)
                ) * np.sin(polar)

            # Total Thomson cross-section integral must equal 8*pi/3.
            thomsonsc = (
                integrate.dblquad(
                    integrand, 0, np.pi, lambda x: 0, lambda x: 2 * np.pi
                )[0]
                / S1.intensity
            )
            np.testing.assert_allclose(thomsonsc, 8 * np.pi / 3)

    def test_compton(self):
        for S1 in self._gen_stokes():
            azimuth = np.random.uniform(low=0, high=2 * np.pi)
            polar = np.random.uniform(low=0, high=np.pi)
            energy = np.random.uniform(low=5.0, high=20.0)
            S2 = S1.compton_scattering(azimuth, polar, energy)
            np.testing.assert_allclose(
                S2.intensity, S1.compton_intensity(azimuth, polar, energy)
            )

    def test_serialize(self):
        # States must survive a jsonpickle round trip.
        g1 = next(iter(self._gen_jones()))
        g2 = jsonpickle.loads(jsonpickle.dumps(g1))
        self.assertEqual(g1, g2)
        g1 = next(iter(self._gen_stokes()))
        g2 = jsonpickle.loads(jsonpickle.dumps(g1))
        self.assertEqual(g1, g2)


def test_suite():
    """Test suite including all test suites"""
    testSuite = unittest.TestSuite()
    testSuite.addTest(test_polarization("test_jones"))
    testSuite.addTest(test_polarization("test_stokes"))
    testSuite.addTest(test_polarization("test_convert_representation"))
    testSuite.addTest(test_polarization("test_intensity"))
    testSuite.addTest(test_polarization("test_rotate"))
    testSuite.addTest(test_polarization("test_thomson"))
    testSuite.addTest(test_polarization("test_compton"))
    testSuite.addTest(test_polarization("test_serialize"))
    return testSuite


if __name__ == "__main__":
    import sys

    mysuite = test_suite()
    runner = unittest.TextTestRunner()
    if not runner.run(mysuite).wasSuccessful():
        sys.exit(1)
35.081197
83
0.575466
997
8,209
4.564694
0.161484
0.053395
0.088991
0.136454
0.489563
0.383432
0.283894
0.165458
0.13909
0.13909
0
0.028132
0.302838
8,209
233
84
35.23176
0.76708
0.011085
0
0.233696
0
0
0.023921
0.003329
0
0
0
0
0.168478
1
0.081522
false
0
0.043478
0.016304
0.152174
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
795b834e229f484b2777e3dde64e6efd9b1ae8d7
1,166
py
Python
AlphaDDA1/Othello/ringbuffer.py
KazuhisaFujita/AlphaDDA
664742567883cf3e08c2c53b3bce3112b8cc0560
[ "MIT" ]
11
2021-11-13T01:43:28.000Z
2021-12-19T06:40:34.000Z
AlphaZero/Othello66/ringbuffer.py
KazuhisaFujita/AlphaDDA
664742567883cf3e08c2c53b3bce3112b8cc0560
[ "MIT" ]
null
null
null
AlphaZero/Othello66/ringbuffer.py
KazuhisaFujita/AlphaDDA
664742567883cf3e08c2c53b3bce3112b8cc0560
[ "MIT" ]
null
null
null
#---------------------------------------
#Since : 2019/04/24
#Update: 2019/07/25
# -*- coding: utf-8 -*-
#---------------------------------------
import numpy as np


class RingBuffer:
    """Fixed-capacity circular buffer.

    Empty slots are represented by the empty list ``[]``; that same value is
    the sentinel Get_buffer_start_end uses to detect the end of stored data,
    so storing an actual empty list as an element truncates that view.
    """

    def __init__(self, buf_size):
        # `start` indexes the slot returned by get(); `end` is the next write slot.
        self.size = buf_size
        self.buf = [[] for _ in range(buf_size)]
        self.start = 0
        self.end = 0

    def add(self, el):
        """Store `el`, overwriting the element at `end` when the buffer wraps."""
        self.buf[self.end] = el
        self.end = (self.end + 1) % self.size
        if self.end == self.start:
            # Writer caught up with the reader: advance `start` past it.
            self.start = (self.start + 1) % self.size

    def Get_buffer(self):
        """Return every slot, walking backwards from the slot at `end`
        (that first slot is empty until the buffer has wrapped)."""
        return [self.buf[(self.end - offset) % self.size]
                for offset in range(self.size)]

    def Get_buffer_start_end(self):
        """Return stored elements starting at `start`, stopping at the first
        slot still holding the empty-slot sentinel."""
        collected = []
        for offset in range(self.size):
            slot = self.buf[(self.start + offset) % self.size]
            if slot == []:
                break
            collected.append(slot)
        return collected

    def get(self):
        """Return the element at `start` and advance `start` by one slot."""
        value = self.buf[self.start]
        self.start = (self.start + 1) % self.size
        return value
26.5
53
0.482847
149
1,166
3.677852
0.261745
0.131387
0.094891
0.131387
0.498175
0.498175
0.419708
0.419708
0.419708
0.288321
0
0.028025
0.326758
1,166
43
54
27.116279
0.670064
0.116638
0
0.375
0
0
0
0
0
0
0
0
0
1
0.15625
false
0
0.03125
0
0.34375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
795e54dfa1e363bb6a92f080ec47772f29a13da6
811
py
Python
fastapi_react_admin/__init__.py
sqllq/fastapi-react-admin
a0384061bd36881bcd334edb54eab5ab610ebbfd
[ "MIT" ]
null
null
null
fastapi_react_admin/__init__.py
sqllq/fastapi-react-admin
a0384061bd36881bcd334edb54eab5ab610ebbfd
[ "MIT" ]
null
null
null
fastapi_react_admin/__init__.py
sqllq/fastapi-react-admin
a0384061bd36881bcd334edb54eab5ab610ebbfd
[ "MIT" ]
null
null
null
""" ______ _ _ _____ _ _ _ | ____| | | (_) | __ \ | | /\ | | (_) | |__ __ _ ___| |_ __ _ _ __ _ | |__) |___ ___| |_ / \ __| |_ __ ___ _ _ __ | __/ _` / __| __/ _` | '_ \| | | _ // _ \/ __| __| / /\ \ / _` | '_ ` _ \| | '_ \ | | | (_| \__ \ || (_| | |_) | | | | \ \ __/ (__| |_ / ____ \ (_| | | | | | | | | | | |_| \__,_|___/\__\__,_| .__/|_| |_| \_\___|\___|\__| /_/ \_\__,_|_| |_| |_|_|_| |_| | | |_| """ from .config import router from .auth import admin_login_view from .core import ReactAppAdmin, ReactTortoiseModelAdmin from .commands import create_super_user, compile_app_admin, compile_model_admin __version__ = "0.0.1"
45.055556
89
0.393342
31
811
5.645161
0.677419
0
0
0
0
0
0
0
0
0
0
0.00625
0.408138
811
17
90
47.705882
0.358333
0.701603
0
0
0
0
0.022321
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
795f708e3eddaecd36d179568af03258f48e6ef1
8,202
py
Python
ANOVA.py
AngusNicolson/factorial_experiment_analysis
a499642c38cb22a2ce13b93dda82c622193e7e35
[ "MIT" ]
null
null
null
ANOVA.py
AngusNicolson/factorial_experiment_analysis
a499642c38cb22a2ce13b93dda82c622193e7e35
[ "MIT" ]
null
null
null
ANOVA.py
AngusNicolson/factorial_experiment_analysis
a499642c38cb22a2ce13b93dda82c622193e7e35
[ "MIT" ]
null
null
null
import itertools
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy.stats import f
from scipy.stats import norm


class ANOVA:
    """Analyse DOE experiments using ANOVA.

    NB: n > 1 for the code to work, where n is the number of repeats.

    Model: y = y_average i.e. all factors have no effect on the response.
    Hence the sum of squares is a measure of how much the factor effects the
    response. Replace with linear model??

    Expects `data` to be a DataFrame whose last column is the response and all
    other columns are factors (full factorial, equal repeats per cell) —
    confirm against the caller's CSV layout.
    """

    def __init__(self, data):
        #Initialise variables and define simple statistical values
        self.data = data
        self.num_factors = len(self.data.columns) - 1
        self.factors = list(self.data.columns[:-1])
        self.sum_y = data.iloc[:,-1].sum()
        self.unique_dict = self.unique_values_dict(self.data)
        self.levels = {}  # factor name -> number of levels

        #Determine all interactions between factors (main effects, 2-factor, ...)
        sources_of_variation = []
        for interaction_level in range(self.num_factors):
            combos = itertools.combinations(self.factors, interaction_level + 1)
            for combo in combos:
                sources_of_variation.append(self.make_interaction_name(combo))
        sources_of_variation.append('Error')
        sources_of_variation.append('Total')

        #Create ANOVA table
        self.table = pd.DataFrame(columns =['Sum of Squares', 'Degrees of Freedom',
                                            'Mean Square', 'F0', 'P-Value'],
                                  index=sources_of_variation)
        #Needed for functions later, even though the data ends up in the table.
        #Code is designed like this because initally more dictionaries were used instead of pandas dataframe.
        self.sum_of_squares = [{}]*self.num_factors

        #Determine number of repeats. Must be the same for all measurements.
        total = 1
        for factor in self.factors:
            level = len(self.unique_dict[factor])
            self.levels[factor] = level
            total = total*level
        self.n = len(self.data)/total   # repeats per factor-level combination
        self.total = len(self.data)     # total number of observations

        #Most of the complicated equations are contained within this loop/function
        for interaction_level in range(self.num_factors):
            self.calculate_interactions(interaction_level + 1)

        #Create the table from component parts
        #Sum of squares
        self.table['Sum of Squares'] = pd.DataFrame(self.sum_of_squares).max()
        self.table.loc['Total', 'Sum of Squares'] = (data.iloc[:,-1]**2).sum() - (self.sum_y**2)/(self.total)
        prefactor = self.make_prefactor(self.factors)
        final_subtotal = (1/(prefactor*self.n)) * (self.data.groupby(self.factors).sum().iloc[:,-1]**2).sum() - (self.sum_y**2)/self.total
        self.table.loc['Error', 'Sum of Squares']= self.table.loc['Total', 'Sum of Squares'] - final_subtotal
        #Degrees of freedom
        self.table.loc['Total', 'Degrees of Freedom'] = self.total - 1
        self.table.loc['Error', 'Degrees of Freedom'] = (self.total/self.n) * (self.n - 1)
        #Mean square
        self.table['Mean Square'] = self.table['Sum of Squares']/self.table['Degrees of Freedom']
        #F0
        self.table['F0'] = self.table['Mean Square']/self.table.loc['Error', 'Mean Square']
        #P-value (survival function of the F distribution)
        self.f_function = f(self.n, self.total/self.n)
        self.table['P-Value'] = self.f_function.sf(list(self.table['F0']))
        #Remove values which have no meaning. Only calculated in the first place because it was simpler to code.
        self.table.iloc[-2:, -2:] = np.NaN
        self.table.iloc[-1, -3] = np.NaN
        self.table.iloc[:, :-1] = self.table.iloc[:, :-1].astype(float)
        #F0 for statistical significance P<0.05
        self.calculate_F0_significance_level()
        #Residuals for model y = average_y
        self.calculate_residuals()

    def calculate_interactions(self, interaction_level):
        """Calculates sum of squares and degrees of freedom for a specified
        interaction level and saves them in the self.table dataframe.

        interaction_level = 1 ---> Main factors
        interaction_level = 2 ---> 2-factor interactions
        interaction_level = 3 ---> 3-factor interactions
        ..."""
        combinations = itertools.combinations(self.factors, interaction_level)
        subtotals = {}
        effects = {}
        for combo in combinations:
            interaction_factors = list(combo)
            interaction = self.make_interaction_name(interaction_factors)
            prefactor = self.make_prefactor(interaction_factors)
            self.table.loc[interaction, 'Degrees of Freedom'] = self.calculate_degrees_of_freedom(interaction_factors)
            subtotals[interaction] = (1/(prefactor*self.n)) * (self.data.groupby(interaction_factors).sum().iloc[:,-1]**2).sum() - (self.sum_y**2)/self.total
            effects[interaction] = subtotals[interaction]
            # Subtract the sums of squares of every lower-order effect nested
            # in this combination (computed in earlier calls).
            for level in range(interaction_level - 1) :
                factor_combos = itertools.combinations(combo, level + 1)
                for factor_combo in factor_combos:
                    name = self.make_interaction_name(factor_combo)
                    effects[interaction] += -self.sum_of_squares[level][name]
        self.sum_of_squares[interaction_level - 1] = effects

    def calculate_degrees_of_freedom(self, interaction_factors):
        # Product of (levels - 1) over the factors in the interaction.
        dof = 1
        for factor in interaction_factors:
            dof = (self.levels[factor] - 1) * dof
        return dof

    def unique_values_dict(self, df):
        # Column name -> array of that column's unique values.
        unique_dict = {}
        for column in df.columns:
            unique_dict[column] = df[column].unique()
        return unique_dict

    def make_prefactor(self, interaction_factors):
        #Determine prefactor. Multiply all factor levels together which aren't the main factor
        prefactor = 1
        for factor in self.factors:
            if factor not in interaction_factors:
                prefactor = prefactor * self.levels[factor]
        return prefactor

    def make_interaction_name(self, interaction_factors):
        # Join factor names with ':' (e.g. "A:B"), dropping the leading ':'.
        interaction = ''
        for factor in interaction_factors:
            interaction = interaction + ':' + factor
        interaction = interaction[1:]
        return interaction

    def calculate_F0_significance_level(self, sig=0.05):
        # Critical F0 value at significance `sig` (inverse survival function).
        self.significance = self.f_function.isf(sig)

    def calculate_residuals(self):
        # Standardized residuals of the cell-mean model.
        self.sigma = np.sqrt(self.table.loc['Error', 'Mean Square'])
        tmp_data = self.data.set_index(self.factors)
        self.residuals = (tmp_data - tmp_data.groupby(self.factors).mean()).iloc[:, -1].values/self.sigma

    def plot_residuals(self):
        """Makes a normal probability plot of residuals"""
        residuals = sorted(self.residuals)
        df = pd.DataFrame(columns=['Residuals'], data=residuals)
        df['Position'] = df.index + 1
        # Blom-style plotting positions mapped through the normal quantile.
        df['f'] = (df.Position - 0.375)/(len(df) + 0.25)
        df['z'] = norm.ppf(df.f)
        plt.figure()
        sns.regplot(x='Residuals', y='z', data=df)
        plt.show()

    def plot_normal(self):
        """Makes a normal probability plot of the response"""
        tmp_data = self.data.iloc[:, -1].values
        tmp_data.sort()
        df = pd.DataFrame(columns=['Response'], data=tmp_data)
        df['Position'] = df.index + 1
        df['f'] = (df.Position - 0.375)/(len(df) + 0.25)
        df['z'] = norm.ppf(df.f)
        plt.figure()
        sns.regplot(x='Response', y='z', data=df)
        plt.show()

    def plot_pareto_chart(self):
        # Horizontal bar chart of F0 per term with the significance cut-off line.
        ANOVA_table = self.table.sort_values(by='F0')
        plt.figure()
        plt.barh(ANOVA_table.index, ANOVA_table['F0'])
        plt.xlabel('F0')
        plt.ylabel('Term')
        plt.axvline(x = self.significance, linestyle='--')


three_data = pd.read_csv('test_data.csv')
three = ANOVA(three_data) #Doesn't work for n < 2
five_data = pd.read_csv('example_data.csv')
five_data.drop(columns=['order'], inplace=True)
five = ANOVA(five_data)
42.278351
157
0.617045
1,036
8,202
4.766409
0.214286
0.04192
0.031592
0.020251
0.237141
0.170514
0.113609
0.076347
0.051235
0.051235
0
0.012161
0.268105
8,202
193
158
42.497409
0.810428
0.175933
0
0.136
0
0
0.059467
0
0
0
0
0
0
1
0.088
false
0
0.056
0
0.184
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
795f95b9ee59eba0d720fd1de7316678421773f4
6,010
py
Python
datmo/core/entity/snapshot.py
datmo/datmo
a456d196006b67ce56af96cb4900682eab747bef
[ "MIT" ]
331
2018-03-30T14:33:59.000Z
2022-01-10T19:43:32.000Z
datmo/core/entity/snapshot.py
KIMS-Github/datmo
a456d196006b67ce56af96cb4900682eab747bef
[ "MIT" ]
274
2018-04-08T17:12:44.000Z
2020-07-29T02:45:22.000Z
datmo/core/entity/snapshot.py
KIMS-Github/datmo
a456d196006b67ce56af96cb4900682eab747bef
[ "MIT" ]
28
2018-05-03T21:57:22.000Z
2020-12-31T04:18:42.000Z
import os
from datetime import datetime

from datmo.core.util.json_store import JSONStore
from datmo.core.util.misc_functions import prettify_datetime, printable_object, format_table


class Snapshot():
    """Snapshot is an entity object to represent a version of the model. These
    snapshots are the building blocks upon which models can be shared and
    reproduced.

    Snapshots consist of 5 main components which are represented as well in
    the attributes listed below
    1) Source code
    2) Dependency environment
    3) Large files not included in source code
    4) Configurations of your model, features, data, etc
    5) Performance metrics that evaluate your model

    Note
    ----
    All attributes of the class in the ``Attributes`` section must be
    serializable by the DB

    Parameters
    ----------
    dictionary : dict
        id : str, optional
            the id of the entity
            (default is None; storage driver has not assigned an id yet)
        model_id : str
            the parent model id for the entity
        message : str
            long description of snapshot
        code_id : str
            code reference associated with the snapshot
        environment_id : str
            id for environment used to create snapshot
        file_collection_id : str
            file collection associated with the snapshot
        config : dict
            key, value pairs of configurations
        stats : dict
            key, value pairs of metrics and statistics
        task_id : str, optional
            task id associated with snapshot
            (default is None, means no task_id set)
        label : str, optional
            short description of snapshot
            (default is None, means no label set)
        visible : bool, optional
            True if visible to user via list command else False
            (default is True to show users unless otherwise specified)
        created_at : datetime.datetime, optional
            (default is datetime.utcnow(), at time of instantiation)
        updated_at : datetime.datetime, optional
            (default is same as created_at, at time of instantiation)

    Attributes
    ----------
    id : str or None
        the id of the entity
    model_id : str
        the parent model id for the entity
    message : str
        long description of snapshot
    code_id : str
        code reference associated with the snapshot
    environment_id : str
        id for environment used to create snapshot
    file_collection_id : str
        file collection associated with the snapshot
    config : dict
        key, value pairs of configurations
    stats : dict
        key, value pairs of metrics and statistics
    task_id : str or None
        task id associated with snapshot
    label : str or None
        short description of snapshot
    visible : bool
        True if visible to user via list command else False
    created_at : datetime.datetime
    updated_at : datetime.datetime
    """

    def __init__(self, dictionary):
        # Optional keys use .get with defaults; required keys raise KeyError
        # if missing from `dictionary`.
        self.id = dictionary.get('id', None)
        self.model_id = dictionary['model_id']
        self.message = dictionary['message']

        self.code_id = dictionary['code_id']
        self.environment_id = dictionary['environment_id']
        self.file_collection_id = dictionary['file_collection_id']
        self.config = dictionary['config']
        self.stats = dictionary['stats']

        self.task_id = dictionary.get('task_id', None)
        self.label = dictionary.get('label', None)
        self.visible = dictionary.get('visible', True)

        self.created_at = dictionary.get('created_at', datetime.utcnow())
        self.updated_at = dictionary.get('updated_at', self.created_at)

    def __eq__(self, other):
        # Entities are equal iff their ids match (and `other` is truthy).
        return self.id == other.id if other else False

    def __str__(self):
        # Human-readable multi-line summary with ANSI colors:
        # \033[94m blue, \033[93m yellow, \033[1m bold, \033[0m reset.
        if self.label:
            final_str = '\033[94m' + "snapshot " + self.id + '\033[0m'
            final_str = final_str + '\033[94m' + " (" + '\033[0m'
            final_str = final_str + '\033[93m' + '\033[1m' + "label: " + self.label + '\033[0m'
            final_str = final_str + '\033[94m' + ")" + '\033[0m' + os.linesep
        else:
            final_str = '\033[94m' + "snapshot " + self.id + '\033[0m' + os.linesep
        final_str = final_str + "Date: " + prettify_datetime(
            self.created_at) + os.linesep
        table_data = []
        if self.task_id:
            table_data.append(["Task", "-> " + self.task_id])
        table_data.append(["Visible", "-> " + str(self.visible)])
        # Components
        table_data.append(["Code", "-> " + self.code_id])
        table_data.append(["Environment", "-> " + self.environment_id])
        table_data.append(["Files", "-> " + self.file_collection_id])
        table_data.append(["Config", "-> " + str(self.config)])
        table_data.append(["Stats", "-> " + str(self.stats)])
        final_str = final_str + format_table(table_data)
        final_str = final_str + os.linesep + " " + self.message + os.linesep + os.linesep
        return final_str

    def __repr__(self):
        return self.__str__()

    def save_config(self, filepath):
        # Persist config as <filepath>/config.json via the JSONStore helper.
        JSONStore(os.path.join(filepath, 'config.json'), self.config)
        return

    def save_stats(self, filepath):
        # Persist stats as <filepath>/stats.json via the JSONStore helper.
        JSONStore(os.path.join(filepath, 'stats.json'), self.stats)
        return

    def to_dictionary(self, stringify=False):
        """Return the entity's attributes as a dict; with stringify=True,
        convert dict/datetime values to printable strings for display."""
        attr_dict = self.__dict__
        # Drop callables and dunder entries so only data attributes remain.
        pruned_attr_dict = {
            attr: val
            for attr, val in attr_dict.items()
            if not callable(getattr(self, attr)) and not attr.startswith("__")
        }
        if stringify:
            for key in ["config", "stats", "message", "label"]:
                pruned_attr_dict[key] = printable_object(pruned_attr_dict[key])
            for key in ["created_at", "updated_at"]:
                pruned_attr_dict[key] = prettify_datetime(
                    pruned_attr_dict[key])
        return pruned_attr_dict
38.280255
95
0.621631
747
6,010
4.840696
0.227577
0.033186
0.029038
0.026549
0.333518
0.311394
0.262721
0.235896
0.235896
0.219027
0
0.012829
0.286689
6,010
156
96
38.525641
0.830651
0.419468
0
0.030769
0
0
0.111007
0
0.030769
0
0
0
0
1
0.107692
false
0
0.061538
0.030769
0.276923
0.030769
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7961d1af5a2c494ba659aefe30c177aba0152b99
3,895
py
Python
ranking/train_LM.py
yzhhome/JDQA
68e1d0259d316b3577a1f2fafa773b50f1885762
[ "MIT" ]
1
2021-12-21T10:50:21.000Z
2021-12-21T10:50:21.000Z
ranking/train_LM.py
kalanile/JDQA
68e1d0259d316b3577a1f2fafa773b50f1885762
[ "MIT" ]
null
null
null
ranking/train_LM.py
kalanile/JDQA
68e1d0259d316b3577a1f2fafa773b50f1885762
[ "MIT" ]
1
2021-12-21T10:50:20.000Z
2021-12-21T10:50:20.000Z
'''
@Author: dengzaiyong
@Date: 2021-08-21 15:16:08
@LastEditTime: 2021-08-27 19:37:08
@LastEditors: dengzaiyong
@Desciption: Train tfidf, word2vec and fasttext language models
@FilePath: /JDQA/ranking/train_LM.py
'''
import os
from collections import defaultdict

from gensim import models, corpora
import config
import pandas as pd
import jieba
from utils.tools import create_logger

logger = create_logger(config.root_path + '/logs/train_LM.log')


class Trainer(object):
    """Builds TF-IDF, word2vec and fastText models over the ranking corpus.

    Instantiation runs the full pipeline: load -> preprocess -> train -> save.
    """

    def __init__(self):
        # Concatenate sentences from the train, test and dev splits.
        self.data = self.data_reader(config.rank_train_file) + \
                    self.data_reader(config.rank_test_file) + \
                    self.data_reader(config.rank_dev_file)
        # NOTE(review): stopwords are loaded but never used below — confirm
        # whether filtering was intended.
        self.stopwords = open(config.stopwords_path).readlines()
        self.preprocessor()
        self.train()
        self.saver()

    def data_reader(self, path):
        """Read a TSV dataset and return all sentences from the
        question1 and question2 columns."""
        sentences = []
        df = pd.read_csv(path, sep='\t', encoding='utf-8')
        question1 = df['question1'].values
        question2 = df['question2'].values
        sentences.extend(list(question1))
        sentences.extend(list(question2))
        return sentences

    def preprocessor(self):
        """Tokenize the sentences and build the dictionary/corpus needed
        to compute TF-IDF."""
        logger.info('loading data...')
        # Tokenize every sentence with jieba.
        self.data = [[word for word in jieba.cut(sentence)] for sentence in self.data]
        # Count how many times each word occurs.
        self.freq = defaultdict(int)
        for sentence in self.data:
            for word in sentence:
                self.freq[word] += 1
        # Drop words that occur no more than once.
        self.data = [[word for word in sentence if self.freq[word] > 1] \
                     for sentence in self.data]
        logger.info('building dictionary...')
        # Build and persist the token dictionary.
        self.dictionary = corpora.Dictionary(self.data)
        self.dictionary.save(config.temp_path + '/model/ranking/ranking.dict')
        # Build the bag-of-words corpus and serialize it to disk.
        self.corpus = [self.dictionary.doc2bow(text) for text in self.data]
        corpora.MmCorpus.serialize(config.temp_path + '/model/ranking/ranking.mm',
                                   self.corpus)

    def train(self):
        logger.info('train tfidf model...')
        self.tfidf = models.TfidfModel(self.corpus, normalize=True)

        logger.info('train word2vec model...')
        self.w2v = models.Word2Vec(self.data,
                                   vector_size=config.embed_dim,
                                   window=2,
                                   min_count=2,
                                   sample=6e-5,
                                   min_alpha=0.0007,
                                   alpha=0.03,
                                   workers=4,
                                   negative=15,
                                   epochs=10)
        # NOTE(review): Word2Vec(self.data, ...) already builds the vocab and
        # trains; calling build_vocab + train again retrains from a rebuilt
        # vocabulary — confirm this double pass is intentional.
        self.w2v.build_vocab(self.data)
        self.w2v.train(self.data,
                       total_examples=self.w2v.corpus_count,
                       epochs=15,
                       report_delay=1)

        logger.info('train fasttext model...')
        self.fast = models.FastText(self.data,
                                    vector_size=config.embed_dim,
                                    window=3,
                                    min_count=1,
                                    epochs=10,
                                    min_n=3,
                                    max_n=6,
                                    word_ngrams=1)

    def saver(self):
        # Persist all three models under <temp_path>/model/ranking/.
        logger.info(' save tfidf model ...')
        self.tfidf.save(os.path.join(config.temp_path, 'model/ranking/tfidf.model'))

        logger.info(' save word2vec model ...')
        self.w2v.save(os.path.join(config.temp_path, 'model/ranking/w2v.model'))

        logger.info(' save fasttext model ...')
        self.fast.save(os.path.join(config.temp_path, 'model/ranking/fast.model'))


if __name__ == "__main__":
    Trainer()
32.458333
95
0.537869
416
3,895
4.920673
0.362981
0.058622
0.034196
0.046409
0.218368
0.175867
0.09575
0.09575
0.058622
0
0
0.030965
0.353273
3,895
120
96
32.458333
0.781659
0.077279
0
0.026316
0
0
0.098161
0.035078
0
0
0
0
0
1
0.065789
false
0
0.092105
0
0.184211
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7962461ca47687b7819e6dc00edee38793e1d6d0
4,680
py
Python
dao/ImageDAO.py
NEU-CSYE6225-SEC03/webservice
416cff5e3c8c88ce59333393a933ea88b3b8e2c0
[ "MIT" ]
null
null
null
dao/ImageDAO.py
NEU-CSYE6225-SEC03/webservice
416cff5e3c8c88ce59333393a933ea88b3b8e2c0
[ "MIT" ]
null
null
null
dao/ImageDAO.py
NEU-CSYE6225-SEC03/webservice
416cff5e3c8c88ce59333393a933ea88b3b8e2c0
[ "MIT" ]
1
2022-03-09T23:46:32.000Z
2022-03-09T23:46:32.000Z
import uuid
import datetime
import pymysql
from tool.Config import Config
from tool.Logger import Logger


class ImageDAO(object):
    """Async data-access object for the `image` table.

    Each method acquires a connection from an async pool
    (aiomysql-style `acquire()` / async cursors), runs one statement,
    and logs (rather than raises) any database error.
    """

    def __init__(self, connect_pool):
        # Async connection pool; presumably aiomysql — TODO confirm.
        self.connect_pool = connect_pool

    async def userImageExist(self, user_id: str) -> bool:
        """Return True when an image row exists for user_id.

        Errors are swallowed after logging, so a DB failure reads as
        "no image" (selectResult stays None).
        """
        selectResult = None
        async with self.connect_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    await cursor.execute("SELECT user_id FROM image WHERE user_id = %s", [user_id, ])
                    selectResult = await cursor.fetchone()
                    Logger.getInstance().info('execute sql to determine exist of image by user_id [%s]' % user_id)
                except Exception as e:
                    Logger.getInstance().exception(e)
        return selectResult is not None

    async def getUserImage(self, user_id: str):
        """Fetch one image row for user_id as a dict, or None if absent.

        upload_date is formatted as "YYYY-MM-DD"; the DB column is
        presumably a DATE/DATETIME — verify against the schema.
        """
        selectResult = None
        async with self.connect_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    await cursor.execute(
                        "SELECT id, file_name, user_id, url, upload_date FROM image WHERE user_id = %s",
                        [user_id, ])
                    Logger.getInstance().info('execute sql to get info of image by user_id[%s]' % user_id)
                    selectResult = await cursor.fetchone()
                except Exception as e:
                    Logger.getInstance().exception(e)
        if selectResult is not None:
            # Positional tuple -> named dict for the service layer.
            return {
                'id': selectResult[0],
                'file_name': selectResult[1],
                'user_id': selectResult[2],
                'url': selectResult[3],
                'upload_date': selectResult[4].strftime("%Y-%m-%d")
            }
        else:
            return None

    async def updateUserImage(self, file_name: str, url: str, user_id: str) -> bool:
        """Overwrite file_name/url and stamp today's date; True if a row changed."""
        affectRowNum = 0
        async with self.connect_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    affectRowNum = await cursor.execute(
                        "UPDATE image SET file_name = %s, url = %s, upload_date = %s where user_id = %s",
                        [file_name, url, datetime.datetime.now().strftime("%Y-%m-%d"), user_id, ])
                    Logger.getInstance().info('execute sql for updating image info by user_id[%s]' % user_id)
                    # Explicit commit: autocommit is presumably off — confirm pool config.
                    await conn.commit()
                except Exception as e:
                    Logger.getInstance().exception(e)
        if affectRowNum:
            return True
        else:
            return False

    async def deleteUserImage(self, user_id: str) -> bool:
        """Delete the user's image row; True if a row was removed."""
        affectRowNum = 0
        async with self.connect_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    affectRowNum = await cursor.execute(
                        "DELETE FROM image WHERE user_id = %s", [user_id, ]
                    )
                    Logger.getInstance().info('execute sql for deleting image info by user_id[%s]' % user_id)
                    await conn.commit()
                except Exception as e:
                    Logger.getInstance().exception(e)
        if affectRowNum:
            return True
        else:
            return False

    async def createUserImage(self, file_name: str, url: str, user_id: str):
        """Insert a new image row (uuid1 primary key, today's date).

        Returns (success, data) where data is the dict that was inserted.
        Column names come from the dict keys; values are bound as
        parameters, so the dynamic SQL stays injection-safe.
        """
        table = 'image'
        data = {
            'id': str(uuid.uuid1()),
            'file_name': file_name,
            'url': url,
            'user_id': user_id,
            'upload_date': datetime.datetime.now().strftime("%Y-%m-%d"),
        }
        keys = ', '.join(data.keys())
        values = ', '.join(['%s'] * len(data))
        insert_sql = "INSERT INTO {table} ({keys}) VALUES ({values})".format(table=table, keys=keys, values=values)
        affectRowNum = 0
        async with self.connect_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    affectRowNum = await cursor.execute(insert_sql, tuple(data.values()))
                    await conn.commit()
                    Logger.getInstance().info(
                        'execute sql for inserting a image, affectRowNum[{}], insert sql[{}], values[{}]'.format(
                            affectRowNum, insert_sql, tuple(data.values())))
                except Exception as e:
                    Logger.getInstance().exception(e)
        if affectRowNum:
            return True, data
        else:
            return False, data
37.142857
115
0.519444
494
4,680
4.813765
0.192308
0.065601
0.023549
0.03238
0.643818
0.623633
0.594617
0.569386
0.513457
0.43524
0
0.003114
0.382479
4,680
125
116
37.44
0.819723
0
0
0.461538
0
0.009615
0.141239
0
0
0
0
0
0
1
0.009615
false
0
0.048077
0
0.153846
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79627b9bf273d6f20eed09dec297c797623cc255
4,773
py
Python
tests/app/dao/test_inbound_shortnumbers_dao.py
GouvQC/notification-api
e865b8b92a9a45c7cee006f427dcf77d71b09d6d
[ "MIT" ]
1
2021-08-13T13:46:04.000Z
2021-08-13T13:46:04.000Z
tests/app/dao/test_inbound_shortnumbers_dao.py
GouvQC/notification-api
e865b8b92a9a45c7cee006f427dcf77d71b09d6d
[ "MIT" ]
null
null
null
tests/app/dao/test_inbound_shortnumbers_dao.py
GouvQC/notification-api
e865b8b92a9a45c7cee006f427dcf77d71b09d6d
[ "MIT" ]
1
2021-09-29T18:25:48.000Z
2021-09-29T18:25:48.000Z
# Tests for the inbound-shortnumber DAO.  All tests run against the
# notify_db/notify_db_session fixtures (a real test database); the
# create_* helpers come from tests.app.db.
import pytest
from sqlalchemy.exc import IntegrityError

from app.dao.inbound_shortnumbers_dao import (
    dao_get_inbound_shortnumbers,
    dao_get_inbound_shortnumber_for_service,
    dao_get_available_inbound_shortnumbers,
    dao_set_inbound_shortnumber_to_service,
    dao_set_inbound_shortnumber_active_flag,
    dao_allocate_shortnumber_for_service,
    dao_add_inbound_shortnumber)
from app.models import InboundShortNumber
from tests.app.db import create_service, create_inbound_shortnumber


def test_get_inbound_shortnumbers(notify_db, notify_db_session, sample_inbound_shortnumbers):
    # The DAO returns exactly the fixture rows, in the same order.
    res = dao_get_inbound_shortnumbers()
    assert len(res) == len(sample_inbound_shortnumbers)
    assert res == sample_inbound_shortnumbers


def test_get_available_inbound_shortnumbers(notify_db, notify_db_session):
    # A freshly created shortnumber has no service, so it counts as available.
    inbound_shortnumber = create_inbound_shortnumber(shortnumber='1')

    res = dao_get_available_inbound_shortnumbers()

    assert len(res) == 1
    assert res[0] == inbound_shortnumber


def test_set_service_id_on_inbound_shortnumber(notify_db, notify_db_session, sample_inbound_shortnumbers):
    # Assigning an available number to a service persists the service_id.
    service = create_service(service_name='test service')
    numbers = dao_get_available_inbound_shortnumbers()

    dao_set_inbound_shortnumber_to_service(service.id, numbers[0])

    res = InboundShortNumber.query.filter(InboundShortNumber.service_id == service.id).all()

    assert len(res) == 1
    assert res[0].service_id == service.id


def test_after_setting_service_id_that_inbound_shortnumber_is_unavailable(
        notify_db, notify_db_session, sample_inbound_shortnumbers):
    # Once assigned, the number must drop out of the "available" set.
    service = create_service(service_name='test service')
    shortnumbers = dao_get_available_inbound_shortnumbers()

    assert len(shortnumbers) == 1

    dao_set_inbound_shortnumber_to_service(service.id, shortnumbers[0])

    res = dao_get_available_inbound_shortnumbers()

    assert len(res) == 0


def test_setting_a_service_twice_will_raise_an_error(notify_db, notify_db_session):
    # A service may hold only one inbound shortnumber: the second
    # assignment violates the unique constraint on service_id.
    create_inbound_shortnumber(shortnumber='1')
    create_inbound_shortnumber(shortnumber='2')
    service = create_service(service_name='test service')
    shortnumbers = dao_get_available_inbound_shortnumbers()

    dao_set_inbound_shortnumber_to_service(service.id, shortnumbers[0])

    with pytest.raises(IntegrityError) as e:
        dao_set_inbound_shortnumber_to_service(service.id, shortnumbers[1])

    assert 'duplicate key value violates unique constraint' in str(e.value)


@pytest.mark.parametrize("active", [True, False])
def test_set_inbound_shortnumber_active_flag(notify_db, notify_db_session, sample_service, active):
    # The active flag round-trips for both True and False.
    inbound_shortnumber = create_inbound_shortnumber(shortnumber='1')
    dao_set_inbound_shortnumber_to_service(sample_service.id, inbound_shortnumber)
    dao_set_inbound_shortnumber_active_flag(sample_service.id, active=active)

    inbound_shortnumber = dao_get_inbound_shortnumber_for_service(sample_service.id)

    assert inbound_shortnumber.active is active


def test_dao_allocate_shortnumber_for_service(notify_db_session):
    # Allocation links the number to the service and is visible both ways.
    shortnumber = '078945612'
    inbound_shortnumber = create_inbound_shortnumber(shortnumber=shortnumber)
    service = create_service()

    updated_inbound_shortnumber = dao_allocate_shortnumber_for_service(
        service_id=service.id, inbound_shortnumber_id=inbound_shortnumber.id)

    assert service.get_inbound_shortnumber() == shortnumber
    assert updated_inbound_shortnumber.service_id == service.id


def test_dao_allocate_shortnumber_for_service_raises_if_inbound_shortnumber_already_taken(notify_db_session,
                                                                                          sample_service):
    # A number already owned by another service cannot be re-allocated.
    shortnumber = '078945612'
    inbound_shortnumber = create_inbound_shortnumber(shortnumber=shortnumber, service_id=sample_service.id)
    service = create_service(service_name="Service needs an inbound shortnumber")

    with pytest.raises(Exception) as exc:
        dao_allocate_shortnumber_for_service(service_id=service.id,
                                             inbound_shortnumber_id=inbound_shortnumber.id)

    assert 'is not available' in str(exc.value)


def test_dao_allocate_shortnumber_for_service_raises_if_invalid_inbound_shortnumber(notify_db_session, fake_uuid):
    # An id that matches no row is reported the same way as a taken number.
    service = create_service(service_name="Service needs an inbound shortnumber")

    with pytest.raises(Exception) as exc:
        dao_allocate_shortnumber_for_service(service_id=service.id,
                                             inbound_shortnumber_id=fake_uuid)

    assert 'is not available' in str(exc.value)


def test_dao_add_inbound_shortnumber(notify_db_session):
    # Adding a bare number makes it immediately available.
    inbound_shortnumber = '12345678901'

    dao_add_inbound_shortnumber(inbound_shortnumber)

    res = dao_get_available_inbound_shortnumbers()

    assert len(res) == 1
    assert res[0].short_number == inbound_shortnumber
39.775
125
0.803059
601
4,773
5.918469
0.141431
0.23278
0.04217
0.069722
0.67079
0.594321
0.535845
0.458814
0.445319
0.392747
0
0.010843
0.130526
4,773
119
126
40.109244
0.846265
0
0
0.294872
0
0
0.04714
0
0
0
0
0
0.205128
1
0.128205
false
0
0.064103
0
0.192308
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
7962e2d4ed65e0f87126ca65657b5d805b1ac6cf
2,363
py
Python
profiletool.py
SimpleProxy/myproject02
13d0c657e2e324af78467eb2edfae2d22669573f
[ "MIT" ]
1
2020-10-21T21:32:42.000Z
2020-10-21T21:32:42.000Z
profiletool.py
kelvesc/myproject02
13d0c657e2e324af78467eb2edfae2d22669573f
[ "MIT" ]
null
null
null
profiletool.py
kelvesc/myproject02
13d0c657e2e324af78467eb2edfae2d22669573f
[ "MIT" ]
null
null
null
#!/bin/python3
# -*- coding: utf-8 -*-
# file name: profiletool.py

# standart libraries
from time import sleep
from time import process_time_ns as timer_ns

# to call the respective routines
import subprocess as ps

# local imports
import pyfactorial as pyf
import mathfactorial as mtf


def _vector():
    """Factorial arguments to profile: even numbers 2..500."""
    return range(2, 501, 2)


def _mod_asm(num):
    """Patch the assembly input so the VM computes factorial(num)."""
    ps.run(["./asmmodifier.sh", num])
    sleep(0.01)


def user_defined_fac(n):
    """Hand-written iterative Python factorial."""
    return pyf.iterative_factorial(n)


def mathlib_defined_fac(n):
    """math-library-backed factorial."""
    return mtf.factorial(n)


def vm_defined_fac(n):
    """Run the Hack CPU emulator on the factorial test script.

    The result itself is discarded; only the runtime is measured.
    """
    ps.run(["./vm_code/hack_machine/CPUEmulator.sh",
            "./vm_code/test/Factorial.tst",
            "2&>1 >/dev/null"],
           capture_output=True, text=True)


def _profile(result_path, fac, prepare=None):
    """Shared timing loop (previously duplicated verbatim in all three
    test_* functions below).

    For every num in _vector(): optionally run prepare(num) (untimed),
    time fac(num) with process_time_ns, append "num dt" to result_path,
    and print a running report plus the total at the end.
    """
    # `with` guarantees the file is closed even when a run fails;
    # mode "w" truncates, matching the original behaviour (the old
    # seek(0, 2) right after opening in "w" mode was a no-op).
    with open(result_path, "w") as results:
        totalTime = 0
        for num in _vector():
            if prepare is not None:
                prepare(num)
            start = timer_ns()
            fac(int(num))
            end = timer_ns()
            dt = end - start
            totalTime += dt
            results.write(f"{num} {dt}\n")
            print(f"factorial of {num} took {dt} nanoseconds")
            sleep(0.02)
        print(f"Total time elapsed: {totalTime} nanoseconds")


def test_user_factorial():
    """Profile the hand-written Python factorial."""
    _profile("./results/vector_nxt_user.txt", user_defined_fac)


def test_math_factorial():
    """Profile the math-library factorial."""
    _profile("./results/vector_nxt_mathlib.txt", mathlib_defined_fac)


def test_vm_factorial():
    """Profile the VM-emulated factorial, patching the asm file first."""
    _profile("./results/vector_nxt_vm.txt", vm_defined_fac,
             prepare=lambda num: _mod_asm(str(num)))


if __name__ == "__main__":
    test_user_factorial()
    test_math_factorial()
    test_vm_factorial()
22.084112
59
0.611934
321
2,363
4.317757
0.29595
0.035354
0.02381
0.058442
0.555556
0.555556
0.477633
0.477633
0.477633
0.477633
0
0.017065
0.25603
2,363
106
60
22.292453
0.771331
0.060093
0
0.514286
0
0
0.216802
0.069106
0
0
0
0
0
1
0.114286
false
0
0.071429
0.042857
0.228571
0.085714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7964d5e0d6c5bbff30057dd541992a4595176f15
760
py
Python
urmovie/views/image_view.py
xuyangliu/UR
8a3c94dd6b6f16bf233167333464c0429ad269d8
[ "Apache-2.0" ]
null
null
null
urmovie/views/image_view.py
xuyangliu/UR
8a3c94dd6b6f16bf233167333464c0429ad269d8
[ "Apache-2.0" ]
null
null
null
urmovie/views/image_view.py
xuyangliu/UR
8a3c94dd6b6f16bf233167333464c0429ad269d8
[ "Apache-2.0" ]
null
null
null
# Author:Sunny Liu from django.shortcuts import HttpResponse from django.shortcuts import render from django.shortcuts import redirect from urmovie import models from django.views.decorators.csrf import csrf_exempt import hashlib,os """ 内容简介: 1.爬虫情况下,对电影封面的添加 """ @csrf_exempt def uploadImg(request): if request.method == 'POST': print(type(request.FILES.get('img'))) new_img = models.Image( image_file=request.FILES.get('img'), image_name = "hahaha.jpg", ) new_img.save() return render(request, 'uploadimg.html') @csrf_exempt def showImg(request): imgs = models.Image.objects.all() content = { 'imgs':imgs, } return render(request, 'showimg.html', content)
24.516129
52
0.665789
93
760
5.365591
0.505376
0.08016
0.114228
0.150301
0
0
0
0
0
0
0
0.001684
0.218421
760
31
53
24.516129
0.838384
0.021053
0
0.086957
0
0
0.071429
0
0
0
0
0
0
1
0.086957
false
0
0.26087
0
0.434783
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7964ebe5d975dfd2d7d9cc2c69f05839abcd1197
2,983
py
Python
fastreid/layers/norm_layers/batch_re_norm2d.py
SZLSP/reid2020NAIC
d0eaee768e0be606417a27ce5ea2b3071b5a9bc2
[ "Apache-2.0" ]
2
2021-05-12T13:36:46.000Z
2021-08-15T10:35:08.000Z
fastreid/layers/norm_layers/batch_re_norm2d.py
SZLSP/reid2020NAIC
d0eaee768e0be606417a27ce5ea2b3071b5a9bc2
[ "Apache-2.0" ]
1
2021-12-28T12:49:49.000Z
2021-12-28T12:49:49.000Z
fastreid/layers/norm_layers/batch_re_norm2d.py
SZLSP/reid2020NAIC
d0eaee768e0be606417a27ce5ea2b3071b5a9bc2
[ "Apache-2.0" ]
null
null
null
import torch
import torch.nn as nn
from torch.cuda.amp import custom_fwd


class BatchReNorm2D(nn.Module):
    """Batch Re-Normalization

    Parameters
        num_features - C from an expected input of size (N, C, H, W)
        eps - a value added to the denominator for numerical stability.
            Default: 1e-3
        momentum - the value used for the running_mean and running_var
            computation. Default: 0.01
        affine - a boolean value that when set to True, this module has
            learnable affine parameters. Default: True
        r_max - a hyper parameter. The paper used rmax = 1 for the first
            5000 training steps, after which these were gradually relaxed
            to reach rmax=3 at 40k steps.
        d_max - a hyper parameter. The paper used dmax = 0 for the first
            5000 training steps, after which these were gradually relaxed
            to reach dmax=5 at 25k steps.

    Shape:
        Input: (N, C, H, W)
        Output: (N, C, H, W) (same shape as input)

    Examples:
        >>> m = BatchReNorm2D(100)
        >>> input = torch.randn(20, 100, 35, 45)
        >>> output = m(input)
    """

    def __init__(self, num_features, r_max=1, d_max=0, eps=1e-3, momentum=0.01, affine=True):
        super(BatchReNorm2D, self).__init__()
        self.affine = affine
        if self.affine:
            # Per-channel scale/shift, broadcastable over (N, C, H, W).
            self.weight = nn.Parameter(torch.ones((1, num_features, 1, 1)))
            self.bias = nn.Parameter(torch.zeros((1, num_features, 1, 1)))
        # Running statistics travel with state_dict but are not trained.
        self.register_buffer('running_var', torch.ones(1, num_features, 1, 1))
        self.register_buffer('running_mean', torch.zeros(1, num_features, 1, 1))
        self.r_max, self.d_max = r_max, d_max
        self.eps, self.momentum = eps, momentum

    def update_stats(self, input):
        """Compute batch statistics, the clamped re-norm corrections (r, d),
        and update the running statistics in place.

        Returns (batch_mean, batch_std, r, d); r and d are detached so the
        corrections do not receive gradients.
        """
        batch_mean = input.mean((0, 2, 3), keepdim=True)
        batch_var = input.var((0, 2, 3), keepdim=True)
        batch_std = (batch_var + self.eps).sqrt()
        running_std = (self.running_var + self.eps).sqrt()
        # r, d clamped to [1/r_max, r_max] and [-d_max, d_max] per the paper.
        r = torch.clamp(batch_std / running_std,
                        min=1 / self.r_max, max=self.r_max).detach()
        d = torch.clamp((batch_mean - self.running_mean) / running_std,
                        min=-self.d_max, max=self.d_max).detach()
        # Exponential moving average: running <- running + momentum*(batch - running).
        self.running_mean.lerp_(batch_mean, self.momentum)
        self.running_var.lerp_(batch_var, self.momentum)
        return batch_mean, batch_std, r, d

    @custom_fwd(cast_inputs=torch.float32)
    def forward(self, input):
        if self.training:
            with torch.no_grad():
                mean, std, r, d = self.update_stats(input)
            input = (input - mean) / std * r + d
        else:
            # BUG FIX: the original assigned `std = self.running_var` (a
            # variance, not a std) and then never used it, recomputing the
            # denominator inline.  Compute the actual running std once.
            mean = self.running_mean
            std = (self.running_var + self.eps).sqrt()
            input = (input - mean) / std
        if self.affine:
            return self.weight * input + self.bias
        return input


if __name__ == '__main__':
    m = BatchReNorm2D(100)
    input = torch.randn(20, 100, 35, 45)
    output = m(input)
41.430556
168
0.636272
448
2,983
4.09375
0.299107
0.041985
0.026172
0.028353
0.29771
0.29771
0.249727
0.217012
0.176663
0.134133
0
0.034035
0.251425
2,983
71
169
42.014085
0.78549
0.325176
0
0.05
0
0
0.01593
0
0
0
0
0
0
1
0.075
false
0
0.075
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7965ee6a115e0d8899378fe759bfe0554e1bc195
1,657
py
Python
prymate/token/token.py
NightShade256/prymate
deeb81ab685854599d803719971e85ead6699a90
[ "MIT" ]
6
2020-06-22T14:54:55.000Z
2021-12-13T12:33:21.000Z
prymate/token/token.py
NightShade256/prymate
deeb81ab685854599d803719971e85ead6699a90
[ "MIT" ]
null
null
null
prymate/token/token.py
NightShade256/prymate
deeb81ab685854599d803719971e85ead6699a90
[ "MIT" ]
1
2020-10-11T18:31:57.000Z
2020-10-11T18:31:57.000Z
import enum

__all__ = ["TokenType", "Token", "lookup_ident"]


class TokenType(enum.Enum):
    """The enumeration for different types of tokens."""

    ILLEGAL = "ILLEGAL"
    EOF = "EOF"

    # Identifiers and literals
    IDENT = "IDENT"
    INT = "INT"
    STRING = "STRING"

    # Operators
    ASSIGN = "="
    PLUS = "+"
    MINUS = "-"
    BANG = "!"
    ASTERISK = "*"
    SLASH = "/"
    MODULO = "%"

    # Additional
    LT = "<"
    GT = ">"
    EQ = "=="
    NOT_EQ = "!="

    # Delimiters
    COMMA = ","
    SEMICOLON = ";"
    COLON = ":"
    DOT = "."

    # Additional
    LPAREN = "("
    RPAREN = ")"
    LBRACE = "{"
    RBRACE = "}"
    LBRACKET = "["
    RBRACKET = "]"

    # Keywords
    FUNCTION = "FUNCTION"
    LET = "LET"
    TRUE = "TRUE"
    FALSE = "FALSE"
    IF = "IF"
    ELSE = "ELSE"
    RETURN = "RETURN"
    CONST = "CONST"
    WHILE = "WHILE"


class Token:
    """Represents a token: its TokenType plus the literal source text."""

    def __init__(self, tp: TokenType, literal: str) -> None:
        self.tp = tp
        self.literal = literal

    def __repr__(self) -> str:
        return f"<Token type: {self.tp} literal: {self.literal}>"

    # FIX: the original defined __str__ as a byte-for-byte copy of
    # __repr__.  str() falls back to __repr__ automatically, so the
    # duplicate method is removed; str(token) output is unchanged.


# Reserved words of the language mapped to their token types; any
# identifier not listed here lexes as a plain IDENT.
KEYWORDS = {
    "fn": TokenType.FUNCTION,
    "let": TokenType.LET,
    "true": TokenType.TRUE,
    "false": TokenType.FALSE,
    "if": TokenType.IF,
    "else": TokenType.ELSE,
    "return": TokenType.RETURN,
    "const": TokenType.CONST,
    "while": TokenType.WHILE,
}


def lookup_ident(ident: str) -> TokenType:
    """Fetch correct token type for an identifier."""
    return KEYWORDS.get(ident, TokenType.IDENT)
19.045977
65
0.540133
168
1,657
5.214286
0.440476
0.027397
0.02968
0.031963
0.107306
0.107306
0.107306
0.107306
0.107306
0.107306
0
0
0.292698
1,657
86
66
19.267442
0.74744
0.113458
0
0.033898
0
0
0.169199
0
0
0
0
0
0
1
0.067797
false
0
0.016949
0.033898
0.762712
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
7966f849a29e53c40e0aa168b93b3cd8e669d4ec
3,191
py
Python
Projects/Project 2/program.py
ymirthor/T-215-STY1
b888da1e88c5aa16eac03353f525e9e0b9d901df
[ "MIT" ]
null
null
null
Projects/Project 2/program.py
ymirthor/T-215-STY1
b888da1e88c5aa16eac03353f525e9e0b9d901df
[ "MIT" ]
null
null
null
Projects/Project 2/program.py
ymirthor/T-215-STY1
b888da1e88c5aa16eac03353f525e9e0b9d901df
[ "MIT" ]
null
null
null
from collections import deque as LL


class VM_Manager:
    """Two-level (segment table / page table) virtual-memory manager
    with a paging disk D and demand paging in PA()."""

    def __init__(self):
        # 9-bit segment, page and word fields -> 27-bit virtual addresses.
        self.s_size = 9
        self.p_size = 9
        self.w_size = 9
        self.PM = [None] * 2**19  # PM[524288]: 1024 frames x 512 words
        # BUG FIX: the original used `[[None] * 2**10] * 2**9`, which makes
        # every row an alias of ONE shared list, so writing D[i][p] silently
        # wrote through every other row.  Per the original comment the disk
        # is D[1024][512]; build 1024 independent 512-word blocks.
        self.D = [[None] * 2**9 for _ in range(2**10)]  # D[1024][512]
        self.free_frames = LL(range(2**10))
        # Frames 0 and 1 are reserved (segment table lives there).
        self.occupied_frames = [0, 1]

    def get_free_frame(self):
        """Pop candidates off the free list until an unoccupied frame appears."""
        while True:
            frame = self.free_frames.popleft()
            if frame not in self.occupied_frames:
                return frame

    def create_ST(self, s, z, f):
        """Install segment s: size z and page-table frame f.

        A negative f means the page table resides on disk block -f.
        """
        if f >= 0:
            self.occupied_frames.append(f)
        self.PM[2 * s] = z
        PT_idx = 2 * s + 1
        self.PM[PT_idx] = f

    def create_PT(self, s, p, f):
        """Install the entry for page p of segment s (frame f)."""
        PT = self.PM[2 * s + 1]
        if PT < 0:
            # Page table is on disk: record the entry in disk block -PT.
            self.D[-PT][p] = f
        else:
            self.occupied_frames.append(f)
            self.PM[PT * 512 + p] = f

    def translate_VA(self, VA):
        """Split a virtual address into (segment, page, word, page|word)."""
        s = VA >> (self.p_size + self.w_size)
        p = (VA >> self.w_size) & 2 ** self.p_size - 1
        w = VA & 2 ** self.w_size - 1
        pw = VA & 2 ** (self.p_size + self.w_size) - 1
        return s, p, w, pw

    def PA(self, s, p, w, pw):
        """Translate to a physical address, demand-paging as needed.

        Returns -1 when the offset pw exceeds the segment's size.
        """
        if pw >= self.PM[2 * s]:
            return -1
        PT = self.PM[2 * s + 1]
        if PT < 0:
            # Page table on disk: load its entry into a fresh frame.
            f1 = self.get_free_frame()
            self.PM[f1 * 512 + p] = self.D[-PT][p]
            PT = f1
        pg = self.PM[PT * 512 + p]
        if pg < 0:
            # Page on disk: allocate a frame for it.
            f2 = self.get_free_frame()
            pg = f2
        return pg * 512 + w


def line_input(string):
    """Parse whitespace-separated ints into a list of 3-int triples."""
    nested = []
    lis = []
    for idx, i in enumerate(string.split(), start=1):
        lis.append(int(i))
        if idx % 3 == 0:
            nested.append(lis)
            lis = []
    return nested


if __name__ == "__main__":
    manager_no_dp = VM_Manager()
    manager_dp = VM_Manager()

    init_dp = open('init-dp.txt', 'r')
    input_dp = open('input-dp.txt', 'r')
    init_no_dp = open('init-no-dp.txt', 'r')
    input_no_dp = open('input-no-dp.txt', 'r')

    # First init line: segment-table triples (s, z, f).
    for ST in line_input(init_dp.readline()):
        manager_dp.create_ST(*ST)
    for ST in line_input(init_no_dp.readline()):
        manager_no_dp.create_ST(*ST)

    # Second init line: page-table triples (s, p, f).
    for PT in line_input(init_dp.readline()):
        manager_dp.create_PT(*PT)
    for PT in line_input(init_no_dp.readline()):
        manager_no_dp.create_PT(*PT)

    VAs_dp = list(map(int, input_dp.readline().split()))
    VAs_no_dp = list(map(int, input_no_dp.readline().split()))

    PAs_dp = []
    for address in VAs_dp:
        PAs_dp.append(manager_dp.PA(*manager_dp.translate_VA(address)))

    PAs_no_dp = []
    for address in VAs_no_dp:
        PAs_no_dp.append(manager_no_dp.PA(*manager_no_dp.translate_VA(address)))

    print(*PAs_no_dp)
    print(*PAs_dp)
    with open('output.txt', 'w') as out:
        out.write(' '.join(map(str, PAs_no_dp)) + '\n')
        out.write(' '.join(map(str, PAs_dp)))
27.991228
62
0.531808
506
3,191
3.12253
0.181818
0.055696
0.028481
0.020253
0.375949
0.327848
0.172152
0.060759
0.060759
0
0
0.032543
0.325917
3,191
114
63
27.991228
0.701999
0.007208
0
0.086957
0
0
0.024953
0
0
0
0
0
0
1
0.076087
false
0
0.01087
0
0.152174
0.021739
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79671fc83f6656f30c6074c1b351a64eeeecad56
3,750
py
Python
src/utils/common/prediction_helper.py
Supreeth-Shetty/Projectathon---Simplified-AI
3fc26a58a9370d119811ac4e864af977c21f6c40
[ "MIT" ]
8
2021-12-23T06:05:00.000Z
2021-12-26T05:39:00.000Z
src/utils/common/prediction_helper.py
Supreeth-Shetty/Projectathon---Simplified-AI
3fc26a58a9370d119811ac4e864af977c21f6c40
[ "MIT" ]
null
null
null
src/utils/common/prediction_helper.py
Supreeth-Shetty/Projectathon---Simplified-AI
3fc26a58a9370d119811ac4e864af977c21f6c40
[ "MIT" ]
2
2021-12-23T06:10:11.000Z
2021-12-23T07:24:28.000Z
import os
from flask import session
from src.utils.common.common_helper import load_project_encdoing, load_project_model, load_project_pca, \
    load_project_scaler, read_config
from loguru import logger
from from_root import from_root
from src.utils.databases.mysql_helper import MySqlHelper
from src.preprocessing.preprocessing_helper import Preprocessing
from src.feature_engineering.feature_engineering_helper import FeatureEngineering
import pandas as pd
import numpy as np

# Module-level setup: logging sink and a shared MySQL connection.
config_args = read_config("./config.yaml")

log_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])
logger.add(sink=log_path, format="[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}", level="INFO")

mysql = MySqlHelper.get_connection_obj()

"""[Function to make prediction]
"""


def make_prediction(df):
    """Replay the project's recorded preprocessing/feature-engineering
    actions on df, then predict with the saved model.

    Reads the action log for session['pid'] from the database, applies
    each action in order, and inserts the predictions as the first
    column (named session['target_column']) of the ORIGINAL feature
    frame.  Returns the untouched feature frame when no actions were
    recorded.  Raises Exception on any failure (after logging).
    """
    try:
        logger.info(f"Started Prediction!!1")
        if df is None:
            logger.info(f"DataFrame is null")
            raise Exception("Data Frame is None")
        else:
            # session['pid'] comes from the Flask session; it is
            # interpolated (not parameterized) into this query.
            query_ = f"""Select Name, Input,Output,ActionDate from tblProject_Actions_Reports Join tblProjectActions on tblProject_Actions_Reports.ProjectActionId=tblProjectActions.Id where ProjectId={session['pid']}"""
            action_performed = mysql.fetch_all(query_)
            print(action_performed)
            # Strip the target column; df_org keeps the pre-action frame
            # so predictions can be attached to it afterwards.
            feature_columns = [col for col in df.columns if col != session['target_column']]
            df = df.loc[:, feature_columns]
            df_org = df
            if len(action_performed) > 0:
                # Each action row is (Name, Input, Output, ActionDate).
                for action in action_performed:
                    if action[0] == 'Delete Column':
                        df = Preprocessing.delete_col(df, action[1].split(","))
                    elif action[0] == 'Change Data Type':
                        df = FeatureEngineering.change_data_type(df, action[1], action[2])
                    elif action[0] == 'Column Name Change':
                        df = FeatureEngineering.change_column_name(df, action[1], action[2])
                    elif action[0] == 'Encdoing':
                        # (sic) stored action name is 'Encdoing'.
                        cat_data = Preprocessing.col_seperator(df, 'Categorical_columns')
                        num_data = Preprocessing.col_seperator(df, 'Numerical_columns')
                        encoder = load_project_encdoing()
                        # columns=action[1].split(",")
                        # df_=df.loc[:,columns]
                        df_ = encoder.transform(cat_data)
                        df = pd.concat([df_, num_data], axis=1)
                    elif action[0] == 'Scalling':
                        scalar = load_project_scaler()
                        columns = df.columns
                        df = scalar.transform(df)
                        df = pd.DataFrame(df, columns=columns)
                    elif action[0] == 'PCA':
                        pca = load_project_pca()
                        columns = df.columns
                        df_ = pca.transform(df)
                        df_ = df_[:, :int(action[1])]
                        df = pd.DataFrame(df_, columns=[f"Col_{col + 1}" for col in np.arange(0, df_.shape[1])])
                    elif action[0] == 'Custom Script':
                        if action[1] is not None:
                            # SECURITY: exec() of a script stored in the DB —
                            # arbitrary code runs with this process's rights.
                            exec(action[1])
                model = load_project_model()
                result = model.predict(df)
                # Predictions go back onto the ORIGINAL (pre-action) frame.
                df_org.insert(loc=0, column=session['target_column'], value=result)
                return df_org
            else:
                pass
        return df
    except Exception as e:
        logger.info('Error in Prediction ' + str(e))
        raise Exception(e)
43.604651
117
0.560533
414
3,750
4.89372
0.342995
0.043435
0.032577
0.015795
0.078973
0.026654
0.026654
0.026654
0
0
0
0.009259
0.3376
3,750
85
118
44.117647
0.806361
0.013333
0
0.058824
0
0.014706
0.161704
0.037695
0
0
0
0
0
1
0.014706
false
0.014706
0.147059
0
0.191176
0.014706
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
796742832becd9376d9f4aeeea9d21e1ba36ad8f
6,343
py
Python
admin/views.py
dob9601/CollaboDev
cb1c3e3c6106ec8b0db8b570204b7fd959e0284e
[ "MIT" ]
7
2018-01-22T23:21:42.000Z
2021-01-27T12:06:02.000Z
admin/views.py
ViesLink/CollaboDev
1ab51f4d5e58dd8610861095632e1cd7f91edc21
[ "MIT" ]
9
2018-03-05T23:54:12.000Z
2022-02-10T10:54:56.000Z
admin/views.py
ViesLink/CollaboDev
1ab51f4d5e58dd8610861095632e1cd7f91edc21
[ "MIT" ]
2
2020-09-30T01:27:27.000Z
2021-01-27T12:05:46.000Z
"""Views for admin app.""" import random import os import requests from django.shortcuts import render from django.contrib.auth.models import User from django.contrib.auth.decorators import user_passes_test from django.http import HttpResponseRedirect, JsonResponse from django.urls import reverse from django.core.exceptions import ObjectDoesNotExist from .models import Settings @user_passes_test(lambda u: u.is_superuser) def users(request): """User management page of administrative app.""" try: temporary_password = request.session['temp_password'] del request.session['temp_password'] except KeyError: temporary_password = '' user_list = User.objects.all() context = { 'users': user_list, 'temporary_password': temporary_password, } return render(request, 'admin/users.html', context) @user_passes_test(lambda u: u.is_superuser) def delete_user(request): """View to handle the deletion of users.""" user = User.objects.get(pk=int(request.POST['user'])) if (not user.is_superuser or request.user.profile.server_owner and user != request.user): user.delete() return HttpResponseRedirect(reverse('admin:users')) @user_passes_test(lambda u: u.is_superuser) def create_user(request): """View to handle the creation of user.""" password = ''.join(random.choice('0123456789ABCDEF') for i in range(8)) user = User.objects.create_user( username=request.POST['username'], first_name=request.POST['first_name'], last_name=request.POST['last_name'], email=request.POST['email'], password=password, ) user.clean() user.save() request.session['temp_password'] = password return HttpResponseRedirect(reverse('admin:users')) @user_passes_test(lambda u: u.is_superuser) def reset_collabodev(_request): """View to facilitate the complete reset of CollaboDev.""" settings = Settings.objects.get(pk=1) settings.settings_initialised = False os.system('python manage.py flush --noinput') return HttpResponseRedirect(reverse('admin:reset_page')) def reset_page(request): """Page displaying reset message post 
reset.""" try: Settings.objects.get(pk=1) context = { 'derail': True } except ObjectDoesNotExist: context = {} return render(request, 'admin/reset_page.html', context) @user_passes_test(lambda u: u.is_superuser) def github(request): """ Github Integration settings page. Provides administrators with the ability to associate a GitHub Organisation with CollaboDev and import all of its repositories """ session_data = dict(request.session) request.session.pop('invalid_org_name', None) request.session.pop('valid_org_name', None) settings = Settings.objects.get(pk=1) session_data['current_org'] = settings.github_org_name if request.method == 'POST': org_name = request.POST['org_name'] org_api_url = 'https://api.github.com/orgs/' + org_name org_data = requests.get(org_api_url).json() try: if org_data['login'] == org_name: settings.github_org_name = org_name settings.save() request.session['valid_org_name'] = True else: raise KeyError except KeyError: request.session['invalid_org_name'] = True return HttpResponseRedirect(reverse('admin:github')) return render(request, 'admin/github.html', session_data) @user_passes_test(lambda u: u.is_superuser) def update(_request): """Facilitates the updating of CollaboDev to its latest settings.""" update_response = '' # os.popen('git pull https://github.com/dob9601/CollaboDev.git').read() if update_response.startswith('Updating'): response = 1 elif update_response == 'Already up to date.\n': response = 2 elif update_response == '': response = -1 payload = { 'success': True, 'response': response } return JsonResponse(payload) def first_time_setup(request): """First time setup for when CollaboDev is first started up.""" settings = Settings.objects.get(pk=1) context = {} if request.method == 'POST': if 'setup-key' in request.POST: if request.POST['setup-key'] == settings.settings_setup_code: context['stage'] = 1 else: context = {} admin_pwd = request.POST['admin-password'] admin_pwd_conf = request.POST['admin-password-conf'] if admin_pwd 
== admin_pwd_conf: admin_user = User.objects.create_user( username=request.POST['admin-username'], first_name=request.POST['admin-first-name'], last_name=request.POST['admin-last-name'], email=request.POST['admin-email'], password=admin_pwd, is_superuser=True, ) admin_user.profile.server_owner = True admin_user.save() else: context['stage'] = 1 # Raise password error if context == {}: context['stage'] = 2 settings.settings_initialised = True settings.save() else: settings_model = Settings.objects.get(pk=1) print('COLLABODEV SETUP CODE: '+settings_model.settings_setup_code) context['stage'] = 0 try: open("setup-key.txt", "r") if settings.settings_setup_code == "": raise FileNotFoundError except FileNotFoundError: key = ''.join(random.choice('0123456789ABCDEF') for i in range(16)) key_string = "CollaboDev Setup Code: " + key with open("setup-key.txt", "w") as key_file: key_file.write(key_string) settings.settings_setup_code = key settings.save() return render(request, 'admin/first-time-setup.html', context)
31.715
80
0.610752
710
6,343
5.304225
0.24507
0.040892
0.026022
0.031864
0.241901
0.193309
0.132236
0.132236
0.086033
0.066915
0
0.00856
0.281728
6,343
199
81
31.874372
0.818042
0.096011
0
0.23741
0
0
0.121389
0.008775
0
0
0
0
0
1
0.057554
false
0.122302
0.071942
0
0.194245
0.007194
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
7967b563a3566c5502b7994c82c942466d518c87
2,035
py
Python
src/spaceone/repository/api/v1/schema.py
whdalsrnt/repository
4d019c21508629faae7a7e2789bf540a3bab0e20
[ "Apache-2.0" ]
6
2020-06-04T23:00:33.000Z
2020-08-10T02:45:43.000Z
src/spaceone/repository/api/v1/schema.py
whdalsrnt/repository
4d019c21508629faae7a7e2789bf540a3bab0e20
[ "Apache-2.0" ]
4
2020-10-09T07:02:27.000Z
2022-02-28T04:43:01.000Z
src/spaceone/repository/api/v1/schema.py
whdalsrnt/repository
4d019c21508629faae7a7e2789bf540a3bab0e20
[ "Apache-2.0" ]
6
2020-06-01T10:10:57.000Z
2021-10-05T03:03:00.000Z
from spaceone.api.repository.v1 import schema_pb2, schema_pb2_grpc
from spaceone.core.pygrpc import BaseAPI


class Schema(BaseAPI, schema_pb2_grpc.SchemaServicer):
    """gRPC servicer for repository schema operations.

    Every handler follows the same pattern: parse the incoming request,
    obtain 'SchemaService' from the service locator, delegate the call,
    and wrap the result in the corresponding protobuf info message.
    """

    pb2 = schema_pb2
    pb2_grpc = schema_pb2_grpc

    def create(self, request, context):
        """Create a schema and return its SchemaInfo message."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            created = service.create(params)
            return self.locator.get_info('SchemaInfo', created)

    def update(self, request, context):
        """Update a schema and return its SchemaInfo message."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            updated = service.update(params)
            return self.locator.get_info('SchemaInfo', updated)

    def delete(self, request, context):
        """Delete a schema and return an EmptyInfo message."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            service.delete(params)
            return self.locator.get_info('EmptyInfo')

    def get(self, request, context):
        """Fetch a single schema and return its SchemaInfo message."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            fetched = service.get(params)
            return self.locator.get_info('SchemaInfo', fetched)

    def list(self, request, context):
        """List schemas with a total count, optionally in minimal form."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            schemas, total = service.list(params)
            return self.locator.get_info(
                'SchemasInfo', schemas, total,
                minimal=self.get_minimal(params))

    def stat(self, request, context):
        """Run a statistics query over schemas and return StatisticsInfo."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            return self.locator.get_info('StatisticsInfo', service.stat(params))
46.25
116
0.704668
249
2,035
5.558233
0.176707
0.121387
0.121387
0.104046
0.736994
0.719653
0.676301
0.676301
0.676301
0.676301
0
0.004905
0.198526
2,035
44
117
46.25
0.843654
0
0
0.441176
0
0
0.069745
0
0
0
0
0
0
1
0.176471
false
0
0.058824
0
0.5
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
796832284ec5beb0d93e3de2098cee7d04cbed89
18,718
py
Python
examples/connections.py
Thinker83/remote-computer-manager
1ea8353e77fc13a98625d744162f789503a8f400
[ "MIT" ]
null
null
null
examples/connections.py
Thinker83/remote-computer-manager
1ea8353e77fc13a98625d744162f789503a8f400
[ "MIT" ]
null
null
null
examples/connections.py
Thinker83/remote-computer-manager
1ea8353e77fc13a98625d744162f789503a8f400
[ "MIT" ]
null
null
null
from computer_communication_framework.base_connection import Connection
import subprocess
import re
import datetime


class BasePbs(Connection):
    """Template connection class for a standard PBS/TORQUE cluster.

    Inherits from base_connection.Connection but deliberately leaves some of
    Connection's abstract methods (e.g. checkDiskUsage) undefined, so it is
    not directly instantiable: subclass it and supply the missing methods.
    It provides the atomistic commands (queue inspection, submission-script
    creation, job-ID parsing) from which more specific programs are built.
    """

    def __init__(self, cluster_user_name, ssh_config_alias, path_to_key,
                 forename_of_user, surname_of_user, user_email,
                 base_output_path='/base/output/path',
                 base_runfiles_path='/base/run/file/path',
                 master_dir='/master/dir',
                 info_about_cluster='Example Cluster Name (ECN): Advanced Computing Research Centre, somewhere.',
                 activate_virtual_environment_list=None):
        """Store cluster-specific settings on top of the base Connection.

        Args:
            activate_virtual_environment_list (list of str | None): Shell
                commands that activate the remote Python environment. BUG
                FIX: this was previously a mutable default argument (a list
                shared across all instances); it is now built per-instance.
            (remaining arguments are forwarded to Connection.__init__ or
            stored verbatim on the instance).
        """
        if activate_virtual_environment_list is None:
            activate_virtual_environment_list = [
                'module add python-anaconda-4.2-3.5',
                'source activate virtual_environment_name']
        Connection.__init__(self, cluster_user_name, ssh_config_alias,
                            path_to_key, forename_of_user, surname_of_user,
                            user_email)
        self.submit_command = 'qsub'  # PBS/TORQUE submission binary
        self.information_about_cluster = info_about_cluster
        self.base_output_path = base_output_path
        self.base_runfiles_path = base_runfiles_path
        self.master_dir = master_dir
        self.activate_venv_list = activate_virtual_environment_list

    # INSTANCE METHODS

    def checkQueue(self, job_number):
        """Return the array indices of *job_number* still in the queue.

        Satisfies the abstract interface inherited from Connection.

        Args:
            job_number (int): The unique integer PBS assigned to the job
                (which may itself be an array of jobs).

        Returns:
            output_dict (dict): Has keys 'return_code', 'stdout', and
                'stderr'; stdout carries the still-running array indices.
        """
        # -t flag shows all array jobs related to one job number, if that job is an array.
        # NOTE(review): assumes self.user_name is set by the Connection base
        # class — confirm against base_connection.py.
        grep_part_of_cmd = "qstat -tu " + self.user_name + " | grep \'" + str(job_number) + "\' | awk \'{print $1}\' | awk -F \"[][]\" \'{print $2}\'"
        # All remote commands are routed through checkSuccess (inherited
        # from Connection) so failures are surfaced uniformly.
        output_dict = self.checkSuccess(self.sendCommand([grep_part_of_cmd]))
        return output_dict

    def createPbsSubmissionScriptTemplate(self, pbs_job_name, no_of_nodes,
                                          no_of_cores, walltime, queue_name,
                                          job_number, outfile_name_and_path,
                                          errorfile_name_and_path,
                                          initial_message_in_code=None,
                                          shebang="#!/bin/bash"):
        """Build the generic (job-agnostic) part of a PBS submission script.

        Returns a list of strings, one per script line, containing the PBS
        directives and some debugging echoes; job-specific code is appended
        by the caller.

        Args:
            pbs_job_name (str): Name given to the queuing system (#PBS -N).
            no_of_nodes (int): Number of nodes to request.
            no_of_cores (int): Number of cores per node to request.
            walltime (str): Maximum run time, in the form 'HH:MM:SS'.
            queue_name (str): Which of the cluster's queues to submit to.
            job_number (int or str): Array specification for the
                '#PBS -t' directive (e.g. '1-500').
            outfile_name_and_path (str): Absolute path for stdout files.
            errorfile_name_and_path (str): Absolute path for stderr files.
            initial_message_in_code (str | None): Optional comment placed
                near the top of the script; omitted when None. No leading
                '#' is needed.
            shebang (str): Interpreter line for the script.

        Returns:
            list_of_pbs_commands (list of str): The script lines in order.
        """
        # add the first part of the template to the list
        list_of_pbs_commands = [shebang + "\n", "\n", "# This script was created using Oliver Chalkley's computer_communication_framework library - https://github.com/Oliver-Chalkley/computer_communication_framework." + "\n", "# "]

        # Only want to put the users initial message if she has one
        if initial_message_in_code is not None:
            list_of_pbs_commands += [initial_message_in_code + "\n"]

        # add the next part of the template
        # BUG FIX: this previously used '=' and silently discarded the
        # shebang/header lines built above; it must append.
        # NOTE(review): 'surename_of_user' looks like a typo for
        # 'surname_of_user' — kept because the attribute is defined by the
        # Connection base class; confirm its spelling there.
        list_of_pbs_commands += ["# Title: " + pbs_job_name + "\n", "# User: " + self.forename_of_user + ", " + self.surename_of_user + ", " + self.user_email + "\n"]

        # Only want to put affiliation if there is one
        # BUG FIX: 'type(self.affiliation) is not None' is always true
        # (type() never returns None) and crashed on a None affiliation.
        if self.affiliation is not None:
            list_of_pbs_commands += ["# Affiliation: " + self.affiliation + "\n"]

        # add the next part of the template to the list
        list_of_pbs_commands += ["# Last Updated: " + str(datetime.datetime.now()) + "\n", "\n",
                                 "## Job name" + "\n",
                                 "#PBS -N " + pbs_job_name + "\n", "\n",
                                 "## Resource request" + "\n",
                                 "#PBS -l nodes=" + str(no_of_nodes) + ":ppn=" + str(no_of_cores) + ",walltime=" + walltime + "\n",
                                 "#PBS -q " + queue_name + "\n", "\n",
                                 "## Job array request" + "\n",
                                 # BUG FIX: previously referenced the undefined name
                                 # 'job_array_numbers' (a NameError); the job_number
                                 # parameter supplies the -t array specification.
                                 "#PBS -t " + str(job_number) + "\n", "\n",
                                 "## designate output and error files" + "\n",
                                 # BUG FIX: -e takes the error file and -o the output
                                 # file (qsub semantics); the paths were swapped.
                                 "#PBS -e " + errorfile_name_and_path + "\n",
                                 "#PBS -o " + outfile_name_and_path + "\n", "\n",
                                 "# print some details about the job" + "\n",
                                 'echo "The Array ID is: ${PBS_ARRAYID}"' + "\n",
                                 'echo Running on host `hostname`' + "\n",
                                 'echo Time is `date`' + "\n",
                                 'echo Directory is `pwd`' + "\n",
                                 'echo PBS job ID is ${PBS_JOBID}' + "\n",
                                 'echo This job runs on the following nodes:' + "\n",
                                 'echo `cat $PBS_NODEFILE | uniq`' + "\n", "\n"]

        return list_of_pbs_commands

    def createStandardSubmissionScript(self, file_name_and_path,
                                       list_of_job_specific_code, pbs_job_name,
                                       no_of_nodes, no_of_cores, queue_name,
                                       outfile_name_and_path,
                                       errorfile_name_and_path, walltime,
                                       initial_message_in_code=None,
                                       file_permissions="700",
                                       shebang="#!/bin/bash",
                                       job_number=None):
        """Create a complete PBS submission script and write it to a file.

        Combines the generic template from
        createPbsSubmissionScriptTemplate with the caller-supplied
        job-specific code, writes the result to *file_name_and_path*, and
        optionally sets its permissions.

        Args:
            file_name_and_path (str): Absolute path of the script to write,
                e.g. /path/to/file/pbs_submission_script.sh.
            list_of_job_specific_code (list of str): Lines appended to the
                end of the submission script.
            job_number (int or str | None): Array specification for the
                '#PBS -t' directive (new optional parameter; the original
                code referenced an undefined 'job_number' name here).
            file_permissions (str | None): chmod mode for the script; 700
                (read/write/execute for the owner only) by default. The
                script must be executable to be submitted. None skips the
                chmod step.
            (remaining arguments are forwarded to
            createPbsSubmissionScriptTemplate unchanged.)
        """
        # Create the PBS template.
        # BUG FIX: this previously called the nonexistent method
        # 'createPbsSubmissionScriptCommands' with the arguments in the
        # wrong order; keyword arguments make the forwarding explicit.
        pbs_script_list = self.createPbsSubmissionScriptTemplate(
            pbs_job_name, no_of_nodes, no_of_cores, walltime, queue_name,
            job_number, outfile_name_and_path, errorfile_name_and_path,
            initial_message_in_code=initial_message_in_code, shebang=shebang)

        # Add the code that is specific to this job
        pbs_script_list += list_of_job_specific_code

        # write the code to a file
        # BUG FIX: the permissions parameter is now forwarded instead of a
        # hard-coded "700". NOTE(review): the keyword 'file_permisions'
        # (one 's') matches the original call — confirm the spelling in
        # Connection.createLocalFile before renaming it.
        Connection.createLocalFile(file_name_and_path, pbs_script_list,
                                   file_permisions=file_permissions)

        # change the permissions if neccessary
        # BUG FIX: previously chmod-ed the undefined name 'output_filename'.
        if file_permissions is not None:
            subprocess.check_call(["chmod", str(file_permissions),
                                   str(file_name_and_path)])

        return

    def getJobIdFromSubStdOut(self, stdout):
        """Extract the integer job ID from the stdout of a job submission.

        Args:
            stdout (str): The stdout printed after submitting a job.

        Returns:
            int: The job ID of the submitted job.

        Note:
            Raises AttributeError if *stdout* contains no digits at all.
        """
        return int(re.search(r'\d+', stdout).group())
76.713115
884
0.678865
2,811
18,718
4.320527
0.165777
0.017456
0.029642
0.023055
0.497242
0.449238
0.397448
0.356608
0.339481
0.33133
0
0.003625
0.233732
18,718
243
885
77.028807
0.843129
0.737793
0
0
0
0.028571
0.228827
0.018132
0.028571
0
0
0
0
1
0.142857
false
0
0.114286
0
0.4
0.057143
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
796b67b9479d04170cd02e4d71dc7ae51ab5fc75
13,795
py
Python
src/util.py
lambertwang/mastery
772bdeb10e014391835d267069afc820a113d2b2
[ "MIT" ]
1
2017-12-01T03:30:34.000Z
2017-12-01T03:30:34.000Z
src/util.py
lambertwang/mastery
772bdeb10e014391835d267069afc820a113d2b2
[ "MIT" ]
1
2017-11-13T18:46:39.000Z
2017-11-13T18:46:39.000Z
src/util.py
lambertwang/mastery
772bdeb10e014391835d267069afc820a113d2b2
[ "MIT" ]
null
null
null
"""Procedural text-generation utilities.

Builds Markov-chain generators and word lists from data files at import
time, then exposes small generator functions that the `expand` template
engine dispatches to by name (via globals()) when it meets `<tag>`
placeholders in a sentence.
"""
import random
import re
import json
from combat import *
from travel import *
from pdb import set_trace


def load_words(path):
    """Yield non-empty, non-comment (not starting with '#') lines of *path*, stripped."""
    with open(path, 'r') as f:
        for line in f:
            clean_line = line.strip()
            if clean_line and not clean_line[0] == "#":
                yield clean_line


class MarkovGenerator:
    """Character-level Markov chain over a corpus of words.

    `length` is the order of the chain: each transition key is a tuple of
    the previous `length` characters (None-padded at word start).
    """

    def __init__(self, words, length):
        self.length = length
        # transitions: maps a key tuple to the list of observed next
        # characters (None marks end-of-word); duplicates act as weights.
        self.transitions = {}
        for word in words:
            key = (None,) * length
            for char in word:
                self.addTransition(key, char)
                key = key[1:] + (char,)
            # record end-of-word so generate() knows where words may stop
            self.addTransition(key, None)

    def addTransition(self, key, char):
        """Record that *char* was observed after the context *key*."""
        if key not in self.transitions:
            self.transitions[key] = []
        self.transitions[key].append(char)

    def generate(self):
        """Random-walk the chain from the start state and return one word."""
        result = []
        key = (None,) * self.length
        while key in self.transitions:
            next_char = random.choice(self.transitions[key])
            if next_char is None:
                # hit an observed end-of-word marker
                break
            result.append(next_char)
            key = key[1:] + (next_char,)
        return ''.join(result)


# Module-level corpora, loaded at import time from paths relative to the
# working directory — importing this module requires the ../data files
# and ../monsters.json to exist.
town_generator = MarkovGenerator(load_words('../data/towns.txt'), 2)
name_generator = MarkovGenerator(load_words('../data/names_male.txt'), 3)
occupation_list = list(load_words('../data/occupations.txt'))
color_list = list(load_words('../data/colors.txt'))
landform_list = list(load_words('../data/landforms.txt'))
weapon_list = list(load_words('../data/weapons.txt'))

with open('../monsters.json', 'r') as monster_file:
    monsters_list = json.load(monster_file)


def expand(sentence, **kwargs):
    """Recursively replace <tag> placeholders in *sentence*.

    `<name>` calls the module-level function `name()`; `<!name>` looks up
    kwargs['name']; `<name:filter>` post-processes the replacement with the
    module-level function `filter`. Replacements are applied right-to-left
    so earlier match positions stay valid, and the loop repeats until no
    tags remain (replacements may themselves contain tags).

    NOTE(review): a tag whose expansion always produces another tag would
    loop forever — assumed not to occur in the template data.
    """
    # set_trace()
    while True:
        matches = list(re.finditer('<([!a-zA-Z0-9:_]*?)>', sentence))
        if not matches:
            return sentence
        for match in reversed(matches):
            parts = match.group(1).split(':')
            if parts[0][0] == '!':
                # <!name>: substitute the caller-supplied keyword argument
                replacement = kwargs[parts[0][1:]]
            else:
                # <name>: call the generator function of that name
                replacement = globals()[parts[0]]()
            if len(parts) >= 2:
                # <name:filter>: pipe the replacement through a filter function
                replacement = globals()[parts[1]](replacement)
            sentence = sentence[:match.start(0)] + replacement + sentence[match.end(0):]


def title(words):
    """Capitalize the first letter of every space-separated word."""
    return ' '.join((word[0].upper() + word[1:]) for word in words.split(' '))


def sentence(words):
    """Capitalize only the first character of *words*."""
    return words[0].upper() + words[1:]


def book_title():
    return '# <!pc_name>\'s Journey to Defeat the Evil Wizard <!wiz_name> _(and his many battles along the way)_\n\n'


def chapter_title(title):
    # markdown heading with an HTML anchor for intra-document links
    return '## <a name="chapter<!chapter_number>"></a> ' + title + '\n\n'


def chapter_title_plain():
    return 'Chapter <!chapter_number>: <!town_name> and the <!monster_name:title>'


# Thin wrappers so expand() can dispatch to these by tag name.

def town():
    return town_generator.generate()


def name():
    return name_generator.generate()


def occupation():
    return random.choice(occupation_list)


def color():
    return random.choice(color_list)


def landform():
    return random.choice(landform_list)


def weapon():
    return random.choice(weapon_list)


def positive_trait():
    return random.choice([
        'bold', 'courageous', 'daring', 'epic', 'fearless', 'gallant',
        'grand', 'gutsy', 'noble', 'valiant', 'classic', 'elevated',
        'bigger than life', 'dauntless', 'doughty', 'exaggerated',
        'fire-eating', 'grandiose', 'gritty', 'gutty', 'high-flown',
        'impavid', 'inflated', 'intrepid', 'lion-hearted', 'mythological',
        'tall standing', 'stouthearted', 'unafraid', 'valorous', 'undaunted'
    ])


def negative_trait():
    return random.choice([
        'hideous', 'smelly', 'terrible', 'menacing', 'awful', 'ruinous',
        'evil', 'abhorrent', 'abominable', 'appalling', 'awful', 'cruel',
        'disgusting', 'dreadful', 'eerie', 'frightful', 'ghastly', 'grim',
        'grisly', 'gruesome', 'heinous', 'hideous', 'horrendous', 'horrid',
        'lousy', 'nasty', 'scandalous', 'scary', 'shameful', 'shocking',
        'terrible', 'terrifying', 'beastly', 'detestable', 'disagreeable',
        'execrable', 'fairy', 'fearful', 'loathsome', 'lurid', 'mean',
        'obnoxious', 'offensive', 'repellent', 'repulsive', 'revolting',
        'sickie', 'ungodly', 'unholy', 'unkind'
    ])


def pc_name():
    # 'he' appears four times to weight the choice towards pronouns
    return random.choice([
        '<!pc_name>', 'the <positive_trait> <!pc_name>',
        '<!pc_name> the <positive_trait>', 'our hero', 'the adventurer',
        'he', 'he', 'he', 'he'
    ])


def activity():
    return random.choice([
        'sat by the side of the road',
        'rushed by quickly, ignoring him',
        'gazed at him from an open window',
        'talked excitedly with what appeared to be a <occupation>',
        'slowly carried supplies',
        'slept in an alleyway',
        'eyed him suspiciously',
        'scuttled out of his way',
        'stood by a market stall, negotiating with the <occupation>',
        'hawked fine imported goods from <town>',
        'bit into an apple',
        'finished an apple and tossed the core aside',
        'ran from person to person, asking if they had seen <name>',
        'loaded a market stall with wares',
        'threw punches'
    ])


def town_people_sentence():
    return random.choice([
        'A <occupation> <activity>.',
        'While the <occupation> <activity>, a <occupation> <activity>.',
        'Two <occupation>s <activity>.',
        'The <occupation> <activity> with a <occupation>.',
        'Nearby, a <occupation> <activity>.'
    ])


def character_attribute():
    return random.choice([
        'unusual weapons', 'foreboding cloak', 'impressive armor',
        'strong forearms', 'well-made boots', 'determined look',
        'dangerous demeanor'
    ])


def number():
    return str(random.randint(2, 10))


def building():
    return random.choice([
        'tavern', 'inn', 'barn', 'church', 'monastery', 'cattle barn',
        'stables', 'warehouse'
    ])


def direction():
    return random.choice([
        'left', 'right', 'left'  # Bias towards left (for some reason)
    ])


def in_town_directions_end():
    return random.choice([
        'It\'s just to the <direction>.',
        'There\'s a small door.',
        'Look for the large hanging sign that reads \"<!armor_name> Fine Supplies\".'
    ])


def in_town_directions():
    # NOTE(review): two options recursively contain <in_town_directions>,
    # so directions can nest to random depth during expand().
    return random.choice([
        'down the street to the <building> and <direction>. You\'ll see a <building>. It\'s <in_town_directions>',
        'past the <building>. <in_town_directions_end>',
        'into the market and towards the <building>. Eventually you need to walk <in_town_directions>',
        'just a bit further down the street. <in_town_directions_end>'
    ])


def town_intro():
    """Assemble the randomized town-arrival scene from canned fragments."""
    return (
        '<!pc_name> followed a dirt path into the village of <!town_name>. <town_people_sentence> <town_people_sentence> '
        '<!pc_name> continued down the path. <town_people_sentence>\n\n'
        'Eventually, <!pc_name> arrived at the town square, where he found a <occupation>. ' + random.choice([
            'The man, eying his <character_attribute>, beckoned him forward.\n\n'
            '"Not many people around here like you." he said gruffly. "What makes you think you can step foot in these parts?"\n\n',
            '<!pc_name> approached him, hoping for some advice.\n\n'
        ]) + random.choice([
            '"My name is <!pc_name>, and it is my quest to defeat the evil wizard <!wiz_name>." <!pc_name> announced.\n\n',
            '"The evil wizard <!wiz_name> has terrorized these lands for far too long. I <!pc_name> have come to destroy him!" <!pc_name> exclaimed.\n\n',
            '"Do you remember the glory days before the evil wizard <!wiz_name> took over?" <!pc_name> asked. '
            '"I seek to destroy him and restore this kingdom\'s rightful rule!"\n\n'
        ]) + '<town_people_sentence> ' + random.choice([
            'The man eyed him thoughtfully',
            'He still looked suspicious',
            'The man sat in silence for a while',
            'The man quietly reminised about the past'
        ]) + random.choice([
            ', then finally responded.\n\n',
            ', but eventually responded.\n\n',
            'He finally responded.\n\n'
        ]) + random.choice([
            '"We have waited for your arrival for many years, <!pc_name>. Is there any way I can be of help?"\n\n',
            '"Our village of <!town_name> will gladly help you on your quest. What do you need?"\n\n'
        ]) + '"My weapons were badly damaged on the way here. Could you point me to your armory to get some new supplies?"\n\n' + random.choice([
            '"<!armor_name> is the best in town. His shop is <in_town_directions> ',
            '"The armory is <in_town_directions> You\'ll find <!armor_name>, the best weapons expert we\'ve got. ',
            '"<!armor_name> is <in_town_directions> Tell him I sent you. '
        ]) + random.choice([
            'And here, take a few gold pieces to buy the best." He reached into his pocket and pulled out <number> small coins. '
            '"I want that <!wiz_name> gone as much as anybody."\n\n',
            'Be careful out there. You\'re not the first to try this adventure. Men stronger than you have vanished or worse."\n\n',
            'I\'d show you myself, but I have urgent matters to attend to here in the square."\n\n'
        ]) + '<!pc_name> hurried towards the armory. <town_people_sentence> <town_people_sentence> '
        'Turning the corner, he saw the armory in front of him. He pushed the door open and walked inside.\n\n'
    )


def monster_name():
    return random.choice([monster['name'].strip() for monster in monsters_list])


def monster_description(name):
    """Return the description list for monster *name*, or a canned fallback."""
    matches = [monster for monster in monsters_list if monster['name'].strip() == name]
    if matches and matches[0]['description']:
        return matches[0]['description']
    else:
        return ['The monster ' + name + ' is terrifying for sure, but I honestly don\'t know much about that beast.']


def armory_intro():
    """Assemble the randomized armory-entrance scene."""
    return (
        random.choice([
            '<!armor_name> looked up from his work behind a counter at <!pc_name>.\n\n',
            'There was no one there. <!pc_name> cleared his throat and a man ran out from a backroom.\n\n'
        ]) + '"I\'m <!pc_name>, a brave adventurer seeking to destroy <!wiz_name>. What dangers lurk nearby?" he asked.\n\n' + random.choice([
            '<!armor_name> grabbed a dusty book from the shelf and flipped through it. Pictures of <monster_name>s and <monster_name>s flew by. '
            'Eventually he settled on a page and started to explain.\n\n',
            '<!armor_name> lifted up his tunic and pointed to a scar. "You see this?" he asked. "Only one monster can do this kind of damage. The <!monster_name>."\n\n',
            '"Brave you say? You may have fought the <monster_name>, or perhaps even the <monster_name>, but that\'s nothing compared to the <!monster_name> we\'ve got."\n\n'
        ])
    )


def armory_explanation():
    return random.choice([
        '"<!description>" <!armor_name> explained.\n\n',
        'The armorer sighed and continued. "<!description>"\n\n',
        '<!armor_name> returned to the book of monsters on the desk and pointed at the terrifying illustration. "<!description>"\n\n'
    ])


def armory_more():
    return random.choice([
        '<!pc_name> looked surprised. "Incredible! Is there anything else I should know?"\n\n',
        '"But my weapons may be too weak. Are there any other ways to defeat the <!monster_name>?" <!pc_name> asked.\n\n',
        '<!pc_name> slipped the man <number> coins. "I get the feeling you\'ve been here for a while. Surely you know more than that."\n\n',
        '"I could handle that. Tell me again, what makes the <!monster_name> so bad?" <!pc_name> responded.\n\n'
    ])


def armory_no_more():
    return random.choice([
        '"That\'s all I can tell you."\n\n',
        '"Anything else you need to know can be found it the book. Take your time." He took the book of monsters and handed it to <!pc_name>.\n\n',
        '"Look I\'ve got other things to attend to. Do you need weapons or not?" His frusturation was visible.\n\n'
    ])


def armory_new_weapon(old_weapon):
    """Scene in which *old_weapon* (str) is replaced by a new one."""
    return (
        'As <!pc_name> turned to leave the armory, <!armor_name> called out\n\n' + random.choice([
            '"Before you go, get rid of that useless ' + old_weapon + '. It won\'t make a dent against the carapace of the <!monster_name>." ',
            '"Wait, you\'ll need a weapon worthy of your great cause. That rusty ' + old_weapon + ' won\'t do." '
        ]) + '\n\n' + random.choice([
            '"Take this <!pc_weapon>. It has served a well over a dozen adventureres before you and it should serve you well too."\n\n',
            '"Forged by the finest dwarven smiths in the mountains of <town>, this <!pc_weapon> is the finest display of craftsmanship for miles around."\n\n'
        ])
    )
35.01269
174
0.58137
1,748
13,795
4.497712
0.316362
0.009921
0.0435
0.010684
0.068685
0.022132
0.007123
0
0
0
0
0.002654
0.289888
13,795
393
175
35.101781
0.799918
0.003407
0
0.178161
0
0.048851
0.443288
0.025464
0
0
0
0
0
1
0.100575
false
0
0.020115
0.083333
0.218391
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
796c208b5ef0105c3a346b49387aabac0584232a
5,937
py
Python
soc-tools/reporting/report_splitter.py
michalk68/soc-tools
8d4c8fd53624817c1126c72d757878f305151446
[ "MIT" ]
null
null
null
soc-tools/reporting/report_splitter.py
michalk68/soc-tools
8d4c8fd53624817c1126c72d757878f305151446
[ "MIT" ]
null
null
null
soc-tools/reporting/report_splitter.py
michalk68/soc-tools
8d4c8fd53624817c1126c72d757878f305151446
[ "MIT" ]
1
2020-01-25T08:55:41.000Z
2020-01-25T08:55:41.000Z
import csv import argparse import os class ReportSplitter: def __init__(self, values, columns, file, output_folder=None, verbose=False, case_insensitive=True, contains_value=False): self.values = values self.columns = columns self.file = file self.output_folder = output_folder self._file_mapping = {} self._opened_files = [] self.verbose = verbose self.case_insensitive = case_insensitive self.contains_value = contains_value if self.output_folder is None: self.output_folder = os.getcwd() def split(self): if self.verbose: print("Values used for indexing:") print(self.values) print("Columns that will be indexed:") print(self.columns) print("File that will be splitted: " + self.file) print("Output folder: " + self.output_folder) print("Case insensitivity enabled: " + self.case_insensitive) print("Value contained in indexed column: " + self.contains_value) print("Starting...") try: self._file_exists(self.file) self._folder_exists(self.output_folder) if self.case_insensitive: values = self._values_to_lowecase(self.values) else: values = self.values with open(self.file) as csvfile: reader = csv.DictReader(csvfile) self._verify_column_names(reader.fieldnames) self._create_files(reader.fieldnames, values) # Reading row by row for row in reader: # For each row checking columns that contain indexed data for column in self.columns: if self.case_insensitive: column_value = row[column].lower() else: column_value = row[column] # If indexed value in the column, writing this line to appropriate file if self.contains_value: for v in values: if v in column_value: self._write_line_to_file(v, row) else: if column_value in values: self._write_line_to_file(column_value, row) self._close_files() except Exception as err: print(err) return if self.verbose: print("Finished...") print("Following files were created:") for file in self._opened_files: print(file.name) def _write_line_to_file(self, value, row): self._file_mapping[value].writerow(row) def _folder_exists(self, folder): if not 
os.path.exists(folder): raise Exception("ERROR - folder " + folder + " doesn't exist!") if not os.path.isdir(folder): raise Exception("ERROR - " + folder + " is not a folder!") if not os.access(folder, os.W_OK): raise Exception("ERROR - folder " + folder + " is not writable!") def _file_exists(self, file): if not os.path.exists(file): raise Exception("ERROR - file " + file + " doesn't exist!") if not os.path.isfile(file): raise Exception("ERROR - " + file + " is not a file!") if not os.access(file, os.R_OK): raise Exception("ERROR - file " + file + " is not readable!") def _verify_column_names(self, fieldnames): for column in self.columns: if column not in fieldnames: raise Exception( "ERROR - Column " + column + " not found to be a in the CSV file. Maybe case sensitivity issue?") def _create_files(self, fieldnames, values): try: for value in values: file_name = os.path.join(self.output_folder, value.replace(".", "_") + ".csv") csvfile = open(file_name, 'w') writer = csv.DictWriter(csvfile, fieldnames) writer.writeheader() self._file_mapping[value] = writer self._opened_files.append(csvfile) except Exception as err: raise err def _values_to_lowecase(self, list): new_list = [] for value in list: new_list.append(value.lower()) return new_list def _close_files(self): for file in self._opened_files: file.close() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-v", "--value_list", help="List of values based on which should the report be splitted. " + "Accepts list of comma separated values") parser.add_argument("-c", "--column_list", help="List of columns that will be searched for indexing." 
+ "Accepts list of comma separated values") parser.add_argument("file", help="File that should be splitted") parser.add_argument("-o", "--output_folder", help="Folder where the output should be placed") parser.add_argument("-p", "--verbose", help="Verbose mode", action='store_true') parser.add_argument("-i", "--case_insensitive", help="Allows to enable case insensitivity.", action='store_true') parser.add_argument("-x", "--contains_value", help="If enabled, value needs to be only contained in the column. No need for the exact match.", action='store_true') args = parser.parse_args() report_splitter = ReportSplitter(args.value_list.split(","), args.column_list.split(","), args.file, args.output_folder, args.verbose) report_splitter.split()
41.229167
120
0.561732
666
5,937
4.828829
0.225225
0.041045
0.041356
0.013682
0.174129
0.094527
0.044776
0.031095
0.031095
0
0
0
0.343945
5,937
143
121
41.517483
0.825674
0.024255
0
0.144068
0
0
0.172223
0
0
0
0
0
0
1
0.076271
false
0
0.025424
0
0.127119
0.110169
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
796dec29764e9116f7092158c4657486b2e11567
1,899
py
Python
go/guru.py
x0rzkov/sublime-go
b77d78594caed017f040fe6c4168e525a563e28b
[ "MIT" ]
51
2019-08-18T18:18:42.000Z
2022-02-09T07:44:42.000Z
go/guru.py
x0rzkov/sublime-go
b77d78594caed017f040fe6c4168e525a563e28b
[ "MIT" ]
28
2019-08-19T04:10:52.000Z
2020-12-09T16:39:26.000Z
go/guru.py
localhots/sublime-go
960e72dafdb6c69d78bb5cbd88052540342517b9
[ "MIT" ]
4
2019-11-12T20:39:54.000Z
2021-07-30T09:57:32.000Z
from . import decorators from . import exec from . import log import os.path as path import sublime import time import json @decorators.thread @decorators.trace def source(view): locate(view) def call(mode, filename, region): """ Call calls guru(1) with the given `<mode>` filename and point. """ file = "{}:#{},#{}".format(filename, region.begin(), region.end()) args = ["--json", mode, file] cmd = exec.Command("guru", args=args) res = cmd.run() if res.code == 0: return json.loads(res.stdout) def locate(view): """ Locate returns the location of the symbol at the cursor, empty string is returned if no symbol is found. """ file = view.file_name() pos = view.sel()[0] resp = call("describe", file, pos) if resp == None: return if resp["detail"] == "value": if 'objpos' in resp['value']: open_position(view, resp['value']['objpos']) return if resp["detail"] == "type": if "namepos" in resp["type"]: open_position(view, resp['type']['namepos']) return if 'built-in type' in resp['desc']: symbol = resp['type']['type'] cwd = path.dirname(file) goroot = exec.goenv(cwd)['GOROOT'] src = path.join(goroot, 'src', 'builtin', 'builtin.go') win = view.window() open_symbol(view, src, symbol) return log.error("guru(1) - unknown response {}", resp) return "" def open_position(view, src): win = view.window() win.open_file(src, sublime.ENCODED_POSITION) def open_symbol(view, src, symbol): win = view.window() new_view = win.open_file(src) show(new_view, symbol) sublime.set_timeout(lambda: show(new_view, symbol), 20) def show(view, symbol): if view.is_loading(): sublime.set_timeout(lambda: show(view, symbol), 30) return for sym in view.symbols(): if symbol in sym[1]: sel = sublime.Selection(0) sel.add(sym[0]) view.show(sel)
22.879518
68
0.636651
271
1,899
4.405904
0.365314
0.033501
0.040201
0.030151
0.083752
0
0
0
0
0
0
0.007266
0.202738
1,899
82
69
23.158537
0.781374
0.087941
0
0.135593
0
0
0.101765
0
0
0
0
0
0
1
0.101695
false
0
0.118644
0
0.338983
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
796e2d57bc64a8e281c2292d35bc9ea91f00edac
1,344
py
Python
tests/test_key.py
ksurdacki/cipher21
8dfdab32299cb8377435e5f57ec6a9aaf33891eb
[ "MIT" ]
2
2021-05-02T21:15:38.000Z
2021-05-03T07:45:09.000Z
tests/test_key.py
ksurdacki/cipher21
8dfdab32299cb8377435e5f57ec6a9aaf33891eb
[ "MIT" ]
null
null
null
tests/test_key.py
ksurdacki/cipher21
8dfdab32299cb8377435e5f57ec6a9aaf33891eb
[ "MIT" ]
null
null
null
from unittest import TestCase from random import Random from cipher21.key import Cipher21Key from cipher21.constants import KEY_LENGTH class AssessKeyTest(TestCase): def test_positive_cases(self): prng = Random() # For test repetitiveness purpose only. Use SystemRandom ordinarily. prng.seed(0xBDC34FD75D0B49F5817B4038C45EC575, version=2) for t in range(10**4): with self.subTest(t=t): Cipher21Key.from_bytes(bytes(prng.getrandbits(8) for _ in range(KEY_LENGTH))) def test_negative_cases(self): key = KEY_LENGTH*b'\x00' with self.assertRaises(ValueError): Cipher21Key.from_bytes(key) key = bytes(range(KEY_LENGTH)) with self.assertRaises(ValueError): Cipher21Key.from_bytes(key) key = bytes(range(0, 5*KEY_LENGTH, 5)) with self.assertRaises(ValueError): Cipher21Key.from_bytes(key) key = bytes(range(KEY_LENGTH, 0, -1)) with self.assertRaises(ValueError): Cipher21Key.from_bytes(key) key = bytes(range(7*KEY_LENGTH, 0, -7)) with self.assertRaises(ValueError): Cipher21Key.from_bytes(key) key = 2*bytes.fromhex('e521377823342e05bd6fe051a12a8820') with self.assertRaises(ValueError): Cipher21Key.from_bytes(key)
37.333333
93
0.665923
156
1,344
5.615385
0.320513
0.136986
0.159817
0.205479
0.446347
0.446347
0.446347
0.446347
0.385845
0.321918
0
0.079334
0.240327
1,344
35
94
38.4
0.778648
0.049107
0
0.4
0
0
0.028213
0.025078
0
0
0.026646
0
0.2
1
0.066667
false
0
0.133333
0
0.233333
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
796f8ea384a7f05b46370bc3b9473a2242391c4a
357
py
Python
Problems/String/1209. Remove All Adjacent Duplicates in String II.py
BYJRK/LeetCode-Solutions
008467e1717309066a519acb8623d2f84071b64a
[ "MIT" ]
null
null
null
Problems/String/1209. Remove All Adjacent Duplicates in String II.py
BYJRK/LeetCode-Solutions
008467e1717309066a519acb8623d2f84071b64a
[ "MIT" ]
null
null
null
Problems/String/1209. Remove All Adjacent Duplicates in String II.py
BYJRK/LeetCode-Solutions
008467e1717309066a519acb8623d2f84071b64a
[ "MIT" ]
null
null
null
# https://leetcode.com/problems/remove-all-adjacent-duplicates-in-string-ii/ class Solution: def removeDuplicates(self, s: str, k: int) -> str: res = '' for c in s: res += c if res[-k:] == c * k: res = res[:-k] return res s = Solution() print(s.removeDuplicates('deeedbbcccbdaa', 3))
21
76
0.537815
45
357
4.266667
0.622222
0.041667
0
0
0
0
0
0
0
0
0
0.004065
0.310924
357
16
77
22.3125
0.776423
0.207283
0
0
0
0
0.049822
0
0
0
0
0
0
1
0.1
false
0
0
0
0.3
0.1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7970c64577da21a05fff8a9ac2d82e4f99958f7a
4,520
py
Python
pathUtils.py
Noboxxx/ctrlShaper
0c1e30610be711f51230a8ebd1288d628409e0f9
[ "MIT" ]
null
null
null
pathUtils.py
Noboxxx/ctrlShaper
0c1e30610be711f51230a8ebd1288d628409e0f9
[ "MIT" ]
null
null
null
pathUtils.py
Noboxxx/ctrlShaper
0c1e30610be711f51230a8ebd1288d628409e0f9
[ "MIT" ]
null
null
null
import os import json from maya import cmds import re def conform_path(path): return join_path(*split_path(path.replace('\\', '/'))) def join_path(*args): path = list() for arg in args: parts = split_path(arg) for part in parts: part = str(part) if part: path.append(part) return '/'.join(path) def split_path(path): conformed_path = path.replace('\\', '/') list_ = list() for item in conformed_path.split('/'): if item: list_.append(item) return list_ def decompose_file_path(path): path_split = split_path(path) file_name = path_split.pop() location = join_path(*path_split) return location, file_name class JsonFile(object): default_location = cmds.internalVar(userPrefDir=True) extension = 'json' def __init__(self, name): if not self.is_one(name): cmds.error('\'{}\' is not a valid argument for \'{}\' class.'.format(name, self.__class__.__name__)) self.name = str(name) def __repr__(self): return self.name def __str__(self): return self.name def __eq__(self, other): return self.name == str(other) def __ne__(self, other): return not self.__eq__(other) def __iter__(self): return iter(self.name) def endswith(self, item): return self.name.endswith(item) def startswith(self, item): return self.name.startswith(item) @classmethod def compress_data(cls, data): return data @classmethod def uncompress_data(cls, data): return data @classmethod def format_file_name(cls, file_name): file_name = str(file_name) if not file_name.lower().endswith('.{0}'.format(cls.extension)): return '{0}.{1}'.format(file_name, cls.extension) return file_name @classmethod def create(cls, *args, **kwargs): pass @classmethod def create_file(cls, data, location=None, file_name=None, force=False): location = cls.default_location if location is None else str(location) file_name = cls.get_default_file_name() if file_name is None else str(file_name) force = bool(force) location = conform_path(location) file_name = cls.format_file_name(file_name) path = join_path(location, file_name) if not 
os.path.isdir(location): raise cmds.error('The given location is invalid -> \'{}\''.format(location)) if not force and os.path.isfile(path): raise cmds.error('The given path already exists -> \'{}\''.format(path)) with open(path, 'w') as f: json.dump(None, f) json_file = cls(path) json_file.write(data) print('The file \'{0}\' has been created.'.format(json_file.get_path())) return json_file @classmethod def get_default_file_name(cls): file_name = re.sub(r'(?<!^)(?=[A-Z])', '_', cls.__name__).lower() return '{0}.{1}'.format(file_name, cls.extension) @classmethod def get(cls, location=None, file_name=None): location = cls.default_location if location is None else str(location) file_name = cls.get_default_file_name() if file_name is None else cls.format_file_name(file_name) full_path = join_path(location, file_name) if cls.is_one(full_path): return cls(full_path) print('The file \'{0}\' does not exist.'.format(full_path)) return None def load(self, *args, **kwargs): print('The file \'{0}\' has been loaded.'.format(self.get_path())) @classmethod def is_one(cls, path): path = str(path) if os.path.isfile(path): if path.lower().endswith(cls.extension): return True return False def write(self, data): data = self.compress_data(data) with open(self.get_path(), 'w') as f: json.dump(data, f, indent=2, sort_keys=True) def get_path(self): return self.name def read(self): with open(self.get_path(), 'r') as f: data = json.load(f) return self.uncompress_data(data) def get_file_name(self, extension=True): name = self.get_path().split('/')[-1] if extension: return name return name.split('.')[0] def delete(self): os.remove(self.get_path()) print('The file \'{0}\' has been deleted.'.format(self.get_path()))
28.074534
112
0.606195
602
4,520
4.337209
0.181063
0.088855
0.029491
0.019916
0.323248
0.211413
0.157028
0.107239
0.081195
0.081195
0
0.003616
0.265708
4,520
160
113
28.25
0.783067
0
0
0.140496
0
0
0.05885
0
0
0
0
0
0
1
0.214876
false
0.008264
0.033058
0.090909
0.479339
0.033058
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
797130522e525a58e85e7b3f848947aed4b21310
2,150
py
Python
detro/packages/circledet/network.py
Peiiii/detro
26d74468d7554dc20b2a2daf7ec5009302c820f2
[ "MIT" ]
null
null
null
detro/packages/circledet/network.py
Peiiii/detro
26d74468d7554dc20b2a2daf7ec5009302c820f2
[ "MIT" ]
null
null
null
detro/packages/circledet/network.py
Peiiii/detro
26d74468d7554dc20b2a2daf7ec5009302c820f2
[ "MIT" ]
null
null
null
from .resnet_backbone import resnet18 from torch import nn import torch import torch.nn.functional as F from detro.networks.components import BiFPN, Center_layer, Offset_layer, Reg_layer, Heatmap_layer from detro.networks.losslib import center_loss, distance_loss class FeatureFusionNetwork(nn.Module): def __init__(self): super().__init__() def forward(self, inputs): resized = [] size = inputs[0].size()[-2:] for x in inputs[1:]: resized.append(F.upsample(x, size)) x = torch.cat(resized, dim=1) return x class CircleNet(nn.Module): def __init__(self, num_classes=1): super().__init__() self.backbone = resnet18(pretrained=True) self.neck = FeatureFusionNetwork() self.conv1 = nn.Conv2d(896, 256, kernel_size=1, stride=1, padding=0) self.bn1 = nn.BatchNorm2d(256) self.relu = nn.ReLU(inplace=True) # self.center_layer = Heatmap_layer(in_channels=256, out_channels=num_classes) # self.reg_layer = Heatmap_layer(in_channels=256, out_channels=1) self.hm_layer = Heatmap_layer(in_channels=256, out_channels=num_classes + 1) def forward(self, inputs): c1, c2, c3, c4, c5 = self.backbone(inputs) features = [c2, c3, c4, c5] features = self.neck(features) x = features x = self.conv1(x) x = self.bn1(x) x = self.relu(x) # center_heatmap = self.center_layer(x) # offsets = self.reg_layer(x) x=self.hm_layer(x) center_heatmap=x[:,:-1] offsets=x[:,-1:] return dict( center_heatmap=center_heatmap, offsets=offsets ) def CircleDetCriterion(preds, labels): loss_center = center_loss(preds['center_heatmap'], labels['center_heatmap']) # loss_corner=center_loss(preds['corner_heatmap'],labels['corner_heatmap']) loss_offsets = distance_loss(preds['offsets'], labels['offsets'], labels['offsets_mask']) return dict( loss=loss_center + loss_offsets, loss_center=loss_center, # loss_corner=loss_corner, loss_offsets=loss_offsets, )
33.59375
97
0.649767
278
2,150
4.791367
0.276978
0.045045
0.051051
0.042793
0.135886
0.107357
0.107357
0.107357
0.076577
0.076577
0
0.029679
0.232093
2,150
63
98
34.126984
0.777105
0.14186
0
0.12766
0
0
0.02938
0
0
0
0
0
0
1
0.106383
false
0
0.12766
0
0.340426
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79714648fe909d1ef23cf1429aeb6aaa8d22155b
2,938
py
Python
home/forms.py
kana-shimmichi/Weeet
4e332107748cbf63b6c109d3e5ce968a42ed10c3
[ "BSD-3-Clause" ]
null
null
null
home/forms.py
kana-shimmichi/Weeet
4e332107748cbf63b6c109d3e5ce968a42ed10c3
[ "BSD-3-Clause" ]
9
2021-03-19T00:17:56.000Z
2022-03-12T00:17:14.000Z
home/forms.py
kana-shimmichi/Weeet
4e332107748cbf63b6c109d3e5ce968a42ed10c3
[ "BSD-3-Clause" ]
null
null
null
from django import forms from .models import MakerProfile,BuyerProfile,MstLang,MstSkill,Contact,Order,OrderMessage from register.models import User class UserForm(forms.ModelForm): class Meta: model = User fields = ('last_name', 'first_name') class MakerProfileForm(forms.ModelForm): class Meta: model = MakerProfile fields = ('picture','lang','cost','skill') def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['lang'].widget = forms.CheckboxSelectMultiple() self.fields['lang'].queryset = MstLang.objects self.fields['skill'].widget = forms.CheckboxSelectMultiple() self.fields['skill'].queryset = MstSkill.objects class BuyerProfileForm(forms.ModelForm): class Meta: model = BuyerProfile fields = ('picture',) class ContactForm(forms.ModelForm): class Meta: model = Contact fields = ('user','email','message','file',) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['user'].widget.attrs.update({ 'class': 'form-control required', 'placeholder':'Your Name', 'data-placement':'top', 'data-trigger':'manual', 'data-content':'Must be at least 3 characters long, and must only contain letters.'}) self.fields['email'].widget.attrs.update({ 'class':'form-control email', 'placeholder':'email@xxx.com', 'data-placement':'top', 'data-trigger':'manual', 'data-content':'Must be a valid e-mail address (user@gmail.com)', }) self.fields['message'].widget.attrs.update({ 'class':'form-control', 'placeholder':"Your message here..", 'data-placement':'top', 'data-trigger':'manual', }) class OrderForm(forms.ModelForm): class Meta: model = Order fields = ('title','body','order_type','order_finish_time','cost',) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['title'].widget.attrs.update({ 'class':'form-control', 'placeholder':"タイトルを入れてください", 'data-placement':'top', 'data-trigger':'manual', "data-content" :"依頼の内容入力", }) self.fields['order_type'].widget.attrs.update({ 'class': 'form-control', }) 
self.fields['body'].widget.attrs.update({ 'class':'form-control', }) self.fields['cost'].widget.attrs.update({ 'class':'form-control', }) self.fields['order_finish_time'].widget.attrs.update({ 'class':'form-control', }) class SearchForm(forms.Form): title = forms.CharField( initial='', label='タイトル', required = False, # 必須ではない )
29.676768
97
0.573179
292
2,938
5.657534
0.311644
0.072639
0.082324
0.106538
0.526029
0.389225
0.309322
0.256053
0.151332
0.151332
0
0.000465
0.267529
2,938
98
98
29.979592
0.767193
0.002042
0
0.415584
0
0
0.248464
0
0
0
0
0
0
1
0.038961
false
0
0.038961
0
0.233766
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7975367638974979532191242ae89eeddc64e809
4,418
py
Python
jacket/compute/opts.py
bopopescu/jacket
d7ad3147fcb43131098c2a5210847634ff5fb325
[ "Apache-2.0" ]
null
null
null
jacket/compute/opts.py
bopopescu/jacket
d7ad3147fcb43131098c2a5210847634ff5fb325
[ "Apache-2.0" ]
null
null
null
jacket/compute/opts.py
bopopescu/jacket
d7ad3147fcb43131098c2a5210847634ff5fb325
[ "Apache-2.0" ]
2
2016-08-10T02:21:49.000Z
2020-07-24T01:57:21.000Z
# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import jacket.cmd.compute.novnc import jacket.cmd.compute.serialproxy import jacket.cmd.compute.spicehtml5proxy import jacket.compute.baserpc import jacket.compute.cloudpipe.pipelib import jacket.compute.conductor.rpcapi import jacket.compute.conductor.tasks.live_migrate import jacket.compute.conf import jacket.compute.console.manager import jacket.compute.console.rpcapi import jacket.compute.console.serial import jacket.compute.console.xvp import jacket.compute.consoleauth import jacket.compute.consoleauth.manager import jacket.compute.consoleauth.rpcapi import jacket.compute.crypto import jacket.compute.exception import jacket.compute.image.download.file import jacket.compute.image.glance import jacket.compute.ipv6.api import jacket.compute.keymgr import jacket.compute.keymgr.barbican import jacket.compute.keymgr.conf_key_mgr import jacket.compute.netconf import jacket.compute.notifications import jacket.compute.paths import jacket.compute.quota import jacket.compute.rdp import jacket.compute.servicegroup.api import jacket.compute.spice import jacket.compute.utils import jacket.compute.volume import jacket.compute.volume.cinder import jacket.db.base import jacket.db.compute.api import jacket.db.compute.sqlalchemy.api import jacket.objects.compute.network def list_opts(): return [ ('DEFAULT', itertools.chain( [jacket.compute.conductor.tasks.live_migrate.migrate_opt], [jacket.compute.consoleauth.consoleauth_topic_opt], 
[jacket.db.base.db_driver_opt], [jacket.compute.ipv6.api.ipv6_backend_opt], [jacket.compute.servicegroup.api.servicegroup_driver_opt], jacket.compute.cloudpipe.pipelib.cloudpipe_opts, jacket.cmd.compute.novnc.opts, jacket.compute.console.manager.console_manager_opts, jacket.compute.console.rpcapi.rpcapi_opts, jacket.compute.console.xvp.xvp_opts, jacket.compute.consoleauth.manager.consoleauth_opts, jacket.compute.crypto.crypto_opts, jacket.db.compute.api.db_opts, jacket.db.compute.sqlalchemy.api.db_opts, jacket.compute.exception.exc_log_opts, jacket.compute.netconf.netconf_opts, jacket.compute.notifications.notify_opts, jacket.objects.compute.network.network_opts, jacket.compute.paths.path_opts, jacket.compute.quota.quota_opts, # jacket.compute.service.service_opts, jacket.compute.utils.monkey_patch_opts, jacket.compute.utils.utils_opts, jacket.compute.volume._volume_opts, )), ('barbican', jacket.compute.keymgr.barbican.barbican_opts), ('cinder', jacket.compute.volume.cinder.cinder_opts), ('api_database', jacket.db.compute.sqlalchemy.api.api_db_opts), ('database', jacket.db.compute.sqlalchemy.api.oslo_db_options.database_opts), ('glance', jacket.compute.image.glance.glance_opts), ('image_file_url', [jacket.compute.image.download.file.opt_group]), ('compute_keymgr', itertools.chain( jacket.compute.keymgr.conf_key_mgr.key_mgr_opts, jacket.compute.keymgr.keymgr_opts, )), ('rdp', jacket.compute.rdp.rdp_opts), ('spice', itertools.chain( jacket.cmd.compute.spicehtml5proxy.opts, jacket.compute.spice.spice_opts, )), ('upgrade_levels', itertools.chain( [jacket.compute.baserpc.rpcapi_cap_opt], [jacket.compute.conductor.rpcapi.rpcapi_cap_opt], [jacket.compute.console.rpcapi.rpcapi_cap_opt], [jacket.compute.consoleauth.rpcapi.rpcapi_cap_opt], )), ('workarounds', jacket.compute.utils.workarounds_opts), ]
40.53211
85
0.716614
538
4,418
5.762082
0.247212
0.26
0.183871
0.033548
0.144516
0.086452
0
0
0
0
0
0.002506
0.187189
4,418
108
86
40.907407
0.860763
0.125396
0
0.086957
0
0
0.028045
0
0
0
0
0
0
1
0.01087
true
0
0.413043
0.01087
0.434783
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
7975415464bdf9086363882be5e74bf46c4eaee1
5,362
py
Python
src/simple_regression.py
haojunqiu/csc110-project
f379d66709c89e33a312fb054bc91619e0fe6a92
[ "MIT" ]
null
null
null
src/simple_regression.py
haojunqiu/csc110-project
f379d66709c89e33a312fb054bc91619e0fe6a92
[ "MIT" ]
null
null
null
src/simple_regression.py
haojunqiu/csc110-project
f379d66709c89e33a312fb054bc91619e0fe6a92
[ "MIT" ]
1
2022-01-11T04:26:48.000Z
2022-01-11T04:26:48.000Z
"""CSC110 final project, main module Descriptions =============================== This module contains all the functions we used to implement the simple linear regression model. Copyright and Usage Information =============================== All forms of distribution of this code, whether as given or with any changes, are expressly prohibited. All rights reserved. This file is Copyright (c) 2020 Runshi Yang, Chenxu Wang and Haojun Qiu """ from typing import List, Tuple import plotly.graph_objects as go def evaluate_line(a: float, b: float, x: float) -> float: """Evaluate the linear function y = a + bx for the given a, b. >>> result = evaluate_line(5.0, 1.0, 10.0) # y = 5.0 + 1.0 * 10.0, >>> result == 15 True """ return a + b * x def convert_points(points: List[tuple]) -> tuple: """Return a tuple of two lists, containing the x- and y-coordinates of the given points. >>> result = convert_points([(0.0, 1.1), (2.2, 3.3), (4.4, 5.5)]) >>> result[0] # The x-coordinates [0.0, 2.2, 4.4] >>> result[1] # The y-coordinates [1.1, 3.3, 5.5] """ x_coordinates = [x[0] for x in points] y_coordinates = [x[1] for x in points] return (x_coordinates, y_coordinates) def simple_linear_regression(points: List[tuple]) -> tuple: """Perform a linear regression on the given points. This function returns a pair of floats (a, b) such that the line y = a + bx is the approximation of this data. 
Further reading: https://en.wikipedia.org/wiki/Simple_linear_regression Preconditions: - len(points) > 0 >>> simple_linear_regression([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]) (0.0, 1.0) """ avg_x = sum(convert_points(points)[0]) / len(points) avg_y = sum(convert_points(points)[1]) / len(points) numerator = [(p[0] - avg_x) * (p[1] - avg_y) for p in points] denominator = [(p[0] - avg_x) ** 2 for p in points] b = sum(numerator) / sum(denominator) a = avg_y - b * avg_x return (a, b) def calculate_r_squared(points: List[tuple], a: float, b: float) -> float: """Return the R squared value when the given points are modelled as the line y = a + bx. points is a list of pairs of numbers: [(x_1, y_1), (x_2, y_2), ...] Preconditions: - len(points) > 0 """ avg_y = sum(convert_points(points)[1]) / len(points) tot = [(avg_y - p[1]) ** 2 for p in points] res = [(p[1] - (a + b * p[0])) ** 2 for p in points] return 1 - sum(res) / sum(tot) def perform_regression(train_data: List[tuple], xlabel: str, title: str) -> Tuple[float, float, float]: """Return (a, b, r_squared) Plot all data points and regression line """ # Get data points. points = train_data # Converts the points into the format expected by plotly. separated_coordinates = convert_points(points) x_coords = separated_coordinates[0] y_coords = separated_coordinates[1] # Do a simple linear regression. Returns the (a, b) constants for # the line y = a + b * x. model = simple_linear_regression(points) a = model[0] b = model[1] # Plot all the data points AND a line based on the regression plot_points_and_regression(x_coords, y_coords, [a, b], xlabel, title) # Calculate the r_squared value r_squared = calculate_r_squared(points, a, b) return (a, b, r_squared) def plot_points_and_regression(x_coords: list, y_coords: list, coef: List[float], xlabel: str, title: str) -> None: """Plot the given x- and y-coordinates and linear regression model using plotly. 
""" # Create a blank figure layout = go.Layout(title=title, xaxis={'title': xlabel}, yaxis={'title': 'number of cases'}) fig = go.Figure(layout=layout) # Add the raw data fig.add_trace(go.Scatter(x=x_coords, y=y_coords, mode='markers', name='Data')) # Add the regression line x_max = 1.1 * max(x_coords) fig.add_trace(go.Scatter(x=[0, x_max], y=[evaluate_line(coef[0], coef[1], 0), evaluate_line(coef[0], coef[1], x_max)], mode='lines', name='Regression line')) # Display the figure in a web browser fig.show() def predict(test_data: List[Tuple], model: Tuple[float, float, float], xlabel: str, title: str) -> float: """Return r_squared for the prediction. Plot all data points and regression line """ # Get data points. points = test_data a = model[0] b = model[1] # Converts the points into the format expected by plotly. separated_coordinates = convert_points(points) x_coords = separated_coordinates[0] y_hat = separated_coordinates[1] # Plot all the data points AND a line based on the regression plot_points_and_regression(x_coords, y_hat, [a, b], xlabel, title) # Calculate the r_squared value r_squared = calculate_r_squared(points, a, b) return r_squared if __name__ == '__main__': import doctest doctest.testmod(verbose=True) import python_ta python_ta.check_all(config={ 'extra-imports': ['plotly.graph_objects', 'python_ta'], 'allowed-io': [], 'max-line-length': 100, 'disable': ['R1705', 'C0200'] })
31.356725
92
0.619172
795
5,362
4.047799
0.228931
0.00808
0.041019
0.014916
0.313238
0.27253
0.223741
0.223741
0.223741
0.201367
0
0.02797
0.24655
5,362
170
93
31.541176
0.768564
0.397426
0
0.181818
0
0
0.048271
0
0
0
0
0
0
1
0.106061
false
0
0.075758
0
0.272727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7978c918a22636965af6e68798263bf0533bd12f
292
py
Python
exceptions.py
gongchengshi/aws
d04d42739e026d2e99936dd046be05293e063e08
[ "MIT" ]
null
null
null
exceptions.py
gongchengshi/aws
d04d42739e026d2e99936dd046be05293e063e08
[ "MIT" ]
null
null
null
exceptions.py
gongchengshi/aws
d04d42739e026d2e99936dd046be05293e063e08
[ "MIT" ]
null
null
null
class AwsErrorCodes: SqsNonExistentQueue = 'AWS.SimpleQueueService.NonExistentQueue' class NonExistantSqsQueueException(Exception): def __init__(self, queue_name): self.queue_name = queue_name Exception.__init__(self, "SQS Queue '%s' no longer exists" % queue_name)
32.444444
80
0.75
30
292
6.9
0.6
0.173913
0.125604
0
0
0
0
0
0
0
0
0
0.164384
292
8
81
36.5
0.848361
0
0
0
0
0
0.239726
0.133562
0
0
0
0
0
1
0.166667
false
0
0
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
797b83c4395d6b6acbe9c60dbd945372be2f9477
718
py
Python
FaceRecogEngine/recognition/urls.py
thecodacus/FaceAuth
dca6d6438426df48cd7e9c9693fa450d817f7d61
[ "Apache-2.0" ]
2
2018-09-22T18:28:33.000Z
2021-08-28T17:44:30.000Z
FaceRecogEngine/recognition/urls.py
thecodacus/FaceAuth
dca6d6438426df48cd7e9c9693fa450d817f7d61
[ "Apache-2.0" ]
null
null
null
FaceRecogEngine/recognition/urls.py
thecodacus/FaceAuth
dca6d6438426df48cd7e9c9693fa450d817f7d61
[ "Apache-2.0" ]
1
2019-06-05T15:34:59.000Z
2019-06-05T15:34:59.000Z
"""URL configuration for the face-recognition app.

Routes each path to the corresponding class-based view in ``views.py``.
"""
from django.contrib import admin  # noqa: F401 -- unused here; kept, other modules may rely on this import
from django.urls import path, include  # noqa: F401 -- `include` unused here; kept for compatibility
from . import views
from django.conf import settings  # noqa: F401 -- unused here; kept for compatibility

# URL namespace, e.g. reverse('recognition:login').
app_name = 'recognition'

urlpatterns = [
    path('', views.Home.as_view(), name='home'),
    # NOTE(review): 'settings/' maps to the same Home view as '' -- looks
    # intentional in the original, confirm before changing.
    path('settings/', views.Home.as_view(), name='settings'),
    path('login/', views.UserLoginView.as_view(), name='login'),
    path('logout/', views.LogoutView.as_view(), name='logout'),
    path('register/', views.UserRegistrationView.as_view(), name='register'),
    path('settings/profile/', views.ProfileSettingsView.as_view(), name='edit-profile'),
    path('settings/reg-face/', views.UserFaceRegView.as_view(), name='reg-face'),
    path('apis/auth/', views.UserFaceLogInView.as_view(), name='api-auth'),
]
34.190476
86
0.71727
93
718
5.44086
0.365591
0.094862
0.158103
0.059289
0.075099
0
0
0
0
0
0
0
0.093315
718
20
87
35.9
0.777266
0
0
0
0
0
0.203626
0
0
0
0
0
0
1
0
false
0
0.266667
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
797d78dc8a7e7f2b8677fa417daf060e2f5479f3
2,026
py
Python
.pre-commit/check_version.py
JPchico/aiida-lammps
8f618541784bbd6360efc653350570cf76398e83
[ "MIT" ]
7
2021-02-26T06:12:28.000Z
2022-03-27T17:06:41.000Z
.pre-commit/check_version.py
JPchico/aiida-lammps
8f618541784bbd6360efc653350570cf76398e83
[ "MIT" ]
21
2020-09-18T14:03:16.000Z
2022-02-14T10:48:40.000Z
.pre-commit/check_version.py
JPchico/aiida-lammps
8f618541784bbd6360efc653350570cf76398e83
[ "MIT" ]
5
2018-03-02T23:49:41.000Z
2020-04-17T05:35:19.000Z
"""Validate consistency of versions and dependencies. Validates consistency of setup.json and * environment.yml * version in aiida_lammps/__init__.py """ import json import os import sys import click FILENAME_SETUP_JSON = "setup.json" SCRIPT_PATH = os.path.split(os.path.realpath(__file__))[0] ROOT_DIR = os.path.join(SCRIPT_PATH, os.pardir) FILEPATH_SETUP_JSON = os.path.join(ROOT_DIR, FILENAME_SETUP_JSON) def get_setup_json(): """Return the `setup.json` as a python dictionary.""" with open(FILEPATH_SETUP_JSON, "r") as handle: setup_json = json.load(handle) # , object_pairs_hook=OrderedDict) return setup_json @click.group() def cli(): """Command line interface for pre-commit checks.""" pass @cli.command("version") def validate_version(): """Check that version numbers match. Check version number in setup.json and aiida_lammos/__init__.py and make sure they match. """ # Get version from python package sys.path.insert(0, ROOT_DIR) import aiida_lammps # pylint: disable=wrong-import-position version = aiida_lammps.__version__ setup_content = get_setup_json() if version != setup_content["version"]: click.echo("Version number mismatch detected:") click.echo( "Version number in '{}': {}".format( FILENAME_SETUP_JSON, setup_content["version"] ) ) click.echo( "Version number in '{}/__init__.py': {}".format("aiida_lammps", version) ) click.echo( "Updating version in '{}' to: {}".format(FILENAME_SETUP_JSON, version) ) setup_content["version"] = version with open(FILEPATH_SETUP_JSON, "w") as fil: # Write with indentation of two spaces and explicitly define separators to not have spaces at end of lines json.dump(setup_content, fil, indent=2, separators=(",", ": ")) sys.exit(1) if __name__ == "__main__": cli() # pylint: disable=no-value-for-parameter
28.138889
118
0.661895
258
2,026
4.94186
0.418605
0.105882
0.053333
0.051765
0.123922
0.064314
0.064314
0
0
0
0
0.002541
0.2231
2,026
71
119
28.535211
0.807497
0.304541
0
0.075
0
0
0.139416
0
0
0
0
0
0
1
0.075
false
0.025
0.125
0
0.225
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
797dc34e814424ff0892e6ac9838f4607837049a
7,062
py
Python
main.py
rorro/legacy-gauntlet
82898408acee5ddd0c629c15521c7f5f7a8982fe
[ "MIT" ]
null
null
null
main.py
rorro/legacy-gauntlet
82898408acee5ddd0c629c15521c7f5f7a8982fe
[ "MIT" ]
null
null
null
main.py
rorro/legacy-gauntlet
82898408acee5ddd0c629c15521c7f5f7a8982fe
[ "MIT" ]
null
null
null
"""Discord bot that periodically announces bounties and challenges.

Reads timing/channel configuration from ``config.ini`` and the bounty and
challenge texts from JSON files; posts embeds on fixed intervals and keeps a
live countdown in each embed's footer.
"""
import json
import os
import time
from configparser import ConfigParser
from datetime import datetime

import discord
from discord.ext import tasks, commands
from dotenv import load_dotenv

load_dotenv()
TOKEN = os.getenv('TOKEN')

CONFIG_FILE = 'config.ini'

# Config
config_parser = ConfigParser()
config_parser.read(CONFIG_FILE)

# In minutes
CHALLENGE_TIME = int(config_parser.get('CHALLENGE', 'frequency'))
BOUNTY_TIME = int(config_parser.get('BOUNTY', 'frequency'))

# Timestamps of the most recent announcements (set once the loops run).
challenge_start = 0
bounty_start = 0
started = False


def read_file(file):
    """Return the entries of the JSON array stored in *file* as a list."""
    with open(file) as f:
        return list(json.load(f))


bounties = read_file(config_parser.get('BOUNTY', 'file'))
challenges = read_file(config_parser.get('CHALLENGE', 'file'))

# Create bot
client = commands.Bot(command_prefix='!')


# Startup information
@client.event
async def on_ready():
    print(f'Connected to bot: {client.user.name}')
    print(f'Bot ID: {client.user.id}')


@client.event
async def on_command_error(ctx, error):
    """Silently ignore expected user-facing command errors; re-raise the rest."""
    if isinstance(error, (commands.CommandNotFound,
                          commands.MissingPermissions,
                          commands.MissingRequiredArgument,
                          commands.CommandInvokeError,
                          commands.ChannelNotFound)):
        return
    raise error


@commands.has_permissions(administrator=True)
@client.command(help='- Start the announcements')
async def start(ctx):
    """Start the enabled announcement loops and the countdown updater."""
    global started
    if config_parser.get('CHALLENGE', 'enabled') == "True":
        challenge_loop.start()
    if config_parser.get('BOUNTY', 'enabled') == "True":
        bounty_loop.start()
    started = True
    await ctx.send('Announcements have been started')
    # NOTE(review): time.sleep blocks the whole event loop for 3 s; the
    # original did this to let the first announcements post before the
    # countdown starts -- asyncio.sleep would be the non-blocking fix.
    time.sleep(3)
    countdown.start()


@commands.has_permissions(administrator=True)
@client.command(help='- Stop the announcements')
async def stop(ctx):
    """Cancel all running loops."""
    global started
    challenge_loop.cancel()
    bounty_loop.cancel()
    countdown.cancel()
    started = False
    await ctx.send('Announcements have been stopped')


@commands.has_permissions(administrator=True)
@client.command(help='- DO NOT USE THIS WHILE EVENT IS ONGOING!')
async def reset(ctx):
    """Reset both announcement indexes to 0 and persist the config."""
    config_parser.set('BOUNTY', 'index', '0')
    config_parser.set('CHALLENGE', 'index', '0')
    # Fix: the original wrote the file twice (once per section); a single
    # write persists both changes with the same end state.
    with open(CONFIG_FILE, 'w') as config_file:
        config_parser.write(config_file)
    await ctx.send('Indexes have been reset to 0')


@commands.has_permissions(administrator=True)
@client.command(help='- Give a message id to set message as ended. Run this in the same channel as the ended message.')
async def end(ctx, arg):
    """Mark one of the bot's own embeds as ended (footer time set to 0)."""
    ended_message = await ctx.fetch_message(int(arg))
    if ended_message.author == client.user:
        new_embed = ended_message.embeds[0]
        new_embed.set_footer(text='Time remaining: 0h 0min')
        await ended_message.edit(embed=new_embed)
    await ctx.message.delete()


@commands.has_permissions(administrator=True)
@client.command(help='- Set channels for bounties and challenges. Configure this before you start the event!')
async def set_channel(ctx, t, channel: discord.TextChannel):
    """Persist the target channel for type *t* ('bounty' or 'challenge')."""
    if started:
        await ctx.send("You can only configure this while the event is stopped.")
        return
    if t not in ["bounty", "challenge"]:
        await ctx.send("Invalid type. Only valid types are 'bounty' and 'challenge'.")
        return
    config_parser.set(t.upper(), 'channel', str(channel.id))
    with open(CONFIG_FILE, 'w') as config_file:
        config_parser.write(config_file)
    await ctx.send(f'Successfully set the {t} channel to {channel.mention}')


# Announcements for the bounty channel
@tasks.loop(minutes=BOUNTY_TIME)
async def bounty_loop():
    global bounty_start
    bounty_start = datetime.now()
    bounty_channel = client.get_channel(int(config_parser.get('BOUNTY', 'channel')))
    bounty_index = int(config_parser.get('BOUNTY', 'index'))
    # Stop once every bounty has been announced.
    if bounty_index >= len(bounties):
        bounty_loop.stop()
        return
    embed_message = discord.Embed(title=f'{BOUNTY_TIME//60} Hour Bounty',
                                  color=discord.Color.green())
    embed_message.add_field(name="The current bounty is...",
                            value=bounties[bounty_index]['bounty'], inline=False)
    embed_message.add_field(name="Keyword", value=bounties[bounty_index]['keyword'])
    embed_message.set_footer(text=f'Time remaining: {BOUNTY_TIME//60}h {BOUNTY_TIME%60}min')
    msg = await bounty_channel.send(embed=embed_message)
    # Persist progress so a restart resumes from the next bounty.
    config_parser.set('BOUNTY', 'index', str(bounty_index + 1))
    config_parser.set('BOUNTY', 'message_id', str(msg.id))
    with open(CONFIG_FILE, 'w') as config_file:
        config_parser.write(config_file)


# Announcements for the challenges channel
@tasks.loop(minutes=CHALLENGE_TIME)
async def challenge_loop():
    global challenge_start
    challenge_start = datetime.now()
    challenge_channel = client.get_channel(int(config_parser.get('CHALLENGE', 'channel')))
    challenge_index = int(config_parser.get('CHALLENGE', 'index'))
    if challenge_index >= len(challenges):
        challenge_loop.stop()
        return
    embed_message = discord.Embed(title="Daily Challenge",
                                  color=discord.Color.green())
    embed_message.add_field(name="The current challenge is...",
                            value=challenges[challenge_index]['challenge'], inline=False)
    embed_message.add_field(name="Keyword", value=challenges[challenge_index]['keyword'])
    embed_message.set_footer(text=f'Time remaining: {CHALLENGE_TIME // 60}h {CHALLENGE_TIME % 60}min')
    msg = await challenge_channel.send(embed=embed_message)
    config_parser.set('CHALLENGE', 'index', str(challenge_index + 1))
    config_parser.set('CHALLENGE', 'message_id', str(msg.id))
    with open(CONFIG_FILE, 'w') as config_file:
        config_parser.write(config_file)


def update_counter(message, t, start_time):
    """Return *message*'s embed with the footer recomputed from elapsed time.

    ``t`` is the loop period in minutes; ``start_time`` is when the current
    announcement was posted.
    """
    new_embed = message.embeds[0]
    difference = datetime.now() - start_time
    difference_min = difference.seconds // 60
    new_embed.set_footer(
        text=f'Time remaining: {(t - difference_min)//60}h {(t - difference_min)%60}min')
    return new_embed


@tasks.loop(minutes=1)
async def countdown():
    """Refresh the countdown footer of the latest bounty/challenge embeds."""
    if config_parser.get('BOUNTY', 'enabled') == "True":
        bounty_channel = await client.fetch_channel(config_parser.get('BOUNTY', 'channel'))
        bounty_message = await bounty_channel.fetch_message(config_parser.get('BOUNTY', 'message_id'))
        await bounty_message.edit(embed=update_counter(bounty_message, BOUNTY_TIME, bounty_start))
    if config_parser.get('CHALLENGE', 'enabled') == "True":
        challenge_channel = await client.fetch_channel(config_parser.get('CHALLENGE', 'channel'))
        challenge_message = await challenge_channel.fetch_message(config_parser.get('CHALLENGE', 'message_id'))
        await challenge_message.edit(embed=update_counter(challenge_message, CHALLENGE_TIME, challenge_start))


client.run(TOKEN)
33.15493
125
0.717927
932
7,062
5.26824
0.177039
0.07332
0.04888
0.039104
0.468024
0.349287
0.298574
0.298574
0.136456
0.117312
0
0.004889
0.160011
7,062
212
126
33.311321
0.822825
0.017842
0
0.226667
0
0.006667
0.190215
0.006206
0
0
0
0
0
1
0.013333
false
0
0.053333
0
0.14
0.013333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
797e4e1803afd3fcc981a76ff3c6d7fb99ce8aa0
7,462
py
Python
zfunc.py
zhuligs/Pallas
c8d77d0963c080fa7331560f1659001488b0328f
[ "MIT" ]
null
null
null
zfunc.py
zhuligs/Pallas
c8d77d0963c080fa7331560f1659001488b0328f
[ "MIT" ]
null
null
null
zfunc.py
zhuligs/Pallas
c8d77d0963c080fa7331560f1659001488b0328f
[ "MIT" ]
null
null
null
import numpy as np import itdbase from itdbase import Cell import itin from copy import deepcopy as cp import cPickle as pick # from tsase.optimize import MDMin from ase.optimize.fire import FIRE # from ase.optimize import BFGS from ase import * from ase.io import read, write import os import sys import numpy as np from tsase.mushybox import mushybox # from tsase.calculators.vasp_ext import Vasp # from tsase.calculators.lammps_ext import LAMMPS from ase.calculators.lammpsrun import LAMMPS from tsase.dimer import ssdimer from tsase.dimer import lanczos from tsase.neb.util import vunit, vrand def gopt(xcell, mode): if itin.interface == 'lammps': return gopt_lammps(xcell, mode) elif itin.interface == 'vasp': return gopt_vasp(xcell, mode) else: print 'ERROR: WRONG INTERFACE' sys.exit(1) def gopt_vasp(xcell, mode): lat = xcell.get_lattice() vol = xcell.get_volume() jacob = (vol / itin.nat)**(1.0/3.0) * itin.nat**0.5 latt = lat + np.dot(lat, mode[-3:]/jacob) xcell.set_lattice(latt) newpos = xcell.get_positions() + mode[:-3] xcell.set_positions(newpos) write_cell_to_vasp(xcell, "POSCAR") os.system("cp INCAR_OPT INCAR") os.system("sh runvasp.sh") e = float(os.popen("awk '/free energy/{print $5}' OUTCAR|tail -1").read()) pcell = set_cell_from_vasp("CONTCAR") h = itin.press * pcell.get_volume() / 1602.2 + e pcell.set_e(h) gdirs = glob.glob('Gdir*') gdir = 'Gdir' + str(len(gdirs)) os.system('mkdir -p ' + gdir) os.system('cp POSCAR OUTCAR CONTCAR XDATCAR ' + gdir) sdata.gdir = gdir return pcell def gopt_lammps(xcell, mode): write_cell_to_vasp(xcell, 'POSCAR') p1 = read('POSCAR', format='vasp') # tags = [a.symbol == 'Si' for a in p1] #parameters = {'mass': ['1 1.0'], 'pair_style': 'lj/sf 2.5', # 'pair_coeff': ['1 1 1.0 1.0 2.5'], # 'pair_modify': 'shift yes'} parameters = itin.parameters calc = LAMMPS(parameters=parameters) p1.set_calculator(calc) natom = len(p1) vol = p1.get_volume() jacob = (vol/natom)**(1.0/3.0) * natom**0.5 # mode = np.zeros((len(p)+3, 3)) # mode = 
vrand(mode) # try: # mode = vunit(mode) # except: # pass cellt = p1.get_cell() + np.dot(p1.get_cell(), mode[-3:]/jacob) p1.set_cell(cellt, scale_atoms=True) p1.set_positions(p1.get_positions() + mode[:-3]) pstress = p1.get_cell()*0.0 p1box = mushybox(p1, pstress) # print len(p1box) # print p1box.jacobian # print p1box.get_potential_energy() try: dyn = FIRE(p1box, dt=0.1, maxmove=0.2, dtmax=0.2) dyn.run(fmax=0.01, steps=2000) io.write("CONTCAR", p1, format='vasp') pcell = set_cell_from_vasp("CONTCAR") e = p1box.get_potential_energy() pcell.set_e(e) except: pcell = cp(xcell) pcell.set_e(151206) return pcell def rundim(xcell, mode): if itin.interface == 'lammps': return rundim_lammps(xcell, mode) elif itin.interface == 'vasp': return rundim_vasp(xcell, mode) else: print 'ERROR: WRONG INTERFACE' sys.exit(1) def rundim_vasp(xcell, mode): lat = xcell.get_lattice() vol = xcell.get_volume() jacob = (vol/itin.nat)**(1.0/3.0) * itin.nat**0.5 latt = lat + np.dot(lat, mode[-3:]/jacob) xcell.set_lattice(latt) f = open('MODECAR', 'w') for x in mode[:-3]: f.write("%15.9f %15.9f %15.9f\n" % tuple(x)) f.close() write_cell_to_vasp(xcell, "POSCAR") os.system("cp INCAR_DIM INCAR") os.system("sh runvasp.sh") e = float(os.popen("awk '/free energy/{print $5}' OUTCAR|tail -1").read()) pcell = set_cell_from_vasp("CONTCAR") h = itin.press * pcell.get_volume() / 1602.2 + e pcell.set_e(h) ddirs = glob.glob('Ddir*') ddir = 'Ddir' + str(len(ddirs)) os.system('mkdir -p ' + ddir) os.system('cp POSCAR MODECAR OUTCAR XDATCAR DIMCAR ' + ddir) sdata.ddir = ddir return pcell def rundim_ts(xcell, mode): write_cell_to_vasp('TSCELL', 'w') f = open('tmode', 'w') pick.dump(mode, 'f') f.close() os.system('python -u dvjob.py > zout') os.system('rm WAVECAR') e = float(os.popen("awk '/TTENERGY/{print $2}' zout").read()) pcell = set_cell_from_vasp('dimer1.con') h = itin.press * pcell.get_volume() / 1602.2 + e pcell.set_e(h) return pcell def rundim_lammps(xcell, mode): write_cell_to_vasp(xcell, 'DCAR') p = 
read('DCAR', format='vasp') parameters = itin.parameters calc = LAMMPS(parameters=parameters) p.set_calculator(calc) # E0 = p.get_potential_energy() natom = len(p) vol = p.get_volume() jacob = (vol/natom)**(1.0/3.0) * natom**0.5 # mode = np.zeros((len(p)+3, 3)) # mode = vrand(mode) try: mode = vunit(mode) except: mode = z_rmode() cellt = p.get_cell() + np.dot(p.get_cell(), mode[-3:]/jacob) p.set_cell(cellt, scale_atoms=True) p.set_positions(p.get_positions() + mode[:-3]) d = lanczos.lanczos_atoms(p, mode=mode, rotationMax=4, ss=True, phi_tol=15) dyn = FIRE(d, dt=0.1, maxmove=0.2, dtmax=0.2) try: dyn.run(fmax=0.05, steps=2000) E1 = p.get_potential_energy() write("CDCAR", d.R0, format='vasp', direct=True) pcell = set_cell_from_vasp("CDCAR") pcell.set_e(E1) except: pcell = cp(xcell) pcell.set_e(151206) return pcell def set_cell_from_vasp(pcar): xcell = Cell() buff = [] with open(pcar) as f: for line in f: buff.append(line.split()) lat = np.array(buff[2:5], float) try: typt = np.array(buff[5], int) except: del(buff[5]) typt = np.array(buff[5], int) nat = sum(typt) pos = np.array(buff[7:7 + nat], float) xcell.set_name(itin.sname) xcell.set_lattice(lat) if buff[6][0].strip()[0] == 'D': xcell.set_positions(pos) else: xcell.set_cart_positions(pos) xcell.set_typt(typt) xcell.set_znucl(itin.znucl) xcell.set_types() xcell.cal_fp(itin.fpcut, itin.lmax) return xcell def write_cell_to_vasp(xcell, pcar): lat = xcell.get_lattice() typt = xcell.get_typt() pos = xcell.get_positions() f = open(pcar, 'w') f.write(itin.sname + '\n') f.write('1.0\n') for x in lat: f.write("%15.9f %15.9f %15.9f\n" % tuple(x)) for iz in itin.znucl: f.write(itdbase.atom_data[iz][1]) f.write(' ') f.write('\n') for ix in typt: f.write(str(ix) + ' ') f.write('\n') f.write('Direct\n') for x in pos: f.write("%15.9f %15.9f %15.9f\n" % tuple(x)) f.close() def getx(cell1, cell2): mode = np.zeros((itin.nat + 3, 3)) mode[-3:] = cell1.get_lattice() - cell2.get_lattice() ilat = np.linalg.inv(cell1.get_lattice()) vol = 
cell1.get_volume() jacob = (vol / itin.nat)**(1.0 / 3.0) * itin.nat**0.5 mode[-3:] = np.dot(ilat, mode[-3:]) * jacob pos1 = cell1.get_cart_positions() pos2 = cell2.get_cart_positions() for i in range(itin.nat): mode[i] = pos1[i] - pos2[i] try: mode = vunit(mode) except: mode = np.zeros((itin.nat + 3, 3)) return mode def z_rmode(): mode = np.zeros((itin.nat + 3, 3)) mode = vrand(mode) mode = vunit(mode) return mode
28.158491
79
0.597829
1,138
7,462
3.814587
0.1942
0.022115
0.014513
0.020733
0.430316
0.40751
0.362129
0.316056
0.269523
0.260309
0
0.036309
0.243366
7,462
264
80
28.265152
0.732554
0.079067
0
0.397129
0
0
0.08787
0
0
0
0
0
0
0
null
null
0
0.08134
null
null
0.023923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
797e80a621441acbd16bae2574af1c39fbbbbfb6
1,113
py
Python
ckanext/issues/commands.py
rhabbachi/ckanext-issues
d8c3dde8372e88dd5dc173023df34c90034ca777
[ "MIT" ]
8
2016-06-16T20:45:54.000Z
2020-09-24T12:06:15.000Z
ckanext/issues/commands.py
rhabbachi/ckanext-issues
d8c3dde8372e88dd5dc173023df34c90034ca777
[ "MIT" ]
50
2015-03-25T16:59:11.000Z
2016-01-10T21:35:26.000Z
ckanext/issues/commands.py
rhabbachi/ckanext-issues
d8c3dde8372e88dd5dc173023df34c90034ca777
[ "MIT" ]
11
2016-09-14T13:34:53.000Z
2020-08-28T05:48:58.000Z
from ckan.lib.cli import CkanCommand
import logging
import sys


class Issues(CkanCommand):
    """Paster commands for the ckanext-issues extension.

    Usage:
        paster issues init_db - Creates the database table issues needs to run
        paster issues upgrade_db - Does any database migrations required (idempotent)
    """
    # First docstring line doubles as the one-line summary shown by paster.
    summary = __doc__.split('\n')[0]
    usage = __doc__

    def command(self):
        """
        Parse command line arguments and call appropriate method.
        """
        if not self.args or self.args[0] in ['--help', '-h', 'help']:
            # Single-argument print() behaves identically on Python 2 and 3.
            print(self.usage)
            sys.exit(1)

        cmd = self.args[0]
        self._load_config()
        self.log = logging.getLogger(__name__)

        if cmd == 'init_db':
            from ckanext.issues.model import setup
            setup()
            self.log.info('Issues tables are initialized')
        elif cmd == 'upgrade_db':
            from ckanext.issues.model import upgrade
            upgrade()
            self.log.info('Issues tables are up to date')
        else:
            self.log.error('Command %s not recognized' % (cmd,))
25.883721
69
0.57053
131
1,113
4.709924
0.549618
0.045381
0.029173
0.061588
0.181524
0.181524
0
0
0
0
0
0.005355
0.328841
1,113
42
70
26.5
0.820616
0
0
0
0
0
0.137637
0
0
0
0
0
0
0
null
null
0
0.217391
null
null
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
797ef4080003e9940cfa950023fe499ebb5260ea
510
py
Python
wprevents/base/views.py
arroway/wprevents
5ed14c0e85c1a6463a0e72c94836fae81fbe3fda
[ "BSD-3-Clause" ]
1
2015-02-07T10:04:48.000Z
2015-02-07T10:04:48.000Z
wprevents/base/views.py
yvan-sraka/wprevents
03f95150fe7c09338c3a17e00a4b85febef87789
[ "BSD-3-Clause" ]
6
2015-02-07T10:08:38.000Z
2021-06-06T13:17:24.000Z
wprevents/base/views.py
yvan-sraka/wprevents
03f95150fe7c09338c3a17e00a4b85febef87789
[ "BSD-3-Clause" ]
4
2015-01-20T19:48:31.000Z
2017-04-08T22:10:52.000Z
from django.http import HttpResponseNotFound, HttpResponseServerError
from django.shortcuts import render
from django.template import RequestContext
from django.template.loader import get_template


def login(request):
    """Render the login page."""
    return render(request, 'login.html')


def error404(request):
    """Custom 404 handler rendering the 404.html template.

    Fix: the original returned HttpResponseServerError (HTTP 500) for the
    not-found page; a 404 handler must respond with status 404.
    """
    t = get_template('404.html')
    res = HttpResponseNotFound(t.render(RequestContext(request)))
    return res


def error500(request):
    """Custom 500 handler rendering the 500.html template."""
    t = get_template('500.html')
    res = HttpResponseServerError(t.render(RequestContext(request)))
    return res
22.173913
66
0.782353
62
510
6.387097
0.354839
0.10101
0.090909
0.09596
0.338384
0.338384
0.338384
0.338384
0.338384
0
0
0.026846
0.123529
510
22
67
23.181818
0.85906
0
0
0.285714
0
0
0.05098
0
0
0
0
0
0
1
0.214286
false
0
0.285714
0.071429
0.714286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
798152a21d0084126235c34a6bada661f5af6a43
1,984
py
Python
symmetricom_raspberrypi_clock.py
wkumari/symmetricom-nd4-python
08ff7dc56f52667ad885817a3afbb97c879d5f48
[ "Apache-2.0" ]
null
null
null
symmetricom_raspberrypi_clock.py
wkumari/symmetricom-nd4-python
08ff7dc56f52667ad885817a3afbb97c879d5f48
[ "Apache-2.0" ]
null
null
null
symmetricom_raspberrypi_clock.py
wkumari/symmetricom-nd4-python
08ff7dc56f52667ad885817a3afbb97c879d5f48
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# Copyright (c) 2017 Warren Kumari
"""
This small program uses a Raspberry Pi Zero W to drive the display portion of
a Symmetricom ND-4 display.

This replaces the processor board of the ND-4, and powers the Pi from the
internal ND-4 power supply. The original processor board simply drives a
MAX7219 which is conveniently on the power-supply board, to the processor
board just gets unplugged and the Pi connected instead.

The wiring is as follows:
ND-4   MAX7219 Function    Pi Pin
--------------------------------
VCC    VCC                 2
GND    GND                 6
PA0    CLK     SPI CLK(11) 23
PA1    LOAD/CS SPI CE0(8)  24
PA2    DIN     MOSI(10)    19

All the hard work is done by Richard Hull's luma.led_matrix library from:
https://github.com/rm-hull/luma.led_matrix
"""
from datetime import datetime
import time

from luma.core.interface.serial import spi, noop
from luma.core.render import canvas
from luma.core.virtual import sevensegment
from luma.led_matrix.device import max7219

# Setup the interface.
serial = spi(port=0, device=0, gpio=noop())
device = max7219(serial, cascaded=1)
seg = sevensegment(device)

# For some reason the LED display ignores the first octet.
# The colons are addressed with a period at position 8 in the string,
# and the "point" is at 3.
# For added entertainment, the digits are all reversed as well, so
# 17:28:31 is sent as "0013827.1"
while True:
    timestr = datetime.now().strftime('%H%M%S')
    # Reverse the time string
    revtimestr = timestr[::-1]
    paddedstr = "00" + revtimestr
    # ... and display it.
    seg.text = paddedstr
    # and now sleep around 1/2 second and redisplay with the colon on
    # to make it "flash"
    time.sleep(0.5)
    # insert a period before last character (to get : on display)
    # Removed: add a period in spot 3 to get period to flash
    revtimestr = revtimestr[:5] + '.' + revtimestr[5:]
    paddedstr = "00" + revtimestr
    seg.text = paddedstr
    time.sleep(0.5)
32
116
0.697077
318
1,984
4.339623
0.522013
0.008696
0.028261
0.015942
0
0
0
0
0
0
0
0.047044
0.207157
1,984
61
117
32.52459
0.830261
0.666331
0
0.315789
0
0
0.017161
0
0
0
0
0
0
1
0
false
0
0.315789
0
0.315789
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
7981d5f5623d46312039f8e4c8cb2b8fbffad125
4,730
py
Python
tests/test_rtpPayload_ttml.py
bbc/rd-apmm-python-lib-rtpPayload_ttml
805d13242b44f26f38e5a9d940ee2ec4862528c3
[ "Apache-1.1" ]
null
null
null
tests/test_rtpPayload_ttml.py
bbc/rd-apmm-python-lib-rtpPayload_ttml
805d13242b44f26f38e5a9d940ee2ec4862528c3
[ "Apache-1.1" ]
null
null
null
tests/test_rtpPayload_ttml.py
bbc/rd-apmm-python-lib-rtpPayload_ttml
805d13242b44f26f38e5a9d940ee2ec4862528c3
[ "Apache-1.1" ]
null
null
null
#!/usr/bin/python
#
# James Sandford, copyright BBC 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from hypothesis import given, strategies as st  # type: ignore
from rtpPayload_ttml import (RTPPayload_TTML,
                             LengthError,
                             SUPPORTED_ENCODINGS,
                             utfEncode)


class TestExtension (TestCase):
    """Unit/property tests for RTPPayload_TTML serialization behaviour."""

    def setUp(self):
        self.thisP = RTPPayload_TTML()

    # Payloads must fit a 16-bit length field, hence the < 2**16 filters below.
    @given(st.tuples(
        st.text(),
        st.sampled_from(SUPPORTED_ENCODINGS),
        st.booleans()).filter(
            lambda x: len(utfEncode(x[0], x[1], x[2])) < 2**16))
    def test_init(self, data):
        doc, encoding, bom = data
        reservedBits = bytearray(b'\x00\x00')
        newP = RTPPayload_TTML(reservedBits, doc, encoding, bom)

        self.assertEqual(newP.reserved, reservedBits)
        self.assertEqual(newP.userDataWords, doc)
        self.assertEqual(newP._encoding, encoding)
        self.assertEqual(newP._bom, bom)

    @given(
        st.text(),
        st.text().filter(lambda x: x not in SUPPORTED_ENCODINGS),
        st.booleans())
    def test_init_invalidEnc(self, doc, enc, bom):
        reservedBits = bytearray(b'\x00\x00')
        with self.assertRaises(AttributeError):
            RTPPayload_TTML(reservedBits, doc, enc, bom)

    def test_reserved_default(self):
        self.assertEqual(self.thisP.reserved, bytearray(b'\x00\x00'))

    def test_reserved_notBytes(self):
        with self.assertRaises(AttributeError):
            self.thisP.reserved = ""

    @given(st.binary().filter(lambda x: x != bytearray(b'\x00\x00')))
    def test_reserved_invalid(self, value):
        with self.assertRaises(ValueError):
            self.thisP.reserved = bytearray(value)

    def test_userDataWords_default(self):
        self.assertEqual(self.thisP.userDataWords, "")

    @given(st.text().filter(lambda x: len(utfEncode(x, "UTF-8")) < 2**16))
    def test_userDataWords(self, doc):
        self.thisP.userDataWords = doc
        self.assertEqual(self.thisP.userDataWords, doc)

    def test_userDataWords_invalidType(self):
        with self.assertRaises(AttributeError):
            self.thisP.userDataWords = 0

    def test_userDataWords_tooLong(self):
        # Exactly 2**16 'a's: one past the largest representable length.
        # (Replaces the original quadratic += loop with a direct repeat.)
        doc = "a" * 2**16
        with self.assertRaises(LengthError):
            self.thisP.userDataWords = doc

    @given(st.tuples(
        st.text(),
        st.sampled_from(SUPPORTED_ENCODINGS),
        st.booleans()).filter(
            lambda x: len(utfEncode(x[0], x[1], x[2])) < 2**16))
    def test_userDataWords_encodings(self, data):
        doc, encoding, bom = data
        payload = RTPPayload_TTML(
            userDataWords=doc, encoding=encoding, bom=bom)
        self.assertEqual(payload.userDataWords, doc)
        self.assertEqual(payload._userDataWords, utfEncode(doc, encoding, bom))

    def test_eq(self):
        reservedBits = bytearray(b'\x00\x00')
        newP = RTPPayload_TTML(reservedBits, "")
        self.assertEqual(newP, self.thisP)

    def test_bytearray_default(self):
        expected = bytearray(4)
        self.assertEqual(bytes(self.thisP), expected)

        newP = RTPPayload_TTML().fromBytearray(expected)
        self.assertEqual(newP, self.thisP)

    @given(st.binary(min_size=2, max_size=2).filter(
        lambda x: x != b'\x00\x00'))
    def test_fromBytearray_invalidLen(self, length):
        bArray = bytearray(4)
        bArray[2:4] = length
        with self.assertRaises(LengthError):
            RTPPayload_TTML().fromBytearray(bArray)

    @given(st.text())
    def test_toBytearray(self, doc):
        self.thisP.userDataWords = doc
        bDoc = utfEncode(doc)
        expected = bytearray(2)
        expected += int(len(bDoc)).to_bytes(2, byteorder='big')
        expected += bDoc

        self.assertEqual(expected, self.thisP.toBytearray())

    @given(st.text())
    def test_fromBytearray(self, doc):
        expected = RTPPayload_TTML(userDataWords=doc)
        bDoc = utfEncode(doc)
        bArray = bytearray(2)
        bArray += int(len(bDoc)).to_bytes(2, byteorder='big')
        bArray += bDoc

        self.thisP.fromBytearray(bArray)
        self.assertEqual(expected, self.thisP)
33.785714
79
0.65074
566
4,730
5.353357
0.256184
0.047525
0.025743
0.026403
0.364026
0.246205
0.176238
0.124752
0.104951
0.066667
0
0.017099
0.233404
4,730
139
80
34.028777
0.818533
0.12389
0
0.3125
0
0
0.014535
0
0
0
0
0
0.208333
1
0.166667
false
0
0.03125
0
0.208333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7982421d07b8b666fc8fc840123a2f47aa6edf14
860
py
Python
client-hints/resources/echo-ua-client-hints-received.py
BasixKOR/wpt
aa27d567c10dcdb2aea6884d5155dfaaa177a800
[ "BSD-3-Clause" ]
null
null
null
client-hints/resources/echo-ua-client-hints-received.py
BasixKOR/wpt
aa27d567c10dcdb2aea6884d5155dfaaa177a800
[ "BSD-3-Clause" ]
59
2022-01-19T21:35:57.000Z
2022-03-30T21:35:27.000Z
client-hints/resources/echo-ua-client-hints-received.py
BasixKOR/wpt
aa27d567c10dcdb2aea6884d5155dfaaa177a800
[ "BSD-3-Clause" ]
null
null
null
import importlib

# The module path contains a hyphen, so it cannot be imported with a plain
# `import` statement -- importlib is required.
client_hints_ua_list = importlib.import_module(
    "client-hints.resources.clienthintslist").client_hints_ua_list


def main(request, response):
    """
    Simple handler that sets a response header based on which client hint
    request headers were received.
    """
    # Allow any origin to read the echoed headers (CORS).
    response.headers.append(b"Access-Control-Allow-Origin", b"*")
    response.headers.append(b"Access-Control-Allow-Headers", b"*")
    response.headers.append(b"Access-Control-Expose-Headers", b"*")

    client_hint_headers = client_hints_ua_list()

    request_client_hints = {i: request.headers.get(i)
                            for i in client_hint_headers}

    # Echo each received hint back as "<hint>-received".
    for header in client_hint_headers:
        if request_client_hints[header] is not None:
            response.headers.set(header + b"-received",
                                 request_client_hints[header])

    headers = []
    content = u""
    return 200, headers, content
35.833333
109
0.731395
116
860
5.232759
0.396552
0.126853
0.06425
0.08402
0.192751
0.192751
0.192751
0
0
0
0
0.004149
0.159302
860
23
110
37.391304
0.835408
0.116279
0
0
0
0
0.179625
0.163539
0
0
0
0
0
1
0.071429
false
0
0.142857
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79836d938d077f9ed2ab86fd140347063a4e8fc0
8,892
py
Python
base/vocab.py
thu-spmi/semi-EBM
393e3ea3566dd60c48872a5c573a335e8e802707
[ "Apache-2.0" ]
2
2021-09-18T14:21:24.000Z
2021-12-20T03:39:13.000Z
base/vocab.py
thu-spmi/semi-EBM
393e3ea3566dd60c48872a5c573a335e8e802707
[ "Apache-2.0" ]
null
null
null
base/vocab.py
thu-spmi/semi-EBM
393e3ea3566dd60c48872a5c573a335e8e802707
[ "Apache-2.0" ]
1
2021-09-12T07:02:23.000Z
2021-09-12T07:02:23.000Z
import os import json import numpy as np class Vocab(object): def __init__(self): self.word_to_id = dict() self.count = list() self.words = list() self.to_lower = False # add character information self.chars = list() # ['a', 'b', 'c', 'd', ...] self.char_to_id = dict() # {'a': 0, 'b': 1, ...} self.word_to_chars = list() # [ ['a', 'b', 'c'], ... ] self.word_max_len = 0 self.char_beg_id = 0 self.char_end_id = 0 def load_data(self, file_list): v_count = dict() total_line = 0 total_word = 0 for file in file_list: print('[%s.%s] generate_vocab: ' % (__name__, self.__class__.__name__), file) with open(file, 'rt') as f: for line in f: # to lower if self.to_lower: line = line.lower() for w in line.split(): v_count.setdefault(w, 0) v_count[w] += 1 total_word += 1 total_line += 1 return v_count, total_line, total_word def generate_vocab(self, file_list, cutoff=0, max_size=None, add_beg_token='<s>', add_end_token='</s>', add_unk_token='<unk>', to_lower=False): self.to_lower = to_lower v_count, total_line, total_word = self.load_data(file_list) if add_beg_token is not None: v_count[add_beg_token] = total_line if add_end_token is not None: v_count[add_end_token] = total_line if add_unk_token is not None: v_count[add_unk_token] = 1 print('[%s.%s] vocab_size=' % (__name__, self.__class__.__name__), len(v_count)) print('[%s.%s] total_line=' % (__name__, self.__class__.__name__), total_line) print('[%s.%s] total_word=' % (__name__, self.__class__.__name__), total_word) # cutoff v_list = [] ignore_list = [add_beg_token, add_end_token, add_unk_token] for w, count in v_count.items(): if w in ignore_list: continue if count > cutoff: v_list.append((w, count)) # to handle the words with the same counts v_list = sorted(v_list, key=lambda x: x[0]) # sorted as the word v_list = sorted(v_list, key=lambda x: -x[1]) # sorted as the count ignore_dict = dict() for ignore_token in reversed(ignore_list): if ignore_token is not None and ignore_token not in ignore_dict: v_list.insert(0, 
(ignore_token, v_count[ignore_token])) ignore_dict[ignore_token] = 0 print('[%s.%s] vocab_size(after_cutoff)=' % (__name__, self.__class__.__name__), len(v_list)) if max_size is not None: print('[%s.%s] vocab max_size=()' % (__name__, self.__class__.__name__), max_size) unk_count = sum(x[1] for x in v_list[max_size:]) v_list = v_list[0: max_size] # revise the unkcount if add_unk_token is not None: for i in range(len(v_list)): if v_list[i][0] == add_unk_token: v_list[i] = (add_unk_token, v_list[i][1] + unk_count) break # create vocab self.count = list() self.words = list() self.word_to_id = dict() for i, (w, count) in enumerate(v_list): self.words.append(w) self.count.append(count) self.word_to_id[w] = i return self def write(self, fname): with open(fname, 'wt') as f: f.write('to_lower = %d\n' % int(self.to_lower)) for i in range(len(self.words)): f.write('{}\t{}\t{}'.format(i, self.words[i], self.count[i])) if self.word_to_chars: s = ' '.join('{}'.format(k) for k in self.word_to_chars[i]) f.write('\t{}'.format(s)) f.write('\n') # write a extra char vocabulary if self.chars: with open(fname + '.chr', 'wt') as f: f.write('char_beg_id = %d\n' % self.char_beg_id) f.write('char_end_id = %d\n' % self.char_end_id) f.write('word_max_len = %d\n' % self.word_max_len) f.write('id \t char\n') for i in range(len(self.chars)): f.write('{}\t{}\n'.format(i, self.chars[i])) def read(self, fname): self.words = list() self.count = list() self.word_to_id = dict() self.word_to_chars = list() with open(fname, 'rt') as f: self.to_lower = bool(int(f.readline().split()[-1])) for line in f: a = line.split() i = int(a[0]) w = a[1] n = int(a[2]) self.words.append(w) self.count.append(n) self.word_to_id[w] = i # read word_to_chars if len(a) > 3: self.word_to_chars.append([int(k) for k in a[3:]]) if self.word_to_chars: # read char vocab self.chars = list() self.char_to_id = dict() with open(fname + '.chr', 'rt') as f: self.char_beg_id = int(f.readline().split()[-1]) self.char_end_id = 
int(f.readline().split()[-1]) self.word_max_len = int(f.readline().split()[-1]) f.readline() for line in f: a = line.split() i = int(a[0]) c = a[1] self.chars.append(c) self.char_to_id[c] = i return self def create_chars(self, add_char_beg='<s>', add_char_end='</s>'): if self.chars: return # process the word and split to chars c_dict = dict() for w in self.words: for c in list(w): c_dict.setdefault(c, 0) if add_char_beg is not None: c_dict.setdefault(add_char_beg) if add_char_end is not None: c_dict.setdefault(add_char_end) self.chars = sorted(c_dict.keys()) self.char_to_id = dict([(c, i) for i, c in enumerate(self.chars)]) self.char_beg_id = self.char_to_id[add_char_beg] self.char_end_id = self.char_to_id[add_char_end] self.word_to_chars = [] for w in self.words: chr_ids = [self.char_to_id[c] for c in w] chr_ids.insert(0, self.char_beg_id) chr_ids.append(self.char_end_id) self.word_to_chars.append(chr_ids) self.word_max_len = max([len(x) for x in self.word_to_chars]) def words_to_ids(self, word_list, unk_token='<unk>'): id_list = [] for w in word_list: if self.to_lower: w = w.lower() if w in self.word_to_id: id_list.append(self.word_to_id[w]) elif unk_token is not None and unk_token in self.word_to_id: id_list.append(self.word_to_id[unk_token]) else: raise KeyError('[%s.%s] cannot find the word = %s' % (__name__, self.__class__.__name__, w)) return id_list def ids_to_words(self, id_list): return [self.words[i] for i in id_list] def get_size(self): return len(self.words) def get_char_size(self): if not self.chars: raise TypeError('[Vocab] no char information!!') return len(self.chars) def __contains__(self, item): return item in self.word_to_id class VocabX(Vocab): def __init__(self, total_level=2, read_level=0): super().__init__() self.total_level = total_level self.read_level = read_level def load_data(self, file_list): v_count = dict() total_line = 0 total_word = 0 for file in file_list: print('[%s.%s] generate_vocab: ' % (__name__, self.__class__.__name__), 
file) cur_line = 0 with open(file, 'rt') as f: for line in f: if cur_line % self.total_level == self.read_level: for w in line.split(): v_count.setdefault(w, 0) v_count[w] += 1 total_word += 1 total_line += 1 cur_line += 1 return v_count, total_line, total_word
36.743802
109
0.501012
1,203
8,892
3.386534
0.102244
0.047128
0.046637
0.029455
0.459745
0.302896
0.249877
0.17354
0.158321
0.129111
0
0.008026
0.383491
8,892
241
110
36.896266
0.735133
0.036887
0
0.338542
0
0
0.045504
0.00301
0
0
0
0
0
1
0.067708
false
0
0.015625
0.015625
0.145833
0.036458
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7983823b3cdf770a3c86d666eba52cc7de43379b
2,749
py
Python
dpfinder/searcher/statistics/ratio/ratio_cdf.py
barryZZJ/dp-finder
ddf8e3589110b4b35920b437d605b45dd56291da
[ "MIT" ]
15
2018-10-19T05:48:17.000Z
2022-02-14T20:34:16.000Z
dpfinder/searcher/statistics/ratio/ratio_cdf.py
barryZZJ/dp-finder
ddf8e3589110b4b35920b437d605b45dd56291da
[ "MIT" ]
1
2020-04-22T22:55:39.000Z
2020-04-22T22:55:39.000Z
dpfinder/searcher/statistics/ratio/ratio_cdf.py
barryZZJ/dp-finder
ddf8e3589110b4b35920b437d605b45dd56291da
[ "MIT" ]
9
2018-11-13T12:37:55.000Z
2021-11-22T11:11:52.000Z
# ==BEGIN LICENSE== # # MIT License # # Copyright (c) 2018 SRI Lab, ETH Zurich # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# # ==END LICENSE== import ctypes import os from dpfinder.logging import logger from dpfinder.utils.redirect import redirect_stdout path = os.path.dirname(__file__) lib = ctypes.cdll.LoadLibrary(path + '/libratio.so') joint_gauss_fraction = getattr(lib, "ratio_cdf_extern", None) joint_gauss_fraction.restype = ctypes.c_double ratio_pdf_extern = getattr(lib, "ratio_pdf_extern", None) ratio_pdf_extern.restype = ctypes.c_double def cdf(lower, upper, mx, my, sx, sy, rho): lower = ctypes.c_double(lower) upper = ctypes.c_double(upper) mx = ctypes.c_double(mx) my = ctypes.c_double(my) sx = ctypes.c_double(sx) sy = ctypes.c_double(sy) rho = ctypes.c_double(rho) return joint_gauss_fraction(lower, upper, mx, my, sx, sy, rho) def pdf(w, mx, my, sx, sy, rho): w = ctypes.c_double(w) mx = ctypes.c_double(mx) my = ctypes.c_double(my) sx = ctypes.c_double(sx) sy = ctypes.c_double(sy) rho = ctypes.c_double(rho) return ratio_pdf_extern(w, mx, my, sx, sy, rho) ratio_confidence_interval_C = getattr(lib, "ratio_confidence_interval_extern", None) ratio_confidence_interval_C.restype = ctypes.c_double def ratio_confidence_interval(p1, p2, d1, d2, corr, center, confidence, err_goal): p1 = ctypes.c_double(p1) p2 = ctypes.c_double(p2) d1 = ctypes.c_double(d1) d2 = ctypes.c_double(d2) corr = ctypes.c_double(corr) center = ctypes.c_double(center) confidence = ctypes.c_double(confidence) err_goal = ctypes.c_double(err_goal) with redirect_stdout.redirect(output=logger.debug): ret = ratio_confidence_interval_C(p1, p2, d1, d2, corr, center, confidence, err_goal) return ret
33.938272
87
0.75773
436
2,749
4.630734
0.34633
0.08321
0.154532
0.015849
0.182268
0.159485
0.147598
0.126795
0.126795
0.092125
0
0.00854
0.148054
2,749
80
88
34.3625
0.853544
0.404147
0
0.243902
0
0
0.047146
0.019851
0
0
0
0
0
1
0.073171
false
0
0.097561
0
0.243902
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79847f99b109a82c82b372de1f58d473f2ed39a1
2,611
py
Python
psana/psana/pscalib/dcs/lcls1/DCConfigParameters.py
JBlaschke/lcls2
30523ef069e823535475d68fa283c6387bcf817b
[ "BSD-3-Clause-LBNL" ]
16
2017-11-09T17:10:56.000Z
2022-03-09T23:03:10.000Z
psana/psana/pscalib/dcs/lcls1/DCConfigParameters.py
JBlaschke/lcls2
30523ef069e823535475d68fa283c6387bcf817b
[ "BSD-3-Clause-LBNL" ]
6
2017-12-12T19:30:05.000Z
2020-07-09T00:28:33.000Z
psana/psana/pscalib/dcs/lcls1/DCConfigParameters.py
JBlaschke/lcls2
30523ef069e823535475d68fa283c6387bcf817b
[ "BSD-3-Clause-LBNL" ]
25
2017-09-18T20:02:43.000Z
2022-03-27T22:27:42.000Z
####!/usr/bin/env python #---------------------------- """ :py:class:`DCConfigParameters` - class supporting configuration parameters for application ========================================================================================== See: * :py:class:`DCStore` * :py:class:`DCType` * :py:class:`DCRange` * :py:class:`DCVersion` * :py:class:`DCBase` * :py:class:`DCInterface` * :py:class:`DCUtils` * :py:class:`DCDetectorId` * :py:class:`DCConfigParameters` * :py:class:`DCFileName` * :py:class:`DCLogger` * :py:class:`DCMethods` * :py:class:`DCEmail` This software was developed for the SIT project. If you use all or part of it, please give an appropriate acknowledgment. Created: 2016-05-17 by Mikhail Dubrovin """ #---------------------------- from PSCalib.DCLogger import log from CalibManager.ConfigParameters import ConfigParameters #---------------------------- class DCConfigParameters(ConfigParameters) : """A storage of configuration parameters for Detector Calibration Store (DCS) project. """ def __init__(self, fname=None) : """Constructor. - fname the file name with configuration parameters, if not specified then default value. 
""" ConfigParameters.__init__(self) self.name = self.__class__.__name__ self.fname_cp = 'confpars-dcs.txt' # Re-define default config file name log.info('In %s c-tor', self.name) self.declareParameters() self.readParametersFromFile(fname) #----------------------------- def declareParameters(self) : # Possible typs for declaration : 'str', 'int', 'long', 'float', 'bool' # Logger.py self.log_level = self.declareParameter(name='LOG_LEVEL_OF_MSGS', val_def='info', type='str' ) self.log_file = self.declareParameter(name='LOG_FILE_NAME', val_def='./log.txt', type='str' ) self.dir_repo = self.declareParameter(name='CDS_DIR_REPO', val_def='/reg/d/psdm/detector/calib', type='str' ) #self.dir_repo = self.declareParameter(name='CDS_DIR_REPO', val_def='/reg/g/psdm/detector/calib', type='str' ) #------------------------------ cp = DCConfigParameters() #------------------------------ def test_DCConfigParameters() : log.setPrintBits(0377) cp.readParametersFromFile() cp.printParameters() cp.log_level.setValue('debug') cp.saveParametersInFile() #------------------------------ if __name__ == "__main__" : import sys test_DCConfigParameters() sys.exit(0) #------------------------------
32.6375
124
0.579088
266
2,611
5.515038
0.466165
0.066803
0.06544
0.03681
0.111111
0.083163
0.083163
0.083163
0.083163
0.083163
0
0.006075
0.180391
2,611
79
125
33.050633
0.679439
0.184987
0
0
0
0
0.111301
0.02226
0
0
0
0
0
0
null
null
0
0.12
null
null
0.04
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
798a3cc03589f18c700fa6bcef79b697f3535128
22,751
py
Python
limonero/models.py
eubr-bigsea/limonero
54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5
[ "Apache-2.0" ]
1
2018-01-01T20:35:43.000Z
2018-01-01T20:35:43.000Z
limonero/models.py
eubr-bigsea/limonero
54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5
[ "Apache-2.0" ]
37
2017-02-24T17:07:25.000Z
2021-09-02T14:49:19.000Z
limonero/models.py
eubr-bigsea/limonero
54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5
[ "Apache-2.0" ]
2
2019-11-05T13:45:45.000Z
2020-11-13T22:02:37.000Z
import datetime import json from flask_sqlalchemy import SQLAlchemy from sqlalchemy import Column, Integer, String, Boolean, ForeignKey, Float, \ Enum, DateTime, Numeric, Text, Unicode, UnicodeText from sqlalchemy import event from sqlalchemy.dialects.mysql import LONGTEXT from sqlalchemy.sql import func from sqlalchemy.orm import relationship, backref from sqlalchemy.schema import UniqueConstraint from sqlalchemy_i18n import make_translatable, translation_base, Translatable make_translatable(options={'locales': ['pt', 'en'], 'auto_create_locales': True, 'fallback_locale': 'en'}) db = SQLAlchemy() # noinspection PyClassHasNoInit class DataSourceFormat: CSV = 'CSV' CUSTOM = 'CUSTOM' GEO_JSON = 'GEO_JSON' JDBC = 'JDBC' IMAGE_FOLDER = 'IMAGE_FOLDER' DATA_FOLDER = 'DATA_FOLDER' HAR_IMAGE_FOLDER = 'HAR_IMAGE_FOLDER' HDF5 = 'HDF5' HIVE = 'HIVE' JSON = 'JSON' NPY = 'NPY' PICKLE = 'PICKLE' PARQUET = 'PARQUET' SAV = 'SAV' SHAPEFILE = 'SHAPEFILE' TAR_IMAGE_FOLDER = 'TAR_IMAGE_FOLDER' TEXT = 'TEXT' VIDEO_FOLDER = 'VIDEO_FOLDER' XML_FILE = 'XML_FILE' UNKNOWN = 'UNKNOWN' @staticmethod def values(): return [n for n in list(DataSourceFormat.__dict__.keys()) if n[0] != '_' and n != 'values'] # noinspection PyClassHasNoInit class DataSourceInitialization: NO_INITIALIZED = 'NO_INITIALIZED' INITIALIZING = 'INITIALIZING' INITIALIZED = 'INITIALIZED' @staticmethod def values(): return [n for n in list(DataSourceInitialization.__dict__.keys()) if n[0] != '_' and n != 'values'] # noinspection PyClassHasNoInit class ModelType: KERAS = 'KERAS' MLEAP = 'MLEAP' PERFORMANCE_SPARK = 'PERFORMANCE_SPARK' PERFORMANCE_KERAS = 'PERFORMANCE_KERAS' SPARK_ML_CLASSIFICATION = 'SPARK_ML_CLASSIFICATION' SPARK_ML_REGRESSION = 'SPARK_ML_REGRESSION' SPARK_MLLIB_CLASSIFICATION = 'SPARK_MLLIB_CLASSIFICATION' UNSPECIFIED = 'UNSPECIFIED' @staticmethod def values(): return [n for n in list(ModelType.__dict__.keys()) if n[0] != '_' and n != 'values'] # noinspection PyClassHasNoInit class DeploymentStatus: 
NOT_DEPLOYED = 'NOT_DEPLOYED' ERROR = 'ERROR' EDITING = 'EDITING' SAVED = 'SAVED' RUNNING = 'RUNNING' STOPPED = 'STOPPED' SUSPENDED = 'SUSPENDED' PENDING = 'PENDING' DEPLOYED = 'DEPLOYED' @staticmethod def values(): return [n for n in list(DeploymentStatus.__dict__.keys()) if n[0] != '_' and n != 'values'] # noinspection PyClassHasNoInit class StorageType: MONGODB = 'MONGODB' ELASTIC_SEARCH = 'ELASTIC_SEARCH' HDFS = 'HDFS' HIVE = 'HIVE' HIVE_WAREHOUSE = 'HIVE_WAREHOUSE' KAFKA = 'KAFKA' LOCAL = 'LOCAL' JDBC = 'JDBC' CASSANDRA = 'CASSANDRA' @staticmethod def values(): return [n for n in list(StorageType.__dict__.keys()) if n[0] != '_' and n != 'values'] # noinspection PyClassHasNoInit class DataType: BINARY = 'BINARY' CHARACTER = 'CHARACTER' DATE = 'DATE' DATETIME = 'DATETIME' DECIMAL = 'DECIMAL' DOUBLE = 'DOUBLE' ENUM = 'ENUM' FILE = 'FILE' FLOAT = 'FLOAT' INTEGER = 'INTEGER' LAT_LONG = 'LAT_LONG' LONG = 'LONG' TEXT = 'TEXT' TIME = 'TIME' TIMESTAMP = 'TIMESTAMP' VECTOR = 'VECTOR' @staticmethod def values(): return [n for n in list(DataType.__dict__.keys()) if n[0] != '_' and n != 'values'] # noinspection PyClassHasNoInit class AttributeForeignKeyDirection: FROM = 'FROM' TO = 'TO' @staticmethod def values(): return [n for n in list(AttributeForeignKeyDirection.__dict__.keys()) if n[0] != '_' and n != 'values'] # noinspection PyClassHasNoInit class PrivacyRiskType: IDENTIFICATION = 'IDENTIFICATION' @staticmethod def values(): return [n for n in list(PrivacyRiskType.__dict__.keys()) if n[0] != '_' and n != 'values'] # noinspection PyClassHasNoInit class PermissionType: READ = 'READ' WRITE = 'WRITE' MANAGE = 'MANAGE' @staticmethod def values(): return [n for n in list(PermissionType.__dict__.keys()) if n[0] != '_' and n != 'values'] # noinspection PyClassHasNoInit class AnonymizationTechnique: ENCRYPTION = 'ENCRYPTION' GENERALIZATION = 'GENERALIZATION' SUPPRESSION = 'SUPPRESSION' MASK = 'MASK' NO_TECHNIQUE = 'NO_TECHNIQUE' @staticmethod def values(): return [n for n in 
list(AnonymizationTechnique.__dict__.keys()) if n[0] != '_' and n != 'values'] # noinspection PyClassHasNoInit class PrivacyType: IDENTIFIER = 'IDENTIFIER' QUASI_IDENTIFIER = 'QUASI_IDENTIFIER' SENSITIVE = 'SENSITIVE' NON_SENSITIVE = 'NON_SENSITIVE' @staticmethod def values(): return [n for n in list(PrivacyType.__dict__.keys()) if n[0] != '_' and n != 'values'] # Association tables definition class Attribute(db.Model): """ Data source attribute. """ __tablename__ = 'attribute' # Fields id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) description = Column(String(500)) type = Column(Enum(*list(DataType.values()), name='DataTypeEnumType'), nullable=False) size = Column(Integer) precision = Column(Integer) scale = Column(Integer) nullable = Column(Boolean, default=False, nullable=False) enumeration = Column(Boolean, default=False, nullable=False) missing_representation = Column(String(200)) feature = Column(Boolean, default=True, nullable=False) label = Column(Boolean, default=True, nullable=False) distinct_values = Column(Integer) mean_value = Column(Float) median_value = Column(String(200)) max_value = Column(String(200)) min_value = Column(String(200)) std_deviation = Column(Float) missing_total = Column(String(200)) deciles = Column(LONGTEXT) format = Column(String(100)) key = Column(Boolean, default=False, nullable=False) # Associations data_source_id = Column(Integer, ForeignKey("data_source.id", name="fk_attribute_data_source_id"), nullable=False, index=True) data_source = relationship( "DataSource", overlaps='attributes', foreign_keys=[data_source_id], backref=backref("attributes", cascade="all, delete-orphan")) attribute_privacy = relationship( "AttributePrivacy", uselist=False, back_populates="attribute", lazy='joined') def __str__(self): return self.name def __repr__(self): return '<Instance {}: {}>'.format(self.__class__, self.id) class AttributeForeignKey(db.Model): """ Attribute that form a foreign key in data sources """ 
__tablename__ = 'attribute_foreign_key' # Fields id = Column(Integer, primary_key=True) order = Column(Integer, nullable=False) direction = Column(Enum(*list(AttributeForeignKeyDirection.values()), name='AttributeForeignKeyDirectionEnumType'), nullable=False) # Associations foreign_key_id = Column(Integer, ForeignKey("data_source_foreign_key.id", name="fk_attribute_foreign_key_foreign_key_id"), nullable=False, index=True) foreign_key = relationship( "DataSourceForeignKey", overlaps='attributes', foreign_keys=[foreign_key_id], backref=backref("attributes", cascade="all, delete-orphan")) from_attribute_id = Column(Integer, ForeignKey("attribute.id", name="fk_attribute_foreign_key_from_attribute_id"), nullable=False, index=True) from_attribute = relationship( "Attribute", overlaps='foreign_keys', foreign_keys=[from_attribute_id], backref=backref("foreign_keys", cascade="all, delete-orphan")) to_attribute_id = Column(Integer, ForeignKey("attribute.id", name="fk_attribute_foreign_key_to_attribute_id"), nullable=False, index=True) to_attribute = relationship( "Attribute", overlaps='references', foreign_keys=[to_attribute_id], backref=backref("references", cascade="all, delete-orphan")) def __str__(self): return self.order def __repr__(self): return '<Instance {}: {}>'.format(self.__class__, self.id) class AttributePrivacy(db.Model): """ Privacy configuration for an attribute. 
""" __tablename__ = 'attribute_privacy' # Fields id = Column(Integer, primary_key=True) attribute_name = Column(String(200), nullable=False) data_type = Column(Enum(*list(DataType.values()), name='DataTypeEnumType')) privacy_type = Column(Enum(*list(PrivacyType.values()), name='PrivacyTypeEnumType'), nullable=False) category_technique = Column(String(100)) anonymization_technique = Column(Enum(*list(AnonymizationTechnique.values()), name='AnonymizationTechniqueEnumType'), nullable=False) hierarchical_structure_type = Column(String(100)) privacy_model_technique = Column(String(100)) hierarchy = Column(LONGTEXT) category_model = Column(LONGTEXT) privacy_model = Column(LONGTEXT) privacy_model_parameters = Column(LONGTEXT) unlock_privacy_key = Column(String(400)) is_global_law = Column(Boolean, default=False) # Associations attribute_id = Column(Integer, ForeignKey("attribute.id", name="fk_attribute_privacy_attribute_id"), index=True) attribute = relationship( "Attribute", overlaps='attribute_privacy', foreign_keys=[attribute_id], back_populates="attribute_privacy") attribute_privacy_group_id = Column(Integer, ForeignKey("attribute_privacy_group.id", name="fk_attribute_privacy_attribute_privacy_group_id"), index=True) attribute_privacy_group = relationship( "AttributePrivacyGroup", overlaps='attribute_privacy', foreign_keys=[attribute_privacy_group_id], backref=backref("attribute_privacy", cascade="all, delete-orphan")) def __str__(self): return self.attribute_name def __repr__(self): return '<Instance {}: {}>'.format(self.__class__, self.id) class AttributePrivacyGroup(db.Model): """ Groups attributes with same semantic """ __tablename__ = 'attribute_privacy_group' # Fields id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) user_id = Column(Integer, nullable=False) def __str__(self): return self.name def __repr__(self): return '<Instance {}: {}>'.format(self.__class__, self.id) class DataSource(db.Model): """ Data source in Lemonade 
system (anything that stores data. """ __tablename__ = 'data_source' # Fields id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) description = Column(String(500)) enabled = Column(Boolean, default=True, nullable=False) statistics_process_counter = Column(Integer, default=0, nullable=False) read_only = Column(Boolean, default=True, nullable=False) privacy_aware = Column(Boolean, default=False, nullable=False) url = Column(String(200), nullable=False) created = Column(DateTime, default=func.now(), nullable=False) updated = Column(DateTime, default=datetime.datetime.utcnow, nullable=False, onupdate=datetime.datetime.utcnow) format = Column(Enum(*list(DataSourceFormat.values()), name='DataSourceFormatEnumType'), nullable=False) initialization = Column(Enum(*list(DataSourceInitialization.values()), name='DataSourceInitializationEnumType'), default=DataSourceInitialization.INITIALIZED, nullable=False) initialization_job_id = Column(String(200)) provenience = Column(LONGTEXT) estimated_rows = Column(Integer, default=0) estimated_size_in_mega_bytes = Column(Numeric(10, 2)) expiration = Column(String(200)) user_id = Column(Integer) user_login = Column(String(50)) user_name = Column(String(200)) tags = Column(String(100)) temporary = Column(Boolean, default=False, nullable=False) workflow_id = Column(Integer) task_id = Column(String(200)) attribute_delimiter = Column(String(20)) record_delimiter = Column(String(20)) text_delimiter = Column(String(20)) is_public = Column(Boolean, default=False, nullable=False) treat_as_missing = Column(LONGTEXT) encoding = Column(String(200)) is_first_line_header = Column(Boolean, default=0, nullable=False) is_multiline = Column(Boolean, default=0, nullable=False) command = Column(LONGTEXT) is_lookup = Column(Boolean, default=0, nullable=False) use_in_workflow = Column(Boolean, default=0, nullable=False, index=True) # Associations storage_id = Column(Integer, ForeignKey("storage.id", 
name="fk_data_source_storage_id"), nullable=False, index=True) storage = relationship( "Storage", overlaps='storage', foreign_keys=[storage_id]) def __str__(self): return self.name def __repr__(self): return '<Instance {}: {}>'.format(self.__class__, self.id) class DataSourceForeignKey(db.Model): """ Foreign key in data sources """ __tablename__ = 'data_source_foreign_key' # Fields id = Column(Integer, primary_key=True) # Associations from_source_id = Column(Integer, ForeignKey("data_source.id", name="fk_data_source_foreign_key_from_source_id"), nullable=False, index=True) from_source = relationship( "DataSource", overlaps='foreign_keys', foreign_keys=[from_source_id], backref=backref("foreign_keys", cascade="all, delete-orphan")) to_source_id = Column(Integer, ForeignKey("data_source.id", name="fk_data_source_foreign_key_to_source_id"), nullable=False, index=True) to_source = relationship( "DataSource", overlaps='references', foreign_keys=[to_source_id], backref=backref("references", cascade="all, delete-orphan")) def __str__(self): return 'DataSourceForeignKey' def __repr__(self): return '<Instance {}: {}>'.format(self.__class__, self.id) class DataSourcePermission(db.Model): """ Associate users and permissions """ __tablename__ = 'data_source_permission' # Fields id = Column(Integer, primary_key=True) permission = Column(Enum(*list(PermissionType.values()), name='PermissionTypeEnumType'), nullable=False) user_id = Column(Integer, nullable=False) user_login = Column(String(50), nullable=False) user_name = Column(String(200), nullable=False) # Associations data_source_id = Column(Integer, ForeignKey("data_source.id", name="fk_data_source_permission_data_source_id"), nullable=False, index=True) data_source = relationship( "DataSource", overlaps='permissions', foreign_keys=[data_source_id], backref=backref("permissions", cascade="all, delete-orphan")) def __str__(self): return self.permission def __repr__(self): return '<Instance {}: {}>'.format(self.__class__, 
self.id) class Model(db.Model): """ Machine learning model """ __tablename__ = 'model' # Fields id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) enabled = Column(Boolean, default=True, nullable=False) created = Column(DateTime, default=func.now(), nullable=False) path = Column(String(500), nullable=False) class_name = Column(String(500), nullable=False) type = Column(Enum(*list(ModelType.values()), name='ModelTypeEnumType'), default=ModelType.UNSPECIFIED, nullable=False) deployment_status = Column(Enum(*list(DeploymentStatus.values()), name='DeploymentStatusEnumType'), default=DeploymentStatus.NOT_DEPLOYED, nullable=False) user_id = Column(Integer, nullable=False) user_login = Column(String(50), nullable=False) user_name = Column(String(200), nullable=False) workflow_id = Column(Integer) workflow_name = Column(String(200)) task_id = Column(String(200)) job_id = Column(Integer) # Associations storage_id = Column(Integer, ForeignKey("storage.id", name="fk_model_storage_id"), nullable=False, index=True) storage = relationship( "Storage", overlaps='storage', foreign_keys=[storage_id]) def __str__(self): return self.name def __repr__(self): return '<Instance {}: {}>'.format(self.__class__, self.id) class ModelPermission(db.Model): """ Associate users and permissions to models """ __tablename__ = 'model_permission' # Fields id = Column(Integer, primary_key=True) permission = Column(Enum(*list(PermissionType.values()), name='PermissionTypeEnumType'), nullable=False) user_id = Column(Integer, nullable=False) user_login = Column(String(50), nullable=False) user_name = Column(String(200), nullable=False) # Associations model_id = Column(Integer, ForeignKey("model.id", name="fk_model_permission_model_id"), nullable=False, index=True) model = relationship( "Model", overlaps='permissions', foreign_keys=[model_id], backref=backref("permissions", cascade="all, delete-orphan")) def __str__(self): return self.permission def __repr__(self): return 
'<Instance {}: {}>'.format(self.__class__, self.id) class PrivacyRisk(db.Model): """ Privacy information associated to the data source """ __tablename__ = 'privacy_risk' # Fields id = Column(Integer, primary_key=True) type = Column(Enum(*list(PrivacyRiskType.values()), name='PrivacyRiskTypeEnumType'), nullable=False) probability = Column(Float) impact = Column(Float) value = Column(Float, nullable=False) detail = Column(LONGTEXT, nullable=False) # Associations data_source_id = Column(Integer, ForeignKey("data_source.id", name="fk_privacy_risk_data_source_id"), nullable=False, index=True) data_source = relationship( "DataSource", overlaps='risks', foreign_keys=[data_source_id], backref=backref("risks", cascade="all, delete-orphan")) def __str__(self): return self.type def __repr__(self): return '<Instance {}: {}>'.format(self.__class__, self.id) class Storage(db.Model): """ Type of storage used by data sources """ __tablename__ = 'storage' # Fields id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) type = Column(Enum(*list(StorageType.values()), name='StorageTypeEnumType'), nullable=False) enabled = Column(Boolean, default=True, nullable=False) url = Column(String(1000), nullable=False) client_url = Column(String(1000)) extra_params = Column(LONGTEXT) def __str__(self): return self.name def __repr__(self): return '<Instance {}: {}>'.format(self.__class__, self.id) class StoragePermission(db.Model): """ Associate users and permissions """ __tablename__ = 'storage_permission' # Fields id = Column(Integer, primary_key=True) permission = Column(Enum(*list(PermissionType.values()), name='PermissionTypeEnumType'), nullable=False) user_id = Column(Integer, nullable=False) # Associations storage_id = Column(Integer, ForeignKey("storage.id", name="fk_storage_permission_storage_id"), nullable=False, index=True) storage = relationship( "Storage", overlaps='permissions', foreign_keys=[storage_id], backref=backref("permissions", cascade="all, 
delete-orphan")) def __str__(self): return self.permission def __repr__(self): return '<Instance {}: {}>'.format(self.__class__, self.id)
33.213139
107
0.594611
2,168
22,751
5.976476
0.147601
0.069229
0.040519
0.027012
0.541329
0.50876
0.420313
0.398626
0.379023
0.326233
0
0.008925
0.30069
22,751
684
108
33.261696
0.805468
0.045668
0
0.45951
0
0
0.132377
0.042074
0
0
0
0
0
1
0.065913
false
0
0.018832
0.065913
0.645951
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
798d2621ee7b1e2db86f8b2f4ca02a4a32de49fa
398
py
Python
binary_counting.py
Lioheart/python-samples
de5f82b20fa216178e0084b7693e42df4fcaf883
[ "Unlicense" ]
null
null
null
binary_counting.py
Lioheart/python-samples
de5f82b20fa216178e0084b7693e42df4fcaf883
[ "Unlicense" ]
null
null
null
binary_counting.py
Lioheart/python-samples
de5f82b20fa216178e0084b7693e42df4fcaf883
[ "Unlicense" ]
null
null
null
""" Przemienia liczbę na wartość binarną i zwraca sumę jedynek występującą w wartości binarnej Example: The binary representation of 1234 is 10011010010, so the function should return 5 in this case """ def countBits(n): # szybsza metoda # return bin(n).count("1") final = 0 for x in str(bin(n)): if x == '1': final += 1 return final print(countBits(1234))
26.533333
103
0.660804
58
398
4.534483
0.758621
0.030418
0
0
0
0
0
0
0
0
0
0.08
0.246231
398
15
104
26.533333
0.796667
0.590452
0
0
0
0
0.006452
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.285714
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
798d7fd532e84917e31256f8642a90e8a7bd3c0f
2,392
py
Python
cognite/transformations_cli/commands/deploy/transformation_types.py
cognitedata/transformations-cli
7466cd8dcb30829793e0026d0b2eae62c3df5f6b
[ "Apache-2.0" ]
1
2022-02-01T09:54:52.000Z
2022-02-01T09:54:52.000Z
cognite/transformations_cli/commands/deploy/transformation_types.py
cognitedata/transformations-cli
7466cd8dcb30829793e0026d0b2eae62c3df5f6b
[ "Apache-2.0" ]
19
2021-11-12T13:21:17.000Z
2022-03-28T14:34:00.000Z
cognite/transformations_cli/commands/deploy/transformation_types.py
cognitedata/transformations-cli
7466cd8dcb30829793e0026d0b2eae62c3df5f6b
[ "Apache-2.0" ]
null
null
null
from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union class DestinationType(Enum): assets = "assets" timeseries = "timeseries" asset_hierarchy = "asset_hierarchy" events = "events" datapoints = "datapoints" string_datapoints = "string_datapoints" sequences = "sequences" files = "files" labels = "labels" relationships = "relationships" raw = "raw" data_sets = "data_sets" sequence_rows = "sequence_rows" alpha_data_model_instances = "alpha_data_model_instances" # Experimental feature class ActionType(Enum): create = "create" abort = "abort" update = "update" upsert = "upsert" delete = "delete" @dataclass class AuthConfig: api_key: Optional[str] client_id: Optional[str] client_secret: Optional[str] token_url: Optional[str] scopes: Optional[List[str]] cdf_project_name: Optional[str] audience: Optional[str] @dataclass class ReadWriteAuthentication: read: AuthConfig write: AuthConfig @dataclass class DestinationConfig: """ Valid type values are: assets, asset_hierarchy, events, timeseries, datapoints, string_datapoints, raw (needs database and table) """ type: DestinationType raw_database: Optional[str] = None raw_table: Optional[str] = None external_id: Optional[str] = None @dataclass class QueryConfig: file: str @dataclass class ScheduleConfig: interval: str is_paused: bool = False @dataclass class TransformationConfig: """ Master configuration class of a transformation """ external_id: str name: str query: Union[str, QueryConfig] authentication: Union[AuthConfig, ReadWriteAuthentication] schedule: Optional[Union[str, ScheduleConfig]] destination: Union[DestinationType, DestinationConfig] data_set_id: Optional[int] data_set_external_id: Optional[str] notifications: List[str] = field(default_factory=list) shared: bool = True ignore_null_fields: bool = True action: ActionType = ActionType.upsert legacy: bool = False class TransformationConfigError(Exception): """Exception raised for config parser Attributes: message -- 
explanation of the error """ def __init__(self, message: str): self.message = message super().__init__(self.message)
23.45098
133
0.701923
254
2,392
6.440945
0.440945
0.067237
0.047677
0.028117
0
0
0
0
0
0
0
0
0.210702
2,392
101
134
23.683168
0.866525
0.119565
0
0.086957
0
0
0.086048
0.01264
0
0
0
0
0
1
0.014493
false
0
0.043478
0
0.884058
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
798f90e5f4234e1706d47ac52ae151b26363ed30
105
py
Python
autoapi/__init__.py
lhfriedman/sphinx-autoapi
efe815c1df16a616b40b44cb679fda4a4eb7895a
[ "MIT" ]
null
null
null
autoapi/__init__.py
lhfriedman/sphinx-autoapi
efe815c1df16a616b40b44cb679fda4a4eb7895a
[ "MIT" ]
1
2020-07-31T01:19:04.000Z
2020-07-31T01:19:04.000Z
autoapi/__init__.py
lhfriedman/sphinx-autoapi
efe815c1df16a616b40b44cb679fda4a4eb7895a
[ "MIT" ]
null
null
null
""" Sphinx AutoAPI """ from .extension import setup from ._version import __version__, __version_info__
15
51
0.780952
12
105
6
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.133333
105
6
52
17.5
0.791209
0.133333
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
798fc2e845f5bf9a57d8e39d930d847c7c266425
1,851
py
Python
org/openbaton/v2/events.py
openbaton/openbaton-cli
4d2f894c0b6d821fa41e03caca880467de90d8fe
[ "Apache-2.0" ]
2
2017-07-24T11:33:27.000Z
2020-05-09T00:21:49.000Z
org/openbaton/v2/events.py
openbaton/openbaton-cli
4d2f894c0b6d821fa41e03caca880467de90d8fe
[ "Apache-2.0" ]
5
2017-06-09T14:28:50.000Z
2018-07-05T10:46:40.000Z
org/openbaton/v2/events.py
openbaton/openbaton-cli
4d2f894c0b6d821fa41e03caca880467de90d8fe
[ "Apache-2.0" ]
2
2017-06-09T13:27:27.000Z
2017-07-18T21:01:59.000Z
import logging from org.openbaton.v2.cmd import BaseObCmd from org.openbaton.v2.utils import get_result_to_list, get_result_to_show, parse_path_or_json, result_to_str class Events(BaseObCmd): """Command to manage event endpoints: it is possible to: * show details of a specific event endpoint passing an id * list all saved event endpoints * delete a specific event endpoint passing an id * create a specific event endpoint passing a path to a file or directly the json content """ log = logging.getLogger(__name__) keys_to_list = ["id", "name", "description"] keys_to_exclude = [] def find(self, params): if not params: return "ERROR: missing <event-id>" _id = params[0] return result_to_str(get_result_to_show(self.app.ob_client.get_event(_id), excluded_keys=self.keys_to_exclude, _format=self.app.format)) def create(self, params): if not params: return "ERROR: missing <event> or <path-to-json>" event = parse_path_or_json(params[0]) return result_to_str(get_result_to_show(self.app.ob_client.create_event(event), excluded_keys=self.keys_to_exclude, _format=self.app.format)) def delete(self, params): if not params: return "ERROR: missing <event-id>" _id = params[0] self.app.ob_client.delete_event(_id) return "INFO: Deleted event with id %s" % _id def list(self, params=None): return result_to_str( get_result_to_list(self.app.ob_client.list_events(), keys=self.keys_to_list, _format=self.app.format), _format=self.app.format)
39.382979
114
0.611021
243
1,851
4.399177
0.288066
0.067353
0.05145
0.056127
0.449018
0.42189
0.42189
0.333957
0.333957
0.292797
0
0.003885
0.3047
1,851
46
115
40.23913
0.826729
0.15289
0
0.354839
0
0
0.089836
0
0
0
0
0
0
1
0.129032
false
0
0.096774
0.032258
0.580645
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
7990148f4b787430b9e80ecbae6f2daea018109e
6,390
py
Python
tests/bugs/gh_5995_test.py
FirebirdSQL/firebird-qa
96af2def7f905a06f178e2a80a2c8be4a4b44782
[ "MIT" ]
1
2022-02-05T11:37:13.000Z
2022-02-05T11:37:13.000Z
tests/bugs/gh_5995_test.py
FirebirdSQL/firebird-qa
96af2def7f905a06f178e2a80a2c8be4a4b44782
[ "MIT" ]
1
2021-09-03T11:47:00.000Z
2021-09-03T12:42:10.000Z
tests/bugs/gh_5995_test.py
FirebirdSQL/firebird-qa
96af2def7f905a06f178e2a80a2c8be4a4b44782
[ "MIT" ]
1
2021-06-30T14:14:16.000Z
2021-06-30T14:14:16.000Z
#coding:utf-8 # # id: bugs.gh_5995 # title: Connection to server may hang when working with encrypted databases over non-TCP protocol [CORE5730] # decription: # https://github.com/FirebirdSQL/firebird/issues/5995 # # Test implemented only to be run on Windows. # It assumes that there are files keyholder.dll and keyholder.conf in the %FIREBIRD_HOME%\\plugins dir. # These files were provided by IBSurgeon and added during fbt_run prepare phase by batch scenario (qa_rundaily). # File keyholder.conf initially contains several keys. # # If we make this file EMPTY then usage of XNET and WNET protocols became improssible before this ticket was fixed. # Great thanks to Alex for suggestions. # # Confirmed bug on 3.0.1.32609: ISQL hangs on attempt to connect to database when file plugins\\keyholder.conf is empty. # In order to properly finish test, we have to kill hanging ISQL and change DB state to full shutdown (with subsequent # returning it to online) - fortunately, connection using TCP remains avaliable in this case. # # Checked on: # 5.0.0.181 SS; 5.0.0.169 CS; # 4.0.1.2578 SS; 4.0.1.2578 CS; # 3.0.8.33489 SS; 3.0.8.33476 CS. 
# # tracker_id: # min_versions: ['3.0.4'] # versions: 3.0.4 # qmid: None import pytest from firebird.qa import db_factory, python_act, Action # version: 3.0.4 # resources: None substitutions_1 = [] init_script_1 = """""" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # # import os # import subprocess # from subprocess import Popen # import datetime # import time # import shutil # import re # import fdb # # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password # engine = db_conn.engine_version # db_name = db_conn.database_name # db_conn.close() # # svc = fdb.services.connect(host='localhost', user=user_name, password=user_password) # FB_HOME = svc.get_home_directory() # svc.close() # # #-------------------------------------------- # # def flush_and_close( file_handle ): # # https://docs.python.org/2/library/os.html#os.fsync # # If you're starting with a Python file object f, # # first do f.flush(), and # # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk. # global os # # file_handle.flush() # if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull: # # otherwise: "OSError: [Errno 9] Bad file descriptor"! 
# os.fsync(file_handle.fileno()) # file_handle.close() # # #-------------------------------------------- # # def cleanup( f_names_list ): # global os # for i in range(len( f_names_list )): # if type(f_names_list[i]) == file: # del_name = f_names_list[i].name # elif type(f_names_list[i]) == str: # del_name = f_names_list[i] # else: # print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.') # print('type(f_names_list[i])=',type(f_names_list[i])) # del_name = None # # if del_name and os.path.isfile( del_name ): # os.remove( del_name ) # # #-------------------------------------------- # # # dts = datetime.datetime.now().strftime("%y%m%d_%H%M%S") # # kholder_cur = os.path.join( FB_HOME, 'plugins', 'keyholder.conf') # kholder_bak = os.path.join( context['temp_directory'], 'keyholder'+dts+'.bak') # # shutil.copy2( kholder_cur, kholder_bak) # # # Make file %FB_HOME%\\plugins\\keyholder.conf empty: # with open(kholder_cur,'w') as f: # pass # # MAX_SECONDS_TO_WAIT = 3 # # # Trying to establish connection to database using WNET and XNET protocols. # # Async. launch of ISQL with check that it will finished within some reasonable time (and w/o errors). # # If it will hang - kill (this is bug dexcribed in the ticket) # for p in ('wnet', 'xnet'): # f_isql_sql=open(os.path.join(context['temp_directory'],'tmp_gh_5995.'+p+'.sql'),'w') # f_isql_sql.write('set list on; select mon$remote_protocol from mon$attachments where mon$attachment_id = current_connection;') # flush_and_close( f_isql_sql ) # # protocol_conn_string = ''.join( (p, '://', db_name) ) # f_isql_log=open( os.path.join(context['temp_directory'],'tmp_gh_5995.'+p+'.log'), 'w') # p_isql = Popen([ context['isql_path'], protocol_conn_string, "-i", f_isql_sql.name], stdout=f_isql_log, stderr=subprocess.STDOUT ) # # time.sleep(0.2) # for i in range(0,MAX_SECONDS_TO_WAIT): # # Check if child process has terminated. Set and return returncode attribute. Otherwise, returns None. 
# p_isql.poll() # if p_isql.returncode is None: # # A None value indicates that the process has not terminated yet. # time.sleep(1) # if i < MAX_SECONDS_TO_WAIT-1: # continue # else: # f_isql_log.write( '\\nISQL process %d hangs for %d seconds and is forcedly killed.' % (p_isql.pid, MAX_SECONDS_TO_WAIT) ) # p_isql.terminate() # # flush_and_close(f_isql_log) # # with open(f_isql_log.name,'r') as f: # for line in f: # if line: # print(line) # # cleanup((f_isql_sql,f_isql_log)) # # shutil.move( kholder_bak, kholder_cur) # # # ::: NOTE ::: We have to change DB state to full shutdown and bring it back online # # in order to prevent "Object in use" while fbtest will try to drop this DB # ##################################### # runProgram('gfix',[dsn,'-shut','full','-force','0']) # runProgram('gfix',[dsn,'-online']) # # #--- act_1 = python_act('db_1', substitutions=substitutions_1) expected_stdout_1 = """ MON$REMOTE_PROTOCOL WNET MON$REMOTE_PROTOCOL XNET """ @pytest.mark.version('>=3.0.4') @pytest.mark.platform('Windows') @pytest.mark.xfail def test_1(act_1: Action): pytest.fail("Test not IMPLEMENTED")
38.035714
140
0.596557
873
6,390
4.201604
0.367698
0.014995
0.024537
0.020992
0.097056
0.056707
0.023991
0.023991
0.023991
0.023991
0
0.022133
0.25759
6,390
167
141
38.263473
0.751054
0.869327
0
0
0
0
0.189953
0
0
0
0
0
0
1
0.066667
false
0
0.133333
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
7990913dfb319b60a3a689bb4ec8e33cb489297d
10,799
py
Python
mednickdb_pyapi/test_mednickdb_usecases.py
MednickLab/python_module
818763a70d1058e72ddecfea7e07b88e42b39f3b
[ "MIT" ]
null
null
null
mednickdb_pyapi/test_mednickdb_usecases.py
MednickLab/python_module
818763a70d1058e72ddecfea7e07b88e42b39f3b
[ "MIT" ]
null
null
null
mednickdb_pyapi/test_mednickdb_usecases.py
MednickLab/python_module
818763a70d1058e72ddecfea7e07b88e42b39f3b
[ "MIT" ]
1
2018-12-06T21:51:22.000Z
2018-12-06T21:51:22.000Z
from mednickdb_pyapi.mednickdb_pyapi import MednickAPI import pytest import time import pandas as pd import pprint pp = pprint.PrettyPrinter(indent=4) server_address = 'http://saclab.ss.uci.edu:8000' file_update_time = 2 data_update_time = 10 data_upload_working = False def dict_issubset(superset, subset, show_diffs=False): if show_diffs: return [item for item in subset.items() if item not in superset.items()] return all(item in superset.items() for item in subset.items()) def pytest_namespace(): return {'usecase_1_filedata': None} def test_clear_test_study(): """ Clear all data and files with the studyid of "TEST". This esentually refreshes the database for new testing. """ med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234') fids = med_api.extract_var(med_api.get_files(studyid='TEST'), '_id') if fids: for fid in fids: med_api.delete_file(fid, delete_all_versions=True) med_api.delete_data_from_single_file(fid) fids2 = med_api.extract_var(med_api.get_files(studyid='TEST'),'_id') assert fid not in fids2 assert (fids2 == []) deleted_fids = med_api.extract_var(med_api.get_deleted_files(),'_id') assert all([dfid in deleted_fids for dfid in fids]) med_api.delete_data(studyid='TEST') assert len(med_api.get_data(studyid='TEST', format='nested_dict')) == 0 #TODO after clearing up sourceid bug @pytest.mark.dependency(['test_clear_test_study']) def test_usecase_1(): """runs usecase one from the mednickdb_usecase document (fid=)""" #a) med_api = MednickAPI(server_address, 'test_ra_account@uci.edu', 'pass1234') file_info_post = { 'fileformat':'eeg', 'studyid':'TEST', 'versionid':1, 'subjectid':1, 'visitid':1, 'sessionid':1, 'filetype':'sleep_eeg', } file_data_real = file_info_post.copy() with open('testfiles/sleepfile1.edf','rb') as sleepfile: file_info_returned = med_api.upload_file(fileobject=sleepfile, **file_info_post) with open('testfiles/sleepfile1.edf', 'rb') as sleepfile: downloaded_sleepfile = med_api.download_file(file_info_returned['_id']) 
assert (downloaded_sleepfile == sleepfile.read()) # b) time.sleep(file_update_time) # give db 5 seconds to update file_info_get = med_api.get_file_by_fid(file_info_returned['_id']) file_info_post.update({'filename': 'sleepfile1.edf', 'filedir': 'uploads/TEST/1/1/1/1/sleep_eeg/'}) assert dict_issubset(file_info_get, file_info_post) time.sleep(data_update_time-file_update_time) # give db 5 seconds to update file_datas = med_api.get_data_from_single_file(filetype='sleep_eeg', fid=file_info_returned['_id'], format='flat_dict') file_data_real.pop('fileformat') file_data_real.pop('filetype') file_data_real.update({'sleep_eeg.eeg_nchan': 3, 'sleep_eeg.eeg_sfreq':128, 'sleep_eeg.eeg_meas_date':1041380737000, 'sleep_eeg.eeg_ch_names': ['C3A2', 'C4A1', 'ECG']}) # add actual data in file. # TODO add all pytest.usecase_1_filedata = file_data_real pytest.usecase_1_filename_version = file_info_get['filename_version'] assert(any([dict_issubset(file_data, file_data_real) for file_data in file_datas])), "Is pyparse running? 
(and working)" @pytest.mark.dependency(['test_usecase_1']) def test_usecase_2(): # a) file_info_post = {'filetype':'demographics', 'fileformat':'tabular', 'studyid':'TEST', 'versionid':1} med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234') with open('testfiles/TEST_Demographics.xlsx', 'rb') as demofile: # b) file_info = med_api.upload_file(fileobject=demofile, **file_info_post) fid = file_info['_id'] downloaded_demo = med_api.download_file(fid) with open('testfiles/TEST_Demographics.xlsx', 'rb') as demofile: assert downloaded_demo == demofile.read() # c) time.sleep(file_update_time) # Give file db 5 seconds to update file_info_post.update({'filename': 'TEST_Demographics.xlsx', 'filedir': 'uploads/TEST/1/demographics/'}) file_info_get = med_api.get_file_by_fid(fid) assert dict_issubset(file_info_get, file_info_post) # d) time.sleep(data_update_time-file_update_time) # Give data db 50 seconds to update data_rows = med_api.get_data(studyid='TEST', versionid=1, format='flat_dict') correct_row1 = {'studyid': 'TEST', 'versionid': 1, 'subjectid': 1, 'demographics.age': 23, 'demographics.sex': 'F', 'demographics.bmi': 23} correct_row1.update(pytest.usecase_1_filedata) correct_row2 = {'studyid': 'TEST', 'versionid': 1, 'subjectid': 2, 'demographics.age': 19, 'demographics.sex': 'M', 'demographics.bmi': 20} correct_rows = [correct_row1, correct_row2] pytest.usecase_2_row1 = correct_row1 pytest.usecase_2_row2 = correct_row2 pytest.usecase_2_filename_version = file_info_get['filename_version'] for correct_row in correct_rows: assert any([dict_issubset(data_row, correct_row) for data_row in data_rows]), "demographics data downloaded does not match expected" # e) data_sleep_eeg = med_api.get_data(studyid='TEST', versionid=1, filetype='sleep_eeg')[0] #FIXME will fail here until filetype is query-able assert dict_issubset(data_sleep_eeg, pytest.usecase_1_filedata), "sleep data downloaded does not match what was uploaded in usecase 1" 
@pytest.mark.dependency(['test_usecase_2']) def test_usecase_3(): # a) med_api = MednickAPI(server_address, 'test_ra_account@uci.edu', 'Pass1234') fid_for_manual_upload = med_api.extract_var(med_api.get_files(studyid='TEST'), '_id')[0] # get a random fid data_post = {'studyid': 'TEST', 'filetype': 'memtesta', 'data': {'accuracy': 0.9}, 'versionid': 1, 'subjectid': 2, 'visitid': 1, 'sessionid': 1} med_api.upload_data(**data_post, fid=fid_for_manual_upload) # b) time.sleep(5) # Give db 5 seconds to update correct_filename_versions = [pytest.usecase_1_filename_version, pytest.usecase_2_filename_version] filename_versions = med_api.extract_var(med_api.get_files(studyid='TEST', versionid=1), 'filename_version') assert all([fid in correct_filename_versions for fid in filename_versions]), "Missing expected filename versions from two previous usecases" # c) time.sleep(5) # Give db 5 seconds to update data_rows = med_api.get_data(studyid='TEST', versionid=1, format='flat_dict') correct_row_2 = pytest.usecase_2_row2.copy() correct_row_2.update({'memtesta.accuracy': 0.9, 'visitid': 1}) pytest.usecase_3_row2 = correct_row_2 correct_rows = [pytest.usecase_2_row1, correct_row_2] for correct_row in correct_rows: assert any([dict_issubset(data_row, correct_row) for data_row in data_rows]) @pytest.mark.dependency(['test_usecase_3']) def test_usecase_4(): # a) med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234') # b) uploading some scorefiles file_info1_post = { 'fileformat':'sleep_scoring', 'studyid':'TEST', 'versionid':1, 'subjectid':2, 'visitid':1, 'sessionid':1, 'filetype':'sleep_scoring' } with open('testfiles/scorefile1.mat', 'rb') as scorefile1: fid1 = med_api.upload_file(scorefile1, **file_info1_post) file_info2_post = file_info1_post.copy() file_info2_post.update({'visitid':2}) with open('testfiles/scorefile2.mat', 'rb') as scorefile2: fid2 = med_api.upload_file(scorefile2, **file_info2_post) scorefile1_data = {'sleep_scoring.epochstage': [-1, -1, 
-1, 0, 0, 0, 0, 0, 0, 0], 'sleep_scoring.epochoffset': [0, 30, 60, 90, 120, 150, 180, 210, 240, 270], 'sleep_scoring.starttime': 1451635302000, 'sleep_scoring.mins_in_0': 3.5, 'sleep_scoring.mins_in_1': 0, 'sleep_scoring.mins_in_2': 0, 'sleep_scoring.mins_in_3': 0, 'sleep_scoring.mins_in_4': 0, 'sleep_scoring.sleep_efficiency': 0, 'sleep_scoring.total_sleep_time': 0} scorefile2_data = {'sleep_scoring.epochstage': [0, 0, 1, 1, 2, 2, 3, 3, 2, 2], 'sleep_scoring.epochoffset': [0, 30, 60, 90, 120, 150, 180, 210, 240, 270], 'sleep_scoring.starttime': 1451635302000, 'sleep_scoring.mins_in_0': 1, 'sleep_scoring.mins_in_1': 1, 'sleep_scoring.mins_in_2': 2, 'sleep_scoring.mins_in_3': 1, 'sleep_scoring.mins_in_4': 0, 'sleep_scoring.sleep_efficiency': 0.8, 'sleep_scoring.total_sleep_time': 4} # c) time.sleep(data_update_time) # Give db 50 seconds to update data_rows = med_api.get_data(studyid='TEST', versionid=1, format='flat_dict') correct_row_1 = pytest.usecase_2_row1.copy() scorefile1_data.update(pytest.usecase_3_row2) correct_row_2 = scorefile1_data scorefile2_data.update(pytest.usecase_2_row2) correct_row_3 = scorefile2_data correct_rows = [correct_row_1, correct_row_2, correct_row_3] for correct_row in correct_rows: assert any([dict_issubset(data_row, correct_row) for data_row in data_rows]) pytest.usecase_4_row1 = correct_row_1 pytest.usecase_4_row2 = correct_row_2 pytest.usecase_4_row3 = correct_row_3 @pytest.mark.dependency(['test_usecase_4']) def test_usecase_5(): # a) med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234') data_rows = med_api.get_data(query='studyid=TEST and data.memtesta.accuracy>=0.9', format='flat_dict') assert any([dict_issubset(data_row, pytest.usecase_3_row2) for data_row in data_rows]) def test_get_specifiers(): med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234') sids = med_api.get_unique_var_values('studyid', store='data') assert 'TEST' in sids vids = med_api.get_unique_var_values('versionid', 
studyid='TEST', store='data') assert vids == [1] sids = med_api.get_unique_var_values('subjectid', studyid='TEST', store='data') assert sids == [1, 2] vids = med_api.get_unique_var_values('visitid', studyid='TEST', store='data') assert vids == [1, 2] sids = med_api.get_unique_var_values('sessionid', studyid='TEST', store='data') assert sids == [1] filetypes = med_api.get_unique_var_values('filetype', studyid='TEST', store='data') assert set(filetypes) == {'sleep_eeg', 'sleep_scoring', 'demographics', 'memtesta'}
43.720648
215
0.682471
1,506
10,799
4.580345
0.158035
0.036532
0.026095
0.030444
0.530444
0.403015
0.358365
0.296028
0.26283
0.212525
0
0.037883
0.193351
10,799
246
216
43.898374
0.753989
0.054079
0
0.224719
0
0
0.227849
0.09824
0
0
0
0.004065
0.117978
1
0.050562
false
0.039326
0.02809
0.005618
0.095506
0.011236
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79941e8098ddb85ae7adb492cd1a81eb3262f856
4,078
py
Python
sslcommerz_sdk/store.py
monim67/sslcommerz-sdk
77219fc90ab12222df2c03abc95c8d2b19768eeb
[ "MIT" ]
6
2021-01-15T13:31:37.000Z
2021-12-06T13:44:39.000Z
sslcommerz_sdk/store.py
monim67/sslcommerz-sdk
77219fc90ab12222df2c03abc95c8d2b19768eeb
[ "MIT" ]
null
null
null
sslcommerz_sdk/store.py
monim67/sslcommerz-sdk
77219fc90ab12222df2c03abc95c8d2b19768eeb
[ "MIT" ]
null
null
null
import requests from .enums import TransactionStatus from .exceptions import InvalidPaymentException, SslcommerzAPIException from .services import PayloadSchema, is_verify_sign_valid DEFAULT_CONFIG = { "base_url": "https://sandbox.sslcommerz.com", "session_url": "/gwprocess/v4/api.php", "validation_url": "/validator/api/validationserverAPI.php", "transaction_url": "/validator/api/merchantTransIDvalidationAPI.php", } class SslcommerzStore: def __init__(self, store_id, store_passwd, **kwargs): self.id = store_id self.credentials = dict(store_id=store_id, store_passwd=store_passwd) self.config = {**DEFAULT_CONFIG, **kwargs} def request(self, method, url, **kwargs): url = self.config["base_url"] + url return requests.request(method, url, **kwargs) def create_session(self, **kwargs): response = self.request( method="POST", url=self.config["session_url"], data={**self.credentials, **kwargs}, ) if response.status_code != 200: raise SslcommerzAPIException( f"Unexpected status code: {response.status_code}" ) response_json = response.json() if response_json["status"] != "SUCCESS": raise SslcommerzAPIException(f"Error: {response_json['failedreason']}") return response_json def validate_ipn_payload(self, payload): try: if not is_verify_sign_valid( store_passwd=self.credentials["store_passwd"], payload=payload["original"], ): raise InvalidPaymentException("verify_sign mismatch") if payload["status"] == TransactionStatus.VALID: validation_response = self.validate_transaction(payload["val_id"]) if validation_response["status"] not in ( TransactionStatus.VALID, TransactionStatus.VALIDATED, ): raise InvalidPaymentException( f"Payment status: {validation_response['status']}" ) return PayloadSchema().load(validation_response) except KeyError as key: raise InvalidPaymentException(f"{key} is missing in payload") from key def validate_transaction(self, val_id): response = self.request( method="GET", url=self.config["validation_url"], params=dict(**self.credentials, val_id=val_id, 
format="json"), ) if response.status_code != 200: raise SslcommerzAPIException( f"Unexpected status code: {response.status_code}" ) return response.json() def query_transaction_by_sessionkey(self, sessionkey): response = self.request( method="GET", url=self.config["transaction_url"], params=dict(**self.credentials, sessionkey=sessionkey, format="json"), ) return response.json() def query_transaction_by_tran_id(self, tran_id): response = self.request( method="GET", url=self.config["transaction_url"], params=dict(**self.credentials, tran_id=tran_id, format="json"), ) return response.json() def init_refund(self, bank_tran_id, refund_amount, refund_remarks): response = self.request( method="GET", url=self.config["transaction_url"], params=dict( **self.credentials, bank_tran_id=bank_tran_id, refund_amount=refund_amount, refund_remarks=refund_remarks, format="json", ), ) return response.json() def query_refund_status(self, refund_ref_id): response = self.request( method="GET", url=self.config["transaction_url"], params=dict(**self.credentials, refund_ref_id=refund_ref_id, format="json"), ) return response.json()
37.072727
88
0.601766
402
4,078
5.893035
0.223881
0.050654
0.038413
0.063318
0.344449
0.33263
0.282398
0.230055
0.230055
0.211904
0
0.002424
0.29181
4,078
109
89
37.412844
0.817867
0
0
0.291667
0
0
0.144924
0.051986
0
0
0
0
0
1
0.09375
false
0.03125
0.041667
0
0.229167
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7994462d7302f56ae3248adc4074cec0dff871a2
1,636
py
Python
nobrace/converter.py
iblis17/nobrace
7333029c6cd5f2a885614b5fe64f6c85ee5f296d
[ "MIT" ]
2
2015-07-13T09:08:53.000Z
2017-05-22T07:56:29.000Z
nobrace/converter.py
iblis17/nobrace
7333029c6cd5f2a885614b5fe64f6c85ee5f296d
[ "MIT" ]
null
null
null
nobrace/converter.py
iblis17/nobrace
7333029c6cd5f2a885614b5fe64f6c85ee5f296d
[ "MIT" ]
null
null
null
import abc import re from .exceptions import FileSuffixError from .stack import LineCounter, IndentStack class ConverterBase(metaclass=abc.ABCMeta): def __init__(self, src: str): self.src = src self.code_blocks @property def code_blocks(self): ''' Aggregate code block into tuple. A code block could be determined by intentation. ''' indent_stack = IndentStack(['']) blankline_stack = LineCounter() def complete_brace(indent, cur_indent): if indent == cur_indent: print('\n' * blankline_stack.pop(cur_indent, 0)) return if len(indent) > len(cur_indent): print(indent_stack.push(indent)) elif len(indent) < len(cur_indent): print(indent_stack.pop()) else: print('\n' * blankline_stack.pop(cur_indent, 0)) try: complete_brace(indent, indent_stack.top) except IndexError: return for line in self.src.split('\n'): indent_match = re.match('^([ \t]+)[\S]+', line) cur_indent = indent_stack[-1] if indent_match: indent = indent_match.group(1) complete_brace(indent, cur_indent) print(line, sep=', ') else: indent = None if cur_indent: blankline_stack.push(cur_indent) else: print(line) del line # handle eol print('{}}}'.format(indent_stack[-2]))
27.728814
64
0.525672
170
1,636
4.870588
0.388235
0.108696
0.067633
0.05314
0.236715
0.169082
0.169082
0.169082
0
0
0
0.004888
0.374694
1,636
58
65
28.206897
0.804497
0.056846
0
0.170732
0
0
0.017207
0
0
0
0
0
0
1
0.073171
false
0
0.097561
0
0.243902
0.170732
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7994c8744e38290defb02afb422aa332b6ee9f48
541
py
Python
test/cv2countours.py
taiwc/raspwww
835befaa9255fe53b7ce97b50f9d825191979eae
[ "Apache-2.0" ]
null
null
null
test/cv2countours.py
taiwc/raspwww
835befaa9255fe53b7ce97b50f9d825191979eae
[ "Apache-2.0" ]
null
null
null
test/cv2countours.py
taiwc/raspwww
835befaa9255fe53b7ce97b50f9d825191979eae
[ "Apache-2.0" ]
null
null
null
# import the necessary packages import numpy as np import argparse import cv2 im = cv2.imread('/var/www/test/test.jpg') cv2.imshow("im", im) imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) ret,thresh = cv2.threshold(imgray,127,255,0) cv2.imshow("Thresh", thresh) (cnts, _) = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #(cnts, _) = cv2.findContours(im.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) cv2.drawContours(im,cnts,-1,(0,255,0),3) cv2.drawContours(im,cnts,-1,(0,255,0),-1) cv2.imshow("Image",im) cv2.waitKey(0)
31.823529
83
0.744917
91
541
4.32967
0.43956
0.038071
0.096447
0.101523
0.137056
0.137056
0.137056
0.137056
0
0
0
0.07984
0.073937
541
16
84
33.8125
0.706587
0.205176
0
0
0
0
0.081967
0.051522
0
0
0
0
0
1
0
false
0
0.230769
0
0.230769
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79954f94e96d2ea0202820bdc5e93050e74cbb64
810
py
Python
migrations/versions/schema/783682226c9b_.py
Georgi2704/pricelist-fastapi-boilerplate
24b88e1f5c28b7eaff50745cd4464caac6de01e6
[ "Apache-2.0" ]
null
null
null
migrations/versions/schema/783682226c9b_.py
Georgi2704/pricelist-fastapi-boilerplate
24b88e1f5c28b7eaff50745cd4464caac6de01e6
[ "Apache-2.0" ]
2
2021-11-11T15:19:30.000Z
2022-02-07T22:52:07.000Z
migrations/versions/schema/783682226c9b_.py
Georgi2704/pricelist-fastapi
24b88e1f5c28b7eaff50745cd4464caac6de01e6
[ "Apache-2.0" ]
null
null
null
"""empty message Revision ID: 783682226c9b Revises: b882b9ab026c Create Date: 2019-10-19 10:07:14.923441 """ import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = "783682226c9b" down_revision = "b882b9ab026c" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column( "prices", "internal_product_id", existing_type=sa.INTEGER(), type_=sa.String(), existing_nullable=True ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column( "prices", "internal_product_id", existing_type=sa.String(), type_=sa.INTEGER(), existing_nullable=True ) # ### end Alembic commands ###
25.3125
110
0.693827
98
810
5.581633
0.520408
0.043876
0.076782
0.084095
0.47532
0.47532
0.33638
0.33638
0.33638
0.33638
0
0.081203
0.179012
810
31
111
26.129032
0.741353
0.364198
0
0.142857
0
0
0.154812
0
0
0
0
0
0
1
0.142857
false
0
0.142857
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7996085f7d2fbdd5bdf9904904104dd2b1da88ab
645
py
Python
backend/pages/migrations/0002_add_streamfield_option.py
nicoepp/the-prayer-walk
6c8217c33f399cfe46dc23075e13ca9464079cae
[ "MIT" ]
null
null
null
backend/pages/migrations/0002_add_streamfield_option.py
nicoepp/the-prayer-walk
6c8217c33f399cfe46dc23075e13ca9464079cae
[ "MIT" ]
null
null
null
backend/pages/migrations/0002_add_streamfield_option.py
nicoepp/the-prayer-walk
6c8217c33f399cfe46dc23075e13ca9464079cae
[ "MIT" ]
null
null
null
# Generated by Django 3.1.4 on 2021-02-04 05:25 from django.db import migrations import wagtail.core.blocks import wagtail.core.fields class Migration(migrations.Migration): dependencies = [ ('pages', '0001_add_homepage'), ] operations = [ migrations.AlterField( model_name='homepage', name='body', field=wagtail.core.fields.StreamField([('title', wagtail.core.blocks.CharBlock(form_classname='title', required=False)), ('paragraph', wagtail.core.blocks.TextBlock(form_classname='full')), ('rich', wagtail.core.blocks.RichTextBlock(form_classname='full'))]), ), ]
30.714286
271
0.669767
73
645
5.835616
0.616438
0.15493
0.159624
0
0
0
0
0
0
0
0
0.03619
0.186047
645
20
272
32.25
0.775238
0.069767
0
0
1
0
0.108696
0
0
0
0
0
0
1
0
false
0
0.214286
0
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
79973a1e08549411e3704f4e9cd2dd372854a94e
2,800
py
Python
pysnmp/HPN-ICF-FCOE-MODE-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
11
2021-02-02T16:27:16.000Z
2021-08-31T06:22:49.000Z
pysnmp/HPN-ICF-FCOE-MODE-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
75
2021-02-24T17:30:31.000Z
2021-12-08T00:01:18.000Z
pysnmp/HPN-ICF-FCOE-MODE-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module HPN-ICF-FCOE-MODE-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-FCOE-MODE-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 19:26:43 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint") hpnicfCommon, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfCommon") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") MibIdentifier, Integer32, IpAddress, Bits, ModuleIdentity, Counter32, Unsigned32, TimeTicks, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, NotificationType, Gauge32, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Integer32", "IpAddress", "Bits", "ModuleIdentity", "Counter32", "Unsigned32", "TimeTicks", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "NotificationType", "Gauge32", "iso") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") hpnicfFcoeMode = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 135)) hpnicfFcoeMode.setRevisions(('2013-03-08 11:00',)) if mibBuilder.loadTexts: hpnicfFcoeMode.setLastUpdated('201303081100Z') if mibBuilder.loadTexts: hpnicfFcoeMode.setOrganization('') hpnicfFcoeModeMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 135, 1)) hpnicfFcoeModeCfgMode = MibScalar((1, 3, 6, 
1, 4, 1, 11, 2, 14, 11, 15, 2, 135, 1, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: hpnicfFcoeModeCfgMode.setStatus('current') hpnicfFcoeModeCfgLastResult = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 135, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("success", 1), ("noLicence", 2), ("needReset", 3), ("unknownFault", 4)))).setMaxAccess("readonly") if mibBuilder.loadTexts: hpnicfFcoeModeCfgLastResult.setStatus('current') mibBuilder.exportSymbols("HPN-ICF-FCOE-MODE-MIB", PYSNMP_MODULE_ID=hpnicfFcoeMode, hpnicfFcoeModeCfgLastResult=hpnicfFcoeModeCfgLastResult, hpnicfFcoeModeMibObjects=hpnicfFcoeModeMibObjects, hpnicfFcoeMode=hpnicfFcoeMode, hpnicfFcoeModeCfgMode=hpnicfFcoeModeCfgMode)
112
477
0.777143
299
2,800
7.270903
0.41806
0.074057
0.00552
0.00736
0.313707
0.199172
0.199172
0.199172
0.199172
0.199172
0
0.072009
0.0775
2,800
24
478
116.666667
0.769648
0.12
0
0
0
0
0.238289
0.026477
0
0
0
0
0
1
0
false
0
0.411765
0
0.411765
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
799b0a8db84df97948487d6c7aed90ab475e0d53
1,212
py
Python
simulations/yml_to_df.py
danibachar/Kube-Load-Balancing
8b9ea68ddbb46cc730a02ffe30cc68b3d65ca491
[ "MIT" ]
null
null
null
simulations/yml_to_df.py
danibachar/Kube-Load-Balancing
8b9ea68ddbb46cc730a02ffe30cc68b3d65ca491
[ "MIT" ]
null
null
null
simulations/yml_to_df.py
danibachar/Kube-Load-Balancing
8b9ea68ddbb46cc730a02ffe30cc68b3d65ca491
[ "MIT" ]
null
null
null
import argparse import pandas as pd from config_builder import build_config from utils.helpers import load_ymal def app_dep_graph(yml): nodes = [] source = [] target = [] print(yml) for svc_name, service in yml["services"].items(): print(service) nodes.append(svc_name) for dep in service["dependencies"].values(): source.append(svc_name) target.append(dep["name"]) edges = pd.DataFrame({'source': source, 'target': target, }) return edges if __name__ == '__main__': parser = argparse.ArgumentParser(description='Run Kuberentes simulation') parser.add_argument( '--config_file_name', type=str, default="yamls/configurations/simple_run.yml", help='A configuration file that describe the test' ) args = parser.parse_args() config_file_name = args.config_file_name config = build_config(config_file_name) apps = config["simulation_ymals"]["apps"] for app_file in apps: app_name = app_file.split("/")[-1].split(".")[0] yml = load_ymal(app_file) graph = app_dep_graph(yml) graph.to_csv("{}.csv".format(app_name))
28.857143
77
0.632013
149
1,212
4.885906
0.463087
0.054945
0.076923
0.038462
0
0
0
0
0
0
0
0.002191
0.2467
1,212
41
78
29.560976
0.795181
0
0
0
0
0
0.159241
0.028878
0
0
0
0
0
1
0.028571
false
0
0.114286
0
0.171429
0.057143
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
799b58e5ba21ca4170d1a2c3abadcfc25ef8f189
1,896
py
Python
app/core/tests/test_admin.py
wanqian622/recipe-app-api
040eade8c6636347b6928b6deef7dfc3eaebc6f4
[ "MIT" ]
null
null
null
app/core/tests/test_admin.py
wanqian622/recipe-app-api
040eade8c6636347b6928b6deef7dfc3eaebc6f4
[ "MIT" ]
null
null
null
app/core/tests/test_admin.py
wanqian622/recipe-app-api
040eade8c6636347b6928b6deef7dfc3eaebc6f4
[ "MIT" ]
null
null
null
from django.test import TestCase, Client from django.contrib.auth import get_user_model # generate url for our django admin page from django.urls import reverse # allow us to make test requests to our app class AdminSiteTests(TestCase): # the set up test is a function run before every test that we run def setUp(self): # our setUp is going to consist of creating our test Client # add a new user that we can use to test # and make sure the user is loged into our client self.client = Client() self.admin_user = get_user_model().objects.create_superuser( email='admin.@gmail.com', password='password123' ) # Use the client help function that allows us to log a user in # with the Django auth self.client.force_login(self.admin_user) self.user = get_user_model().objects.create_user( email="test@gmail.com", password="password123", name="Test" ) # test the users are listed in our django admin def test_users_listed(self): """Test that users are listed on user page""" # generate a url for our listed user page url = reverse('admin:core_user_changelist') # perform http get on the url res = self.client.get(url) self.assertContains(res, self.user.name) self.assertContains(res, self.user.email) def test_user_change_page(self): """Test that the user edit page works""" url = reverse('admin:core_user_change', args=[self.user.id]) # admin/core/user/id res = self.client.get(url) self.assertEqual(res.status_code, 200) def test_create_user_page(self): """Test that create user page works""" url = reverse('admin:core_user_add') res = self.client.get(url) self.assertEqual(res.status_code, 200)
37.92
70
0.647152
270
1,896
4.444444
0.325926
0.041667
0.043333
0.0475
0.268333
0.204167
0.136667
0.083333
0.083333
0.083333
0
0.008639
0.267405
1,896
49
71
38.693878
0.855292
0.323312
0
0.172414
1
0
0.09793
0.038217
0
0
0
0
0.137931
1
0.137931
false
0.068966
0.103448
0
0.275862
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
799b9f43022008e2e03ab6f0fb315d88d195a221
348
py
Python
by-session/class-921/week2/poly.py
amiraliakbari/sharif-mabani-python
5d14a08d165267fe71c28389ddbafe29af7078c5
[ "MIT" ]
2
2015-04-29T20:59:35.000Z
2018-09-26T13:33:43.000Z
by-session/class-921/week2/poly.py
amiraliakbari/sharif-mabani-python
5d14a08d165267fe71c28389ddbafe29af7078c5
[ "MIT" ]
null
null
null
by-session/class-921/week2/poly.py
amiraliakbari/sharif-mabani-python
5d14a08d165267fe71c28389ddbafe29af7078c5
[ "MIT" ]
null
null
null
import turtle def circle(): while turtle.heading() < 359: turtle.forward(1) turtle.left(1) turtle.left(1) def poly(r, teta): n = 360 / teta while n > 0: n = n - 1 turtle.forward(r) turtle.left(teta) n = 10 while n > 0: n = n - 1 poly(10, 30) turtle.forward(40) turtle.done()
14.5
33
0.522989
52
348
3.5
0.384615
0.214286
0.120879
0.131868
0.10989
0.10989
0
0
0
0
0
0.091703
0.341954
348
23
34
15.130435
0.703057
0
0
0.333333
0
0
0
0
0
0
0
0
0
1
0.111111
false
0
0.055556
0
0.166667
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
799c75fdf25b724115e60255f1f09d6eec4d851d
4,187
py
Python
ckanext/ckanext-sixodp_scheming/ckanext/sixodp_scheming/helpers.py
Tampere/sixodp-tampere
975105a5e20e97c54dd7c84c761f91cca1316842
[ "MIT" ]
8
2016-10-14T14:32:00.000Z
2022-01-14T16:04:07.000Z
ckanext/ckanext-sixodp_scheming/ckanext/sixodp_scheming/helpers.py
Tampere/sixodp-tampere
975105a5e20e97c54dd7c84c761f91cca1316842
[ "MIT" ]
42
2016-11-11T12:24:54.000Z
2021-07-12T03:29:18.000Z
ckanext/ckanext-sixodp_scheming/ckanext/sixodp_scheming/helpers.py
Tampere/sixodp-tampere
975105a5e20e97c54dd7c84c761f91cca1316842
[ "MIT" ]
7
2017-03-13T09:21:08.000Z
2018-01-08T06:40:22.000Z
from ckan.plugins import toolkit from ckan.lib.i18n import get_lang import ckan.lib.i18n as i18n from ckan.common import config, c import ckan.logic as logic import ckan.lib.base as base import ckan.model as model from ckan.model.package import Package from ckan.lib.dictization.model_dictize import group_list_dictize import logging get_action = toolkit.get_action NotFound = logic.NotFound abort = base.abort log = logging.getLogger(__name__) def call_toolkit_function(fn, args, kwargs): return getattr(toolkit,fn)(*args, **kwargs) def add_locale_to_source(kwargs, locale): copy = kwargs.copy() source = copy.get('data-module-source', None) if source: copy.update({'data-module-source': source + '_' + locale}) return copy return copy def get_current_lang(): return get_lang() def scheming_field_only_default_required(field, lang): if field and field.get('only_default_lang_required') and lang == config.get('ckan.locale_default', 'en'): return True return False def get_current_date(): import datetime return datetime.date.today().strftime("%d.%m.%Y") def get_package_groups_by_type(package_id, group_type): context = {'model': model, 'session': model.Session, 'for_view': True, 'use_cache': False} group_list = [] data_dict = { 'all_fields': True, 'include_extras': True, 'type': group_type } groups = logic.get_action('group_list')(context, data_dict) try: pkg_obj = Package.get(package_id) pkg_group_ids = set(group['id'] for group in group_list_dictize(pkg_obj.get_groups(group_type, None), context)) group_list = [group for group in groups if group['id'] in pkg_group_ids] except (NotFound): abort(404, _('Dataset not found')) return group_list _LOCALE_ALIASES = {'en_GB': 'en'} def get_lang_prefix(): language = i18n.get_lang() if language in _LOCALE_ALIASES: language = _LOCALE_ALIASES[language] return language def get_translated_or_default_locale(data_dict, field): language = i18n.get_lang() if language in _LOCALE_ALIASES: language = _LOCALE_ALIASES[language] try: value = 
data_dict[field+'_translated'][language] if value: return value else: return data_dict[field+'_translated'][config.get('ckan.locale_default', 'en')] except KeyError: return data_dict.get(field, '') def show_qa(): from ckan.plugins import plugin_loaded if plugin_loaded('qa'): return True return False def scheming_category_list(args): from ckan.logic import NotFound # FIXME: sometimes this might return 0 categories if in development try: context = {'model': model, 'session': model.Session, 'ignore_auth': True} group_ids = get_action('group_list')(context, {}) except NotFound: return None else: category_list = [] # filter groups to those user is allowed to edit group_authz = get_action('group_list_authz')({ 'model': model, 'session': model.Session, 'user': c.user }, {}) user_group_ids = set(group[u'name'] for group in group_authz) group_ids = [group for group in group_ids if group in user_group_ids] for group in group_ids: try: context = {'model': model, 'session': model.Session, 'ignore_auth': True} group_details = get_action('group_show')(context, {'id': group}) except Exception as e: log.error(e) return None category_list.append({ "value": group, "label": group_details.get('title') }) return category_list def check_group_selected(val, data): log.info(val) log.info(data) if filter(lambda x: x['name'] == val, data): return True return False def get_field_from_schema(schema, field_name): field = next(field for field in schema.get('dataset_fields', []) if field.get('field_name') == field_name) return field
27.366013
119
0.64557
545
4,187
4.733945
0.247706
0.027907
0.01938
0.034109
0.212016
0.15814
0.10155
0.10155
0.10155
0.10155
0
0.004453
0.249104
4,187
152
120
27.546053
0.816158
0.026749
0
0.220183
0
0
0.093075
0.006385
0
0
0
0.006579
0
1
0.110092
false
0
0.119266
0.018349
0.412844
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
799d1aa404d567219c68dbf1a4e8d983c39bc395
389
py
Python
helpers/cast_heal.py
nuzcraft/RLTut
b763de87ee49abd413a7c3a278c004803ab45663
[ "MIT" ]
2
2018-01-05T08:09:37.000Z
2018-01-05T20:39:18.000Z
helpers/cast_heal.py
nuzcraft/RLTut
b763de87ee49abd413a7c3a278c004803ab45663
[ "MIT" ]
null
null
null
helpers/cast_heal.py
nuzcraft/RLTut
b763de87ee49abd413a7c3a278c004803ab45663
[ "MIT" ]
null
null
null
# function that heals the player import variables as var from helpers.message import message def cast_heal(): # heal the player if var.player.fighter.hp == var.player.fighter.max_hp: message('You are already at full health.', 'red') return 'cancelled' message('Your wounds start to feel better!', 'light violet') var.player.fighter.heal(var.HEAL_AMOUNT)
27.785714
64
0.701799
56
389
4.821429
0.660714
0.1
0.177778
0
0
0
0
0
0
0
0
0
0.197943
389
13
65
29.923077
0.865385
0.118252
0
0
0
0
0.259587
0
0
0
0
0
0
1
0.125
true
0
0.25
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
79a1df3d282563da1e1751edcf4f307e4b4ca364
8,794
py
Python
notebooks/experiments.py
yashasvi-ranawat/viabel
99245b7f3db8ea9dc55d6130bd5672e1adc62b63
[ "MIT" ]
1
2022-02-15T23:43:22.000Z
2022-02-15T23:43:22.000Z
notebooks/experiments.py
yashasvi-ranawat/viabel
99245b7f3db8ea9dc55d6130bd5672e1adc62b63
[ "MIT" ]
null
null
null
notebooks/experiments.py
yashasvi-ranawat/viabel
99245b7f3db8ea9dc55d6130bd5672e1adc62b63
[ "MIT" ]
3
2020-03-21T12:45:22.000Z
2020-10-06T18:30:47.000Z
import numpy as np import matplotlib.pyplot as plt import seaborn as sns all from viabel import all_bounds from viabel.vb import black_box_klvi, black_box_chivi, adagrad_optimize from utils import Timer from psis import psislw ## Display bounds information ## def print_bounds(results): print('Bounds on...') print(' 2-Wasserstein {:.3g}'.format(results['W2'])) print(' 2-divergence {:.3g}'.format(results['d2'])) print(' mean error {:.3g}'.format(results['mean_error'])) print(' stdev error {:.3g}'.format(results['std_error'])) print(' sqrt cov error {:.3g}'.format(np.sqrt(results['cov_error']))) print(' cov error {:.3g}'.format(results['cov_error'])) ## Check approximation accuracy ## def check_accuracy(true_mean, true_cov, approx_mean, approx_cov, verbose=False, method=None): true_std = np.sqrt(np.diag(true_cov)) approx_std = np.sqrt(np.diag(approx_cov)) results = dict(mean_error=np.linalg.norm(true_mean - approx_mean), cov_error_2=np.linalg.norm(true_cov - approx_cov, ord=2), cov_norm_2=np.linalg.norm(true_cov, ord=2), cov_error_nuc=np.linalg.norm(true_cov - approx_cov, ord='nuc'), cov_norm_nuc=np.linalg.norm(true_cov, ord='nuc'), std_error=np.linalg.norm(true_std - approx_std), rel_std_error=np.linalg.norm(approx_std/true_std - 1), ) if method is not None: results['method'] = method if verbose: print('mean =', approx_mean) print('stdevs =', approx_std) print() print('mean error = {:.3g}'.format(results['mean_error'])) print('stdev error = {:.3g}'.format(results['std_error'])) print('||cov error||_2^{{1/2}} = {:.3g}'.format(np.sqrt(results['cov_error_2']))) print('||true cov||_2^{{1/2}} = {:.3g}'.format(np.sqrt(results['cov_norm_2']))) return results def check_approx_accuracy(var_family, var_param, true_mean, true_cov, verbose=False, name=None): return check_accuracy(true_mean, true_cov, *var_family.mean_and_cov(var_param), verbose, name) ## Convenience functions and PSIS ## def get_samples_and_log_weights(logdensity, var_family, var_param, n_samples): samples = 
var_family.sample(var_param, n_samples) log_weights = logdensity(samples) - var_family.logdensity(samples, var_param) return samples, log_weights def psis_correction(logdensity, var_family, var_param, n_samples): samples, log_weights = get_samples_and_log_weights(logdensity, var_family, var_param, n_samples) smoothed_log_weights, khat = psislw(log_weights) return samples.T, smoothed_log_weights, khat def improve_with_psis(logdensity, var_family, var_param, n_samples, true_mean, true_cov, transform=None, verbose=False): samples, slw, khat = psis_correction(logdensity, var_family, var_param, n_samples) if verbose: print('khat = {:.3g}'.format(khat)) print() if transform is not None: samples = transform(samples) slw -= np.max(slw) wts = np.exp(slw) wts /= np.sum(wts) approx_mean = np.sum(wts[np.newaxis,:]*samples, axis=1) approx_cov = np.cov(samples, aweights=wts, ddof=0) res = check_accuracy(true_mean, true_cov, approx_mean, approx_cov, verbose) res['khat'] = khat return res, approx_mean, approx_cov ## Plotting ## def plot_approx_and_exact_contours(logdensity, var_family, var_param, xlim=[-10,10], ylim=[-3, 3], cmap2='Reds', savepath=None): xlist = np.linspace(*xlim, 100) ylist = np.linspace(*ylim, 100) X, Y = np.meshgrid(xlist, ylist) XY = np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T zs = np.exp(logdensity(XY)) Z = zs.reshape(X.shape) zsapprox = np.exp(var_family.logdensity(XY, var_param)) Zapprox = zsapprox.reshape(X.shape) plt.contour(X, Y, Z, cmap='Greys', linestyles='solid') plt.contour(X, Y, Zapprox, cmap=cmap2, linestyles='solid') if savepath is not None: plt.savefig(savepath, bbox_inches='tight') plt.show() def plot_history(history, B=None, ylabel=None): if B is None: B = min(500, history.size//10) window = np.ones(B)/B smoothed_history = np.convolve(history, window, 'valid') plt.plot(smoothed_history) yscale = 'log' if np.all(smoothed_history > 0) else 'linear' plt.yscale(yscale) if ylabel is not None: plt.ylabel(ylabel) 
plt.xlabel('iteration') plt.show() def plot_dist_to_opt_param(var_param_history, opt_param): plt.plot(np.linalg.norm(var_param_history - opt_param[np.newaxis,:], axis=1)) plt.title('iteration vs distance to optimal parameter') plt.xlabel('iteration') plt.ylabel('distance') sns.despine() plt.show() ## Run experiment with both KLVI and CHIVI ## def _optimize_and_check_results(logdensity, var_family, objective_and_grad, init_var_param, true_mean, true_cov, plot_contours, ylabel, contour_kws=dict(), elbo=None, n_iters=5000, bound_w2=True, verbose=False, use_psis=True, n_psis_samples=1000000, **kwargs): opt_param, var_param_history, value_history, _ = \ adagrad_optimize(n_iters, objective_and_grad, init_var_param, **kwargs) plot_dist_to_opt_param(var_param_history, opt_param) accuracy_results = check_approx_accuracy(var_family, opt_param, true_mean, true_cov, verbose); other_results = dict(opt_param=opt_param, var_param_history=var_param_history, value_history=value_history) if bound_w2 not in [False, None]: if bound_w2 is True: n_samples = 1000000 else: n_samples = bound_w2 print() with Timer('Computing CUBO and ELBO with {} samples'.format(n_samples)): _, log_weights = get_samples_and_log_weights( logdensity, var_family, opt_param, n_samples) var_dist_cov = var_family.mean_and_cov(opt_param)[1] moment_bound_fn = lambda p: var_family.pth_moment(p, opt_param) other_results.update(all_bounds(log_weights, q_var=var_dist_cov, moment_bound_fn=moment_bound_fn, log_norm_bound=elbo)) if verbose: print() print_bounds(other_results) if plot_contours: plot_approx_and_exact_contours(logdensity, var_family, opt_param, **contour_kws) if use_psis: print() print('Results with PSIS correction') print('----------------------------') other_results['psis_results'], _, _ = \ improve_with_psis(logdensity, var_family, opt_param, n_psis_samples, true_mean, true_cov, verbose=verbose) return accuracy_results, other_results def run_experiment(logdensity, var_family, init_param, true_mean, 
true_cov, kl_n_samples=100, chivi_n_samples=500, alpha=2, **kwargs): klvi = black_box_klvi(var_family, logdensity, kl_n_samples) chivi = black_box_chivi(alpha, var_family, logdensity, chivi_n_samples) dim = true_mean.size plot_contours = dim == 2 if plot_contours: plot_approx_and_exact_contours(logdensity, var_family, init_param, **kwargs.get('contour_kws', dict())) print('|--------------|') print('| KLVI |') print('|--------------|', flush=True) kl_results, other_kl_results = _optimize_and_check_results( logdensity, var_family, klvi, init_param, true_mean, true_cov, plot_contours, '-ELBO', **kwargs) kl_results['method'] = 'KLVI' print() print('|---------------|') print('| CHIVI |') print('|---------------|', flush=True) elbo = other_kl_results['log_norm_bound'] chivi_results, other_chivi_results = _optimize_and_check_results( logdensity, var_family, chivi, init_param, true_mean, true_cov, plot_contours, 'CUBO', elbo=elbo, **kwargs) chivi_results['method'] = 'CHIVI' return klvi, chivi, kl_results, chivi_results, other_kl_results, other_chivi_results
41.677725
90
0.61053
1,107
8,794
4.558266
0.171635
0.042806
0.052715
0.032699
0.378121
0.309354
0.245937
0.217598
0.161316
0.131193
0
0.013033
0.267114
8,794
210
91
41.87619
0.769899
0.015806
0
0.102857
0
0
0.090003
0.003243
0
0
0
0
0
1
0.062857
false
0
0.04
0.005714
0.142857
0.171429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79a273cc97d15a6de92f6c405beb9d31e30fa60e
6,269
py
Python
network_filters.py
luos/nova-latency-scheduler
8e83539ce1dfd080ba86e4e71a2b999e56a91ec8
[ "MIT" ]
1
2017-03-28T19:02:23.000Z
2017-03-28T19:02:23.000Z
network_filters.py
luos/nova-latency-scheduler
8e83539ce1dfd080ba86e4e71a2b999e56a91ec8
[ "MIT" ]
null
null
null
network_filters.py
luos/nova-latency-scheduler
8e83539ce1dfd080ba86e4e71a2b999e56a91ec8
[ "MIT" ]
null
null
null
from abc import ABCMeta, abstractmethod from collections import namedtuple from nova.objects.request_spec import RequestSpec from nova.scheduler.host_manager import HostState from oslo_log import log as logging import nova.conf from nova.scheduler.filters import BaseHostFilter from latency_meter.server import start_server_on_other_thread LOG = logging.getLogger(__name__) CONF = nova.conf.CONF LOG_TAG = "GLLS" class NetworkAwareFilter(BaseHostFilter): def __init__(self, latency_filter=None, bandwidth_filter=None ): """ :type bandwidth_filter: BandwidthFilter :type latency_filter: LatencyFilter """ super(NetworkAwareFilter, self).__init__() if latency_filter is not None: self.latency_filter = latency_filter else: self.latency_filter = create_default_filter_backend() if latency_filter is not None: self.bandwidth_filter = bandwidth_filter else: self.bandwidth_filter = create_default_bandwidth_filter() start_server_on_other_thread(LOG) def host_passes(self, host_state, spec_obj): """ :type host_state: HostState :type spec_obj: RequestSpec """ latency_passes = self.latency_filter.host_passes(host_state.host, hints=spec_obj.scheduler_hints) bandwidth_passes = self.bandwidth_filter.host_passes(host_state.host, self.get_bandwidth_hints(spec_obj)) LOG.info( "GLLS " + host_state.host + " Latency passes: " + str( latency_passes) + ", Bandwidth passes: " + str(bandwidth_passes) + " with hints: " + str(spec_obj.scheduler_hints)) return latency_passes and bandwidth_passes def get_bandwidth_hints(self, spec_obj): hints = [] if 'bandwidth_to' in spec_obj.scheduler_hints: bandwidth_pairs = [hint.split(',') for hint in spec_obj.scheduler_hints['bandwidth_to']] hints = [BandwidthHint(float(hint[0]), hint[1].strip()) for hint in bandwidth_pairs] return hints class HostLatencyService(): __metaclass__ = ABCMeta @abstractmethod def get_latencies_from_host(self, host): pass class HostBandwidthService(): __metaclass__ = ABCMeta @abstractmethod def get_bandwidth_from_host(self, host): pass 
class StaticHostLatencyService(HostLatencyService, HostBandwidthService): latencies = { 'node-2': { 'node-2': 0, 'node-3': 30, 'node-4': 100 }, 'node-3': { 'node-2': 30, 'node-3': 0, 'node-4': 45 }, 'node-4': { 'node-2': 100, 'node-3': 45, 'node-4': 0 } } bandwidth = { 'node-2': { 'node-2': 1000000, 'node-3': 100000, 'node-4': 15000, }, 'node-3': { 'node-2': 100000, 'node-3': 1000000, 'node-4': 50000, }, 'node-4': { 'node-2': 15000, 'node-3': 50000, 'node-4': 1000000, }, } def get_latencies_from_host(self, host): return self.latencies[host] def get_bandwidth_from_host(self, host): return self.bandwidth[host] class LatencyFilter(): def __init__(self, measurements): """ :type measurements: HostLatencyService """ self.measurements = measurements def host_passes(self, hostname, hints): if 'latency_to' in hints: latency_expectations = [hint.split(',') for hint in hints['latency_to']] self._log("Scheduling with expectations: " + str(latency_expectations)) if len(latency_expectations) > 0: latencies_to_host = self.measurements.get_latencies_from_host(host=hostname) self._log("Got latency list: " + str(latencies_to_host)) for expected_latency, remote_host in latency_expectations: if remote_host not in latencies_to_host: self._log("Node " + str(remote_host) + " was not in nodes for " + hostname) return False latency_to_target = latencies_to_host[remote_host] self._log("Checking node " + remote_host + " expected latency: " + str( expected_latency) + " got latency " + str(latency_to_target)) if latency_to_target < float(expected_latency): continue else: return False return True return True return True def _log(self, log): LOG.info(LOG_TAG + " " + str(log)) class BandwidthHint(): def __init__(self, bandwidth_kbps, to_host): self.bandwidth_kbps = bandwidth_kbps self.to_host = to_host def __eq__(self, other): if isinstance(other, BandwidthHint): return other.bandwidth_kbps == self.bandwidth_kbps and other.to_host == self.to_host else: return False class BandwidthFilter(): def 
__init__(self, measurements): """ :type measurements: HostBandwidthService """ self.measurements = measurements def host_passes(self, hostname, hints): """ :type hostname: str :type hints: list[BandwidthHint] """ if len(hints) > 0: bandwidths = self.measurements.get_bandwidth_from_host(hostname) LOG.info(LOG_TAG + " BANDWIDTH to host " + hostname + " -" + str(bandwidths)) for hint in hints: if hint.to_host not in bandwidths: return False bandwidth_to_host = bandwidths[hint.to_host] if bandwidth_to_host >= hint.bandwidth_kbps: continue else: return False return True return True def create_default_filter_backend(): return LatencyFilter(StaticHostLatencyService()) def create_default_bandwidth_filter(): return BandwidthFilter(StaticHostLatencyService())
29.7109
113
0.590206
658
6,269
5.348024
0.168693
0.02387
0.012504
0.02387
0.231316
0.191532
0.108554
0.057403
0.032964
0
0
0.022951
0.318871
6,269
210
114
29.852381
0.801171
0.042112
0
0.282759
0
0
0.066883
0
0
0
0
0
0
1
0.110345
false
0.068966
0.055172
0.027586
0.358621
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
79a4a7145c7782f4c24c78c13d10feef5db3850e
2,456
py
Python
data_capture/jobs.py
connectthefuture/calc
7f0dc01d9265f26a36c2b9b5ee779fe876e4a494
[ "CC0-1.0" ]
null
null
null
data_capture/jobs.py
connectthefuture/calc
7f0dc01d9265f26a36c2b9b5ee779fe876e4a494
[ "CC0-1.0" ]
1
2021-06-10T23:13:04.000Z
2021-06-10T23:13:04.000Z
data_capture/jobs.py
connectthefuture/calc
7f0dc01d9265f26a36c2b9b5ee779fe876e4a494
[ "CC0-1.0" ]
null
null
null
import logging import traceback from django.core.exceptions import ValidationError from django.core.files.base import ContentFile from django.db import transaction from django_rq import job from . import email from .r10_spreadsheet_converter import Region10SpreadsheetConverter from contracts.loaders.region_10 import Region10Loader from contracts.models import Contract, BulkUploadContractSource contracts_logger = logging.getLogger('contracts') @transaction.atomic def _process_bulk_upload(upload_source): file = ContentFile(upload_source.original_file) converter = Region10SpreadsheetConverter(file) contracts_logger.info("Deleting contract objects related to region 10.") # Delete existing contracts identified by the same # procurement_center Contract.objects.filter( upload_source__procurement_center=BulkUploadContractSource.REGION_10 ).delete() contracts = [] bad_rows = [] contracts_logger.info("Generating new contract objects.") for row in converter.convert_next(): try: c = Region10Loader.make_contract(row, upload_source=upload_source) contracts.append(c) except (ValueError, ValidationError) as e: bad_rows.append(row) contracts_logger.info("Saving new contract objects.") # Save new contracts Contract.objects.bulk_create(contracts) contracts_logger.info("Updating full-text search indexes.") # Update search field on Contract models Contract._fts_manager.update_search_field() # Update the upload_source upload_source.has_been_loaded = True upload_source.save() return len(contracts), len(bad_rows) @job def process_bulk_upload_and_send_email(upload_source_id): contracts_logger.info( "Starting bulk upload processing (pk=%d)." % upload_source_id ) upload_source = BulkUploadContractSource.objects.get( pk=upload_source_id ) try: num_contracts, num_bad_rows = _process_bulk_upload(upload_source) email.bulk_upload_succeeded(upload_source, num_contracts, num_bad_rows) except: contracts_logger.exception( 'An exception occurred during bulk upload processing ' '(pk=%d).' 
% upload_source_id ) tb = traceback.format_exc() email.bulk_upload_failed(upload_source, tb) contracts_logger.info( "Ending bulk upload processing (pk=%d)." % upload_source_id )
29.95122
79
0.735342
286
2,456
6.055944
0.374126
0.117783
0.06582
0.038106
0.122979
0.064088
0.064088
0.064088
0
0
0
0.008065
0.192182
2,456
81
80
30.320988
0.864919
0.061075
0
0.071429
0
0
0.125217
0
0
0
0
0
0
1
0.035714
false
0
0.178571
0
0.232143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
79a5279be723e7987fc33c8f7184b26de97782d2
1,525
py
Python
rss_temple/api/archived_feed_entry_util.py
murrple-1/rss_temple
289197923b1e7d1213f1673d164337df17d7269b
[ "MIT" ]
null
null
null
rss_temple/api/archived_feed_entry_util.py
murrple-1/rss_temple
289197923b1e7d1213f1673d164337df17d7269b
[ "MIT" ]
8
2019-12-04T21:58:35.000Z
2021-12-15T02:29:49.000Z
rss_temple/api/archived_feed_entry_util.py
murrple-1/rss_temple
289197923b1e7d1213f1673d164337df17d7269b
[ "MIT" ]
null
null
null
import itertools from django.conf import settings from django.dispatch import receiver from django.core.signals import setting_changed from api import models _USER_UNREAD_GRACE_INTERVAL = None _USER_UNREAD_GRACE_MIN_COUNT = None @receiver(setting_changed) def _load_global_settings(*args, **kwargs): global _USER_UNREAD_GRACE_INTERVAL global _USER_UNREAD_GRACE_MIN_COUNT _USER_UNREAD_GRACE_INTERVAL = settings.USER_UNREAD_GRACE_INTERVAL _USER_UNREAD_GRACE_MIN_COUNT = settings.USER_UNREAD_GRACE_MIN_COUNT _load_global_settings() def mark_archived_entries(read_mappings_generator, batch_size=768): while True: batch = list(itertools.islice(read_mappings_generator, batch_size)) if len(batch) < 1: break models.ReadFeedEntryUserMapping.objects.bulk_create( batch, batch_size=batch_size, ignore_conflicts=True) def read_mapping_generator_fn(feed, user): grace_start = user.created_at + _USER_UNREAD_GRACE_INTERVAL feed_entries = None if models.FeedEntry.objects.filter(feed=feed, published_at__gte=grace_start).count() > _USER_UNREAD_GRACE_MIN_COUNT: feed_entries = models.FeedEntry.objects.filter( feed=feed, published_at__lt=grace_start) else: feed_entries = models.FeedEntry.objects.filter(feed=feed).order_by( 'published_at')[_USER_UNREAD_GRACE_MIN_COUNT:] for feed_entry in feed_entries.iterator(): yield models.ReadFeedEntryUserMapping(feed_entry=feed_entry, user=user)
31.122449
120
0.773115
200
1,525
5.445
0.34
0.10101
0.151515
0.099174
0.321396
0.139578
0.139578
0.139578
0
0
0
0.003118
0.158689
1,525
48
121
31.770833
0.845674
0
0
0
0
0
0.007869
0
0
0
0
0
0
1
0.09375
false
0
0.15625
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0