hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
a0d646ba03a4465fe2514a5e2b0f73386fb45c4c
2,321
py
Python
app/api/V1/views/products.py
Paulvitalis200/Store-Manager-API
d61e91bff7fc242da2a93d1caf1012465c7c904a
[ "MIT" ]
null
null
null
app/api/V1/views/products.py
Paulvitalis200/Store-Manager-API
d61e91bff7fc242da2a93d1caf1012465c7c904a
[ "MIT" ]
4
2018-10-21T18:28:03.000Z
2018-10-24T12:48:24.000Z
app/api/V1/views/products.py
Paulstar200/Store-Manager-API
d61e91bff7fc242da2a93d1caf1012465c7c904a
[ "MIT" ]
null
null
null
from flask import Flask, request from flask_restful import Resource, reqparse from flask_jwt_extended import create_access_token, jwt_required from app.api.V1.models import Product, products class PostProduct(Resource): parser = reqparse.RequestParser() parser.add_argument('name', required=True, help='Product name cannot be blank', type=str) parser.add_argument('price', required=True, help=' Product price cannot be blank or a word', type=int) parser.add_argument('quantity', required=True, help='Product quantity cannot be blank or a word', type=int) @jwt_required def post(self): # input validation data = request.get_json() args = PostProduct.parser.parse_args() name = args.get('name').strip() # removes whitespace price = args.get('price') quantity = args.get('quantity') payload = ['name', 'price', 'quantity'] if not name or not price or not quantity: return {'message': 'Product name, price and quantity are all required'}, 400 else: # Check if the item is not required for item in data.keys(): if item not in payload: return {"message": "The field '{}' is not required for the products".format(item)}, 400 try: product = Product.create_product(name, price, quantity) return { 'message': 'Product created successfully!', 'product': product, 'status': 'ok' }, 201 except Exception as my_exception: print(my_exception) return {'message': 'Something went wrong.'}, 500 class GetAllProducts(Resource): # Both attendant and store owner can get products @jwt_required def get(self): products = Product.get_products() if len(products) == 0: return {'message': "No products created yet."} return { 'message': 'Products successfully retrieved!', 'products': products }, 200 # Get a single specific product class GetEachProduct(Resource): @jwt_required def get(self, product_id): try: return products[product_id - 1] except IndexError: return {"message": "No item with that ID in stock"}
35.166667
111
0.616545
271
2,321
5.206642
0.394834
0.064493
0.036145
0.048901
0.068037
0.038271
0.038271
0.038271
0
0
0
0.010843
0.284791
2,321
65
112
35.707692
0.839157
0.063335
0
0.14
0
0
0.214022
0
0
0
0
0
0
1
0.06
false
0
0.08
0
0.38
0.02
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0d68497a4530b9b9bb8366ff9da7d608dd9a751
1,155
py
Python
51-100/p87.py
YiWeiShen/Project-Euler-Hints
a79cacab075dd98d393516f083aaa7ffc6115a06
[ "MIT" ]
1
2019-02-25T13:00:31.000Z
2019-02-25T13:00:31.000Z
51-100/p87.py
YiWeiShen/Project-Euler-Hints
a79cacab075dd98d393516f083aaa7ffc6115a06
[ "MIT" ]
null
null
null
51-100/p87.py
YiWeiShen/Project-Euler-Hints
a79cacab075dd98d393516f083aaa7ffc6115a06
[ "MIT" ]
null
null
null
import time from multiprocessing.pool import Pool def is_prime(num): for i in range(2, int(num**0.5+1)): if num % i == 0: return None return num if __name__ == '__main__': t = time.time() p1 = Pool(processes=30) p2 = Pool(processes=30) p3 = Pool(processes=30) num1 = range(2, 7072) num2 = range(2, 369) num3 = range(2, 85) prime_list1 = p1.map(is_prime, num1) p1.close() p1.join() prime_list2 = p2.map(is_prime, num2) p2.close() p2.join() prime_list3 = p3.map(is_prime, num3) p3.close() p3.join() prime_list1_clear = [x for x in prime_list1 if x is not None] prime_list2_clear = [x for x in prime_list2 if x is not None] prime_list3_clear = [x for x in prime_list3 if x is not None] result_list = [] for i in prime_list1_clear: print(i) for j in prime_list2_clear: for k in prime_list3_clear: test_num = i**2 + j**3 + k**4 if test_num < 50000000: result_list.append(test_num) print(str(len(list(set(result_list))))) print('time:'+str(time.time()-t))
26.860465
65
0.587013
186
1,155
3.451613
0.306452
0.065421
0.070093
0.046729
0.15109
0.132399
0
0
0
0
0
0.078528
0.294372
1,155
42
66
27.5
0.709202
0
0
0
0
0
0.011255
0
0
0
0
0
0
1
0.027027
false
0
0.054054
0
0.135135
0.081081
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0d6b47a07ed18120ebb9b10352d658a22a11ecb
267
py
Python
Clean Word/index.py
Sudani-Coder/python
9c35f04a0521789ba91b7058695139ed074f7796
[ "MIT" ]
null
null
null
Clean Word/index.py
Sudani-Coder/python
9c35f04a0521789ba91b7058695139ed074f7796
[ "MIT" ]
null
null
null
Clean Word/index.py
Sudani-Coder/python
9c35f04a0521789ba91b7058695139ed074f7796
[ "MIT" ]
null
null
null
# recursion function (Clean Word) def CleanWord(word): if len(word) == 1: return word elif word[0] == word[1]: return CleanWord(word[1:]) else: return word[0] + CleanWord(word[1:]) print(CleanWord("wwwooooorrrrllddd"))
19.071429
44
0.58427
32
267
4.875
0.46875
0.128205
0.141026
0
0
0
0
0
0
0
0
0.030928
0.273408
267
13
45
20.538462
0.773196
0.116105
0
0
0
0
0.07265
0
0
0
0
0
0
1
0.125
false
0
0
0
0.5
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0d7aa3f87b3b51ae56654591cba7faff73f9f8f
665
py
Python
commands/rotatecamera.py
1757WestwoodRobotics/mentorbot
3db344f3b35c820ada4e1aef3eca9b1fc4c5b85a
[ "MIT" ]
2
2021-11-13T20:18:44.000Z
2021-11-13T20:27:04.000Z
commands/rotatecamera.py
1757WestwoodRobotics/mentorbot
3db344f3b35c820ada4e1aef3eca9b1fc4c5b85a
[ "MIT" ]
null
null
null
commands/rotatecamera.py
1757WestwoodRobotics/mentorbot
3db344f3b35c820ada4e1aef3eca9b1fc4c5b85a
[ "MIT" ]
1
2021-11-14T01:38:53.000Z
2021-11-14T01:38:53.000Z
import typing from commands2 import CommandBase from subsystems.cameracontroller import CameraSubsystem class RotateCamera(CommandBase): def __init__(self, camera: CameraSubsystem, leftRight: typing.Callable[[], float], upDown: typing.Callable[[], float]) -> None: CommandBase.__init__(self) self.setName(__class__.__name__) self.camera = camera self.leftRight = leftRight self.upDown = upDown self.addRequirements([self.camera]) self.setName(__class__.__name__) def execute(self) -> None: self.camera.setCameraRotation(self.leftRight(), self.upDown())
28.913043
70
0.667669
63
665
6.666667
0.380952
0.095238
0.090476
0.095238
0
0
0
0
0
0
0
0.001961
0.233083
665
22
71
30.227273
0.821569
0
0
0.125
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0.1875
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0d85ead79155e87bca877ab2df552ddd4292930
8,188
py
Python
instapp/views.py
uwamahororachel/instagram
d5b7127e62047287dfadec15743676df48f278a9
[ "MIT" ]
null
null
null
instapp/views.py
uwamahororachel/instagram
d5b7127e62047287dfadec15743676df48f278a9
[ "MIT" ]
null
null
null
instapp/views.py
uwamahororachel/instagram
d5b7127e62047287dfadec15743676df48f278a9
[ "MIT" ]
null
null
null
from django.shortcuts import render,redirect from django.http import HttpResponse, Http404,HttpResponseRedirect import datetime as dt from .models import Post,Comment,Follow,Profile from django.contrib.auth.decorators import login_required from .forms import NewPostForm, NewCommentForm, AddProfileForm from django.contrib.auth.models import User def signup(request): if request.user.is_authenticated(): return redirect('insta') else: if request.method == 'POST': form = SignupForm(request.POST) if form.is_valid(): user = form.save(commit=False) user.save() new_profile = Profile(user=user) else: form = SignupForm() return render(request, 'registration/registration_form.html',{'form':form}) @login_required(login_url='/accounts/login/') def insta(request): title='Instapp' users = User.objects.all() current_user = request.user profile = Profile.objects.filter(user=current_user).first() if profile == None: my_profile = None else: my_profile=profile comments = Comment.objects.all().order_by('-date_posted') posts = Post.objects.all().order_by('-date_posted') for post in posts: if request.method=='POST' and 'post' in request.POST: posted=request.POST.get("post") for post in posts: if (int(post.id)==int(posted)): post.like+=1 post.save() return redirect('insta') return render(request, 'index.html', {"posts": posts, 'comments':comments,'users':users,'user':current_user,'my_profile':my_profile,'title':title}) @login_required(login_url='/accounts/login/') def new_post(request): current_user = request.user profile = Profile.get_profile(current_user) if profile == None: return redirect('add_profile') else: if request.method == 'POST': form = NewPostForm(request.POST, request.FILES) if form.is_valid(): post = form.save(commit=False) post.user = current_user post.profile = profile post.save() return redirect('insta') else: form = NewPostForm() return render(request, 'newPost.html', {"form": form}) @login_required(login_url='/accounts/login/') def single_post(request, post_id): post = 
Post.objects.get(pk=post_id) comments = Comment.get_comments_by_post(post_id).order_by('-date_posted') current_user = request.user if request.method == 'POST': form = NewCommentForm(request.POST) if form.is_valid(): new_comment = form.save(commit=False) new_comment.user = current_user new_comment.post = post new_comment.save() return redirect('single_post',post_id=post_id) if request.method=='POST' and 'post' in request.POST: posted=request.POST.get("post") for post in posts: if (int(post.id)==int(posted)): post.like+=1 post.save() return redirect('single_post',post_id=post_id) else: form = NewCommentForm() return render(request, 'post.html', {'post':post, 'form':form,'comments':comments}) @login_required(login_url='/accounts/login/') def my_profile(request): current_user = request.user profile = Profile.objects.get(user=current_user) count = Post.objects.filter(profile=profile).count comments = Comment.objects.all().order_by('-date_posted') posts = None if profile == None: return redirect('add_profile') else: posts = Post.get_posts_by_id(profile.id).order_by('-date_posted') for post in posts: if request.method=='POST' and 'post' in request.POST: posted=request.POST.get("post") for post in posts: if (int(post.id)==int(posted)): post.like+=1 post.save() return redirect('profile', profile_id=profile_id) return render(request, 'profile.html', {"posts": posts, "profile": profile, 'count':count,'comments':comments}) @login_required(login_url='/accounts/login/') def update_post(request,post_id): post= Post.objects.get(pk=post_id).order_by('-date_posted') if request.method == 'POST': form = NewPostForm(request.POST) if form.is_valid(): post.caption=form_data.cleaned_data[caption] post=post.update_post(post_id,caption) return redirect('my_profile') else: form = NewPostForm() return render(request, 'postUpdate.html',{'form':form,'post':post}) def delete_post(request,post_id): post= Post.objects.get(pk=post_id) post.delete_post() return redirect('my_profile') return 
render(request, 'my_profile') @login_required(login_url='/accounts/login/') def new_post(request): current_user = request.user profile = Profile.get_profile(current_user) if profile == None: return redirect('add_profile') else: if request.method == 'POST': form = NewPostForm(request.POST, request.FILES) if form.is_valid(): post = form.save(commit=False) post.user = current_user post.profile = profile post.save() return redirect('insta') else: form = NewPostForm() return render(request, 'newPost.html', {"form": form}) @login_required(login_url='/accounts/login/') def add_profile(request): current_user = request.user if request.method == 'POST': form = AddProfileForm(request.POST, request.FILES) if form.is_valid(): new_profile = form.save(commit=False) new_profile.user = current_user new_profile.save() return redirect('my_profile') else: form = AddProfileForm() return render(request, 'addProfile.html', {"form": form}) @login_required(login_url='/accounts/login/') def update_profile(request): current_user = request.user if request.method == 'POST': form = AddProfileForm(request.POST, request.FILES) if form.is_valid(): new_profile = form.save(commit=False) new_profile.user = current_user new_profile.save() return redirect('my_profile') else: form = AddProfileForm() return render(request, 'addProfile.html', {"form": form}) @login_required(login_url='/accounts/login/') def search_results(request): if 'user' in request.GET and request.GET["user"]: search_term = request.GET.get("user") profiles = Profile.find_profile(search_term) message = f"{search_term}" return render(request, 'search.html',{"results": profiles, "message":message}) else: message = "You haven't searched for any term" return render(request, 'search.html',{"message":message}) @login_required(login_url='/accounts/login/') def profile(request, profile_id): profile = Profile.get_profile_id(profile_id) posts = Post.objects.filter(profile=profile.id).order_by('-date_posted') count = 
Post.objects.filter(profile=profile).count comments = Comment.objects.all().order_by('-date_posted') for post in posts: if request.method=='POST' and 'post' in request.POST: posted=request.POST.get("post") for post in posts: if (int(post.id)==int(posted)): post.like+=1 post.save() return redirect('profile', profile_id=profile_id) return render(request, 'userProfile.html', {"posts": posts, "profile": profile, 'count':count,'comments':comments}) @login_required(login_url='/accounts/login/') def follow(request, profile_id): current_user = request.user profile = Profile.get_profile_id(profile_id) follow_user = Follow(user=current_user, profile=profile) follow_user.save() myprofile_id= str(profile.id) return redirect('insta')
36.882883
151
0.626282
975
8,188
5.111795
0.104615
0.041934
0.049559
0.041934
0.702047
0.690409
0.630819
0.595506
0.566613
0.524077
0
0.001136
0.24768
8,188
221
152
37.049774
0.807955
0
0
0.678571
0
0
0.107719
0.004275
0
0
0
0
0
1
0.066327
false
0
0.035714
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0d89d58810bc392058c43540e5719fda8ed9934
6,822
py
Python
cfg.py
alexandonian/relational-set-abstraction
8af6a6a58883ce59c7b29e4161ff970e3bded642
[ "MIT" ]
9
2020-09-17T23:09:42.000Z
2021-12-29T09:56:24.000Z
cfg.py
alexandonian/relational-set-abstraction
8af6a6a58883ce59c7b29e4161ff970e3bded642
[ "MIT" ]
null
null
null
cfg.py
alexandonian/relational-set-abstraction
8af6a6a58883ce59c7b29e4161ff970e3bded642
[ "MIT" ]
1
2021-01-16T07:19:42.000Z
2021-01-16T07:19:42.000Z
import argparse import torch import logger import models import utils NUM_NODES = { 'moments': 391, 'multimoments': 391, 'kinetics': 608, } CRITERIONS = { 'CE': {'func': torch.nn.CrossEntropyLoss}, 'MSE': {'func': torch.nn.MSELoss}, 'BCE': {'func': torch.nn.BCEWithLogitsLoss}, } OPTIMIZERS = { 'SGD': { 'func': torch.optim.SGD, 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 5e-4, }, 'Adam': {'func': torch.optim.Adam, 'weight_decay': 5e-4}, } SCHEDULER_DEFAULTS = {'CosineAnnealingLR': {'T_max': 100}} METAFILE_FILE = { 'moments': { 'train': 'metadata/moments_train_abstraction_sets.json', 'val': 'metadata/moments_val_abstraction_sets.json', }, 'kinetics': { 'train': 'metadata/kinetics_train_abstraction_sets.json', 'val': 'metadata/kinetics_val_abstraction_sets.json', }, } FEATURES_FILE = { 'moments': { 'train': 'metadata/resnet3d50_moments_train_features.pth', 'val': 'metadata/resnet3d50_moments_val_features.pth', 'test': 'metadata/resnet3d50_moments_test_features.pth', }, 'kinetics': { 'train': 'metadata/resnet3d50_kinetics_train_features.pth', 'val': 'metadata/resnet3d50_kinetics_val_features.pth', 'test': 'metadata/resnet3d50_kinetics_test_features.pth', }, } EMBEDDING_FILE = { 'moments': { 'train': 'metadata/moments_train_embeddings.pth', 'val': 'metadata/moments_val_embeddings.pth', }, 'kinetics': { 'train': 'metadata/kinetics_train_embeddings.pth', 'val': 'metadata/kinetics_val_embeddings.pth', 'test': 'metadata/kinetics_test_embeddings.pth', }, } EMBEDDING_CATEGORIES_FILE = { 'moments': 'metadata/moments_category_embeddings.pth', 'kinetics': 'metadata/kinetics_category_embeddings.pth', } LIST_FILE = { 'moments': { 'train': 'metadata/moments_train_listfile.txt', 'val': 'metadata/moments_val_listfile.txt', 'test': 'metadata/moments_test_listfile.txt', }, 'kinetics': { 'train': 'metadata/kinetics_train_listfile.txt', 'val': 'metadata/kinetics_val_listfile.txt', 'test': 'metadata/kinetics_test_listfile.txt', }, } RANKING_FILE = { 'moments': 
'metadata/moments_human_abstraction_sets.json', 'kinetics': 'metadata/kinetics_human_abstraction_sets.json', } GRAPH_FILE = { 'moments': 'metadata/moments_graph.json', 'kinetics': 'metadata/kinetics_graph.json', } def parse_args(): parser = argparse.ArgumentParser(description="Abstraction Experiments") parser.add_argument( '-e', '--experiment', type=str, default='AbstractionEmbedding', help="name of experiment to run", ) parser.add_argument( '-i', '--exp_id', type=str, help="unique name or id of particular experimental run", ) parser.add_argument( '-d', '--dataset', type=str, default='moments', choices=['moments', 'kinetics'], help='name of dataset', ) parser.add_argument( '-m', '--model_name', type=str, default='AbstractionEmbeddingModule', help='class name of model to instantiate', ) parser.add_argument( '-b', '--batch_size', type=int, default=256, help='number of elements (sets) in batch', ) parser.add_argument('--optimizer', type=str, default='Adam') parser.add_argument('--criterion', nargs='+', default=['MSE', 'CE']) parser.add_argument('-l', '--loss_weights', nargs='+', default=[1, 1], type=float) parser.add_argument('--lr', type=float, default=0.0001) parser.add_argument('-s', '--scales', nargs='+', default=[1, 2, 3, 4], type=int) parser.add_argument('-r', '--resume', type=str, default=None) parser.add_argument('--log_dir', type=str, default='logs') parser.add_argument('--checkpoint_dir', type=str, default='checkpoints') parser.add_argument('--output_dir', type=str, default='outputs') parser.add_argument('--metadata_dir', type=str, default='metadata') parser.add_argument('--logger_name', type=str, default='AbstractionLogger') parser.add_argument('--num_epochs', type=int, default=60) parser.add_argument('--start_epoch', type=int, default=0) parser.add_argument('--max_step', type=int, default=None) parser.add_argument('--val_freq', type=int, default=1) parser.add_argument('--log_freq', type=int, default=20) parser.add_argument('--checkpoint_freq', type=int, 
default=1000) parser.add_argument('--cudnn_enabled', default=True, type=utils.str2bool) parser.add_argument('--cudnn_benchmark', default=True, type=utils.str2bool) parser.add_argument('--clip_gradient', type=int, default=20) parser.add_argument('--evaluate', action='store_true') parser.add_argument('-bm', '--basemodel_name', type=str, default='resnet3d50') parser.add_argument('--prefix', type=str, default='') parser.add_argument('--return_metric', type=str, default='top1@abstr') args = parser.parse_args() return args def get_model(model_name, dataset_name, scales=4, basemodel='resnet3d50'): feature_dim = {'resnet3d50': 2048}.get(basemodel, 2048) model_dict = { 'AbstractionEmbeddingModule': { 'func': models.AbstractionEmbeddingModule, 'in_features': feature_dim, 'out_features': feature_dim, 'num_nodes': NUM_NODES[dataset_name], 'embedding_dim': 300, 'bottleneck_dim': 512, 'scales': scales, }, }.get(model_name) model_func = model_dict.pop('func') return model_func(**model_dict) def get_criterion(names=['CE', 'MSE'], cuda=True): criterions = {name: CRITERIONS[name]['func']() for name in names} if cuda: criterions = {name: crit.cuda() for name, crit in criterions.items()} return criterions def get_optimizer(model, optimizer_name, lr=0.001): optim_dict = OPTIMIZERS[optimizer_name] optim_func = optim_dict.pop('func', torch.optim.Adam) optimizer = optim_func(model.parameters(), **{**optim_dict, 'lr': lr}) return optimizer def get_scheduler(optimizer, scheduler_name='CosineAnnealingLR', **kwargs): sched_func = getattr(torch.optim.lr_scheduler, scheduler_name) func_kwargs, _ = utils.split_kwargs_by_func(sched_func, kwargs) sched_kwargs = {**SCHEDULER_DEFAULTS.get(scheduler_name, {}), **func_kwargs} scheduler = sched_func(optimizer, **sched_kwargs) return scheduler def get_logger(args): logger_func = getattr(logger, args.logger_name) logger_dict, _ = utils.split_kwargs_by_func(logger_func, vars(args).copy()) return logger_func(**logger_dict)
32.956522
86
0.650836
775
6,822
5.490323
0.230968
0.06134
0.115864
0.022562
0.191774
0.111633
0.036663
0.021152
0
0
0
0.017627
0.193345
6,822
206
87
33.116505
0.755588
0
0
0.094444
0
0
0.324831
0.163295
0
0
0
0
0
1
0.033333
false
0
0.027778
0
0.094444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0dac9d01fbc63e4052a6ea761aeaa779debac1b
2,021
py
Python
Spider/SpiderLab/lab3/lab3/spiders/spider_msg.py
JimouChen/python-application
b7b16506a17e2c304d1c5fabd6385e96be211c56
[ "Apache-2.0" ]
1
2020-08-09T12:47:27.000Z
2020-08-09T12:47:27.000Z
Spider/SpiderLab/lab3/lab3/spiders/spider_msg.py
JimouChen/Python_Application
b7b16506a17e2c304d1c5fabd6385e96be211c56
[ "Apache-2.0" ]
null
null
null
Spider/SpiderLab/lab3/lab3/spiders/spider_msg.py
JimouChen/Python_Application
b7b16506a17e2c304d1c5fabd6385e96be211c56
[ "Apache-2.0" ]
null
null
null
import scrapy from bs4 import BeautifulSoup from lab3.items import Lab3Item class QuoteSpider(scrapy.Spider): name = 'quotes' start_urls = ['http://quotes.toscrape.com/page/1/'] page_num = 1 # 对爬取到的信息进行解析 def parse(self, response, **kwargs): soup = BeautifulSoup(response.body, 'html.parser') nodes = soup.find_all('div', {'class': 'quote'}) for node in nodes: text = node.find('span', {'class': 'text'}).text author = node.find('small', {'class': 'author'}).text tags = node.find_all('a', {'class': 'tag'}) tags_list = [] for tag in tags: tags_list.append(tag.text) # 接下来找作者链接,进去爬取里面的信息 author_link = 'http://quotes.toscrape.com/' + node.find_all('span')[1].a['href'] # 抛给author_parse进行处理 yield response.follow(author_link, self.author_parse) # print('{0:<4}:{1:<20} said:{2:<20}\n{3}'.format(self.page_num, author, text, tags_list)) item = Lab3Item(author=author, text=text, tags=tags_list) yield item print('=' * 80 + 'page:',self.page_num,'saved successfully!' + '=' * 80) # 下面爬取下一页的链接 try: self.page_num += 1 url = soup.find('li', {'class': 'next'}).a['href'] if url: next_link = 'http://quotes.toscrape.com/' + url yield scrapy.Request(next_link, callback=self.parse) except Exception: print('所有页面信息爬取结束!!!') def author_parse(self, response, **kwargs): soup = BeautifulSoup(response.body, 'html.parser') author_name = soup.find_all('div', {'class': 'author-details'})[0].find('h3').text birthday = soup.find('span').text bio = soup.find('div', {'class': 'author-description'}).text # print('{}: {}\n{}\n{}\n'.format(self.page_num, author_name, birthday, bio)) item = Lab3Item(name=author_name, birthday=birthday, bio=bio) yield item
40.42
102
0.568036
242
2,021
4.644628
0.35124
0.031139
0.039146
0.05605
0.229537
0.11032
0.11032
0.11032
0.11032
0.11032
0
0.016183
0.266205
2,021
49
103
41.244898
0.74174
0.111331
0
0.108108
0
0
0.158189
0
0.027027
0
0
0
0
1
0.054054
false
0
0.081081
0
0.243243
0.054054
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0db51a733ae0c8c54da89e34dba10cbd38f7150
1,236
py
Python
Aditya/Parametric_Models/WeiExpLog.py
cipheraxat/Survival-Analysis
fb7ecbe4a61fc72785a4327c86e0f81a58c5b3df
[ "Apache-2.0" ]
7
2020-06-14T20:43:55.000Z
2020-06-23T06:07:08.000Z
Aditya/Parametric_Models/WeiExpLog.py
Abhijit2505/Survival-Analysis
94c0c386aacfe03a9f2f018511236292f36c4ed9
[ "Apache-2.0" ]
14
2020-06-20T06:28:50.000Z
2020-09-08T15:54:29.000Z
Aditya/Parametric_Models/WeiExpLog.py
Abhijit2505/Survival-Analysis
94c0c386aacfe03a9f2f018511236292f36c4ed9
[ "Apache-2.0" ]
9
2020-06-19T03:50:21.000Z
2021-05-10T18:19:26.000Z
import matplotlib.pyplot as plt from lifelines import (WeibullFitter, ExponentialFitter, LogNormalFitter, LogLogisticFitter) import pandas as pd data = pd.read_csv('Dataset/telco_customer.csv') data['tenure'] = pd.to_numeric(data['tenure']) data = data[data['tenure'] > 0] # Replace yes and No in the Churn column to 1 and 0. 1 for the event and 0 for the censured data. data['Churn'] = data['Churn'].apply(lambda x: 1 if x == 'Yes' else 0) fig, axes = plt.subplots(2, 2, figsize=( 16, 12)) T = data['tenure'] E = data['Churn'] wbf = WeibullFitter().fit(T, E, label='WeibullFitter') ef = ExponentialFitter().fit(T, E, label='ExponentialFitter') lnf = LogNormalFitter().fit(T, E, label='LogNormalFitter') llf = LogLogisticFitter().fit(T, E, label='LogLogisticFitter') wbf.plot_cumulative_hazard(ax=axes[0][0]) ef.plot_cumulative_hazard(ax=axes[0][1]) lnf.plot_cumulative_hazard(ax=axes[1][0]) llf.plot_cumulative_hazard(ax=axes[1][1]) plt.suptitle( 'Parametric Model Implementation of cumulative hazard function on the Telco dataset') fig.text(0.5, 0.04, 'Timeline', ha='center') fig.text(0.04, 0.5, 'Probability', va='center', rotation='vertical') plt.savefig('Images/WeiExpLogx.jpeg') plt.show()
34.333333
97
0.711974
186
1,236
4.672043
0.435484
0.09206
0.023015
0.04603
0.124281
0.124281
0
0
0
0
0
0.02881
0.12945
1,236
35
98
35.314286
0.77881
0.076861
0
0
0
0
0.239684
0.042142
0
0
0
0
0
1
0
false
0
0.115385
0
0.115385
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0de95c4112c071280835a86de6b15a92fec2e83
2,260
py
Python
spoteno/steps/numbers.py
Z-80/spoteno
5d2ae7da437cfd8f9cf351b9602269c115dcd46f
[ "MIT" ]
2
2020-01-16T10:23:05.000Z
2021-11-17T15:44:29.000Z
spoteno/steps/numbers.py
Z-80/spoteno
5d2ae7da437cfd8f9cf351b9602269c115dcd46f
[ "MIT" ]
null
null
null
spoteno/steps/numbers.py
Z-80/spoteno
5d2ae7da437cfd8f9cf351b9602269c115dcd46f
[ "MIT" ]
2
2021-03-25T12:06:36.000Z
2021-11-17T15:44:30.000Z
import re

import num2words

INT_PATTERN = re.compile(r'^-?[0-9]+$')
FLOAT_PATTERN = re.compile(r'^-?[0-9]+[,\.][0-9]+$')
ORDINAL_PATTERN = re.compile(r'^[0-9]+\.?$')
NUM_PATTERN = re.compile(r'^-?[0-9]+([,\.][0-9]+$)?')


class NumberToWords:
    """Spell out integer and decimal number tokens as words.

    Non-number tokens pass through unchanged, wrapped in a list so the
    step interface stays uniform.
    """

    def __init__(self, lang_code):
        # Language code forwarded to num2words (e.g. 'de', 'en').
        self.lang_code = lang_code

    def run(self, token):
        """Return the word form of *token* as a list of words."""
        if FLOAT_PATTERN.match(token) is not None:
            return self._spell(token, float)
        if INT_PATTERN.match(token) is not None:
            return self._spell(token, int)
        return [token]

    def _spell(self, token, cast):
        # Shared float/int path: emit an explicit 'minus' word for the
        # sign, then the num2words spelling of the absolute value.
        out = []
        if token.startswith('-'):
            out.append('minus')
            token = token[1:]
        # Decimal comma is normalized to a dot before numeric conversion.
        num_word = num2words.num2words(
            cast(token.replace(',', '.')),
            lang=self.lang_code
        ).lower()
        out.extend(num_word.split(' '))
        return out


class OrdinalNumberToWords:
    """Spell out ordinal tokens such as '3.' as ordinal words."""

    def __init__(self, lang_code):
        self.lang_code = lang_code

    def run(self, token):
        """Return the ordinal word form of *token* as a list of words."""
        if ORDINAL_PATTERN.match(token) is None:
            return [token]
        # BUG FIX: ORDINAL_PATTERN makes the trailing dot optional, but the
        # original always stripped the last character — so '123' (no dot)
        # became the ordinal of 12. Strip the dot only when it is present.
        digits = token[:-1] if token.endswith('.') else token
        num_word = num2words.num2words(
            int(digits),
            lang=self.lang_code,
            to='ordinal'
        ).lower()
        return num_word.split(' ')


class SplitNumberSuffix:
    """
    If any of the given strings is directly connected to a number
    it is separated.
    "2000%" -> "2000" "%"
    But not "2000%ff"
    """

    def __init__(self, suffixes):
        # Reverse lexicographic order makes a longer suffix win over a
        # shorter one that is its prefix (e.g. '%%' before '%').
        self.suffixes = sorted(suffixes, reverse=True)

    def run(self, token):
        """Split *token* into [number, suffix] when it ends in a known
        suffix preceded by a number; otherwise return [token]."""
        for s in self.suffixes:
            if token.endswith(s):
                should_be_number = token[:-len(s)]
                if NUM_PATTERN.match(should_be_number) is not None:
                    return [should_be_number, token[-len(s):]]
        return [token]
23.541667
61
0.511504
255
2,260
4.364706
0.254902
0.06469
0.075472
0.061096
0.49416
0.465409
0.398922
0.398922
0.361186
0.361186
0
0.023161
0.350442
2,260
95
62
23.789474
0.735014
0.052655
0
0.542373
0
0
0.04354
0.021297
0
0
0
0
0
1
0.101695
false
0
0.033898
0
0.305085
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0e444f5e01631d54753ab517309246502cc9089
4,950
py
Python
resources/portfolio_book.py
basgir/bibliotek
42456ced804a2c9570227b393de662847283c76f
[ "MIT" ]
null
null
null
resources/portfolio_book.py
basgir/bibliotek
42456ced804a2c9570227b393de662847283c76f
[ "MIT" ]
null
null
null
resources/portfolio_book.py
basgir/bibliotek
42456ced804a2c9570227b393de662847283c76f
[ "MIT" ]
null
null
null
###########################################
# Author : Bastien Girardet, Deborah De Wolff
# Date : 13.05.2018
# Course : Applications in Object-oriented Programming and Databases
# Teachers : Binswanger Johannes, Zürcher Ruben
# Project : Bibliotek
# Name : portfolio_book.py Portfolio_book Flask_restful resource
#########################################
from flask_restful import Resource, reqparse
from models.portfolio_book import PortfolioBookModel
from models.book import BookModel


class PortfolioBook(Resource):
    """PortfolioBook.

    Resource that helps with dealing with Http request for a
    portfolio_book provided an id.

    HTTP GET call : /portfolios/<int:portfolioId>/books
    HTTP DELETE call : /portfolios/<int:portfolioId>/books
    """

    # Request parser: both ids are mandatory, otherwise an error is returned.
    parser = reqparse.RequestParser()
    parser.add_argument('bookId', type=int, required=True,
                        help="Each relation does have a book id")
    parser.add_argument('portfolioId', type=int, required=True,
                        help="Each relation does have a portfolio id")

    def get(self, portfolioId):
        """GET request that deals with requests that look for a portfolio
        book relation given a portfolioId."""
        # Call the model to find the portfolio book relations that have
        # this specific portfolio id.
        portfolio_book = PortfolioBookModel.find_by_portfolio_id(portfolioId)

        if portfolio_book:
            # NOTE(review): 201 on a GET is unconventional (200 expected),
            # kept unchanged so existing clients are not broken.
            return {'Portfolio Book of Portfolio {}'.format(portfolioId):
                    list(map(lambda x: x.json(), portfolio_book))}, 201
        else:
            return {'message': 'This portofolio does not exist or does not have any book in the portfolio'}, 404

    def delete(self, portfolioId):
        """DELETE request that deals with the deletion of all relations
        that belongs to a portfolioId."""
        # Call the model to find all entries that have a certain portfolioId.
        # NOTE(review): find_by_portfolio_id appears to return a collection in
        # get() above; confirm delete_from_db() is valid on that result.
        portfolio_book = PortfolioBookModel.find_by_portfolio_id(portfolioId)

        if portfolio_book:
            portfolio_book.delete_from_db()
            # BUG FIX: the original returned a set literal
            # ({"Portfolio relations deleted"}), which is not JSON
            # serializable; wrap the text in a message dict instead.
            return {"message": "Portfolio relations deleted"}, 201
        else:
            return {'message': 'This Portfolio relations does not exist or does not have any book in the portfolio'}, 404


class PortfolioBookList(Resource):
    """Portfoliobook.

    Resource that deals with requests that insert new portfolio _ book
    relations into the database.

    HTTP GET call : /portoflio/books
    """

    def get(self):
        """GET request that returns the list of all the portfolio book
        relations."""
        return {'Portfolio Books':
                list(map(lambda x: x.json(), PortfolioBookModel.query.all()))}, 200


class PortfolioBookEdit(Resource):
    """Book.

    Resource that helps with dealing with Http request that create or
    delete portfolio book relations provided a portfolioId and bookId.

    HTTP POST call : /portfolios/<int:portfolioId>/books/<int:bookId>
    HTTP DELETE call : /portfolios/<int:portfolioId>/books/<int:bookId>
    """

    def post(self, portfolioId, bookId):
        """POST request create a portfolio_book relation provided a
        portfolioId and a bookId."""
        relation = PortfolioBookModel.does_this_relation_exists(portfolioId, bookId)

        # Check if the relation already exists.
        if relation:
            return {"message": "The relation already exists"}, 500
        else:
            try:
                # Call the model by providing the two arguments,
                # then save and commit.
                relation = PortfolioBookModel(bookId, portfolioId)
                relation.save_to_db()
            except Exception:
                return {"message": "An error occurred inserting the relation portfolio_book. Check whether the book or the portofolio do exist"}, 500

        return relation.json(), 201

    def delete(self, portfolioId, bookId):
        """DELETE request that delete a portfolio_book relation provided a
        portfolioId and a bookId."""
        relation = PortfolioBookModel.find_by_portfolio_and_book(portfolioId, bookId)

        if relation:
            try:
                relation.delete_from_db()
                return {'message': 'Relation deleted'}
            except Exception:
                return {'message': 'Error while deleting the relation.'}
        else:
            return {'message': 'Relation not found'}, 404
40.57377
149
0.625051
560
4,950
5.458929
0.271429
0.080798
0.018319
0.036637
0.281322
0.254171
0.24174
0.177298
0.149166
0.149166
0
0.009943
0.288889
4,950
121
150
40.909091
0.858523
0.378788
0
0.339623
0
0
0.196044
0
0
0
0
0
0
1
0.09434
false
0
0.056604
0
0.433962
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0e4dae891748b8a01307ae7aac7bc7715d4cc4e
9,199
py
Python
examples/the-feeling-of-success/run_experiments.py
yujialuo/erdos
7a631b55895f1a473b0f4d38a0d6053851e65b5d
[ "Apache-2.0" ]
null
null
null
examples/the-feeling-of-success/run_experiments.py
yujialuo/erdos
7a631b55895f1a473b0f4d38a0d6053851e65b5d
[ "Apache-2.0" ]
null
null
null
examples/the-feeling-of-success/run_experiments.py
yujialuo/erdos
7a631b55895f1a473b0f4d38a0d6053851e65b5d
[ "Apache-2.0" ]
null
null
null
"""Build and execute the 'feeling of success' grasping pipeline as an
erdos dataflow graph wired to ROS topics."""
import logging

from absl import app
from sensor_msgs.msg import Image

from insert_table_op import InsertTableOperator
from insert_block_op import InsertBlockOperator
from init_robot_op import InitRobotOperator
from gel_sight_op import GelSightOperator
from mock_loc_obj_op import MockLocateObjectOperator
from goto_xyz_op import GoToXYZOperator
from move_above_object_op import MoveAboveObjectOperator
from mock_gripper_op import MockGripperOperator
from mock_grasp_object_op import MockGraspObjectOperator
from raise_object_op import RaiseObjectOperator
from mock_predict_grip_op import MockPredictGripOperator
from random_position_op import RandomPositionOperator
from mock_ungrasp_object_op import MockUngraspObjectOperator
import erdos.graph
from erdos.ros.ros_subscriber_op import ROSSubscriberOp

logger = logging.getLogger(__name__)

# Initial placement of the table and the block in the simulated world.
table_init_arguments = {"_x": 0.75, "_y": 0.0, "_z": 0.0, "ref_frame": "world"}
block_init_arguments = {
    "_x": 0.4225,
    "_y": 0.1265,
    "_z": 0.7725,
    "ref_frame": "world"
}
# Rest pose of the Sawyer right arm, one angle per joint.
robot_init_arguments = {
    "joint_angles": {
        'right_j0': -0.041662954890248294,
        'right_j1': -1.0258291091425074,
        'right_j2': 0.0293680414401436,
        'right_j3': 2.17518162913313,
        'right_j4': -0.06703022873354225,
        'right_j5': 0.3968371433926965,
        'right_j6': 1.7659649178699421
    },
    "limb_name": "right"
}


def construct_graph(graph):
    """Wire every operator of the grasp-and-replace pipeline into *graph*."""
    logger.info("Starting the construction of the graph.")

    # World setup: table first, then the block on top of it.
    insert_table_op = graph.add(
        InsertTableOperator, init_args=table_init_arguments)
    insert_block_op = graph.add(
        InsertBlockOperator, init_args=block_init_arguments)
    graph.connect([insert_table_op], [insert_block_op])

    # Initialize the robot and move it to the rest position.
    init_robot_op = graph.add(
        InitRobotOperator, init_args=robot_init_arguments)
    graph.connect([insert_block_op], [init_robot_op])

    # GelSight tactile sensors: subscribe to both camera topics and fan the
    # raw images out to one GelSight operator per finger.
    gel_sight_topics = [("/gelsightA/image_raw", Image, "gelsightA"),
                        ("/gelsightB/image_raw", Image, "gelsightB")]
    ros_gel_sight_op = graph.add(
        ROSSubscriberOp,
        name='ros_gel_sight',
        init_args={'ros_topics_type': gel_sight_topics},
        setup_args={'ros_topics_type': gel_sight_topics})
    gel_sight_a = graph.add(
        GelSightOperator,
        name="gelsight-a-op",
        init_args={'output_name': "gelsight-stream-a"},
        setup_args={
            'input_name': "gelsightA",
            'output_name': "gelsight-stream-a"
        })
    gel_sight_b = graph.add(
        GelSightOperator,
        name="gelsight-b-op",
        init_args={'output_name': "gelsight-stream-b"},
        setup_args={
            'input_name': "gelsightB",
            'output_name': "gelsight-stream-b"
        })
    graph.connect([ros_gel_sight_op], [gel_sight_a])
    graph.connect([ros_gel_sight_op], [gel_sight_b])

    # Kinect RGB + depth feed the (mock) object locator once the robot is
    # initialized.
    ros_kinect_topics = [("/kinectA/image_raw", Image, "image-stream"),
                         ("/kinectA/depth_raw", Image, "depth-stream")]
    ros_kinect_op = graph.add(
        ROSSubscriberOp,
        name='ros_kinect',
        init_args={'ros_topics_type': ros_kinect_topics},
        setup_args={'ros_topics_type': ros_kinect_topics})
    locate_object_op = graph.add(
        MockLocateObjectOperator,
        name='locate-object-op',
        init_args={
            'image_stream_name': 'image-stream',
            'depth_stream_name': 'depth-stream',
            'trigger_stream_name': InitRobotOperator.stream_name
        },
        setup_args={
            'image_stream_name': 'image-stream',
            'depth_stream_name': 'depth-stream',
            'trigger_stream_name': InitRobotOperator.stream_name
        })
    graph.connect([ros_kinect_op, init_robot_op], [locate_object_op])

    # Move the Sawyer arm above the detected object; the controller and the
    # motion operator form a feedback loop.
    goto_xyz_move_above_op = graph.add(
        GoToXYZOperator,
        name='goto-xyz',
        init_args={
            'limb_name': 'right',
            'output_stream_name': 'goto-move-above'
        },
        setup_args={
            'input_stream_name': MoveAboveObjectOperator.goto_stream_name,
            'output_stream_name': 'goto-move-above'
        })
    move_above_object_op = graph.add(
        MoveAboveObjectOperator,
        name='controller',
        setup_args={
            'trigger_stream_name': MockLocateObjectOperator.stream_name,
            'goto_xyz_stream_name': 'goto-move-above'
        })
    graph.connect([locate_object_op, goto_xyz_move_above_op],
                  [move_above_object_op])
    graph.connect([move_above_object_op], [goto_xyz_move_above_op])

    # Close the gripper to grasp the object (again a feedback loop).
    gripper_close_op = graph.add(
        MockGripperOperator,
        name="gripper-close-op",
        init_args={
            'gripper_speed': 0.25,
            'output_stream_name': 'gripper_close_stream'
        },
        setup_args={
            'gripper_stream': MockGraspObjectOperator.gripper_stream,
            'output_stream_name': 'gripper_close_stream'
        })
    grasp_object_op = graph.add(
        MockGraspObjectOperator,
        name='mock-grasp-object',
        setup_args={
            'trigger_stream_name': MoveAboveObjectOperator.stream_name,
            'gripper_stream_name': 'gripper_close_stream'
        })
    graph.connect([move_above_object_op, gripper_close_op], [grasp_object_op])
    graph.connect([grasp_object_op], [gripper_close_op])

    # Raise the grasped object.
    raise_object_op = graph.add(
        RaiseObjectOperator,
        name='raise-object',
        setup_args={
            'location_stream_name': MockLocateObjectOperator.stream_name,
            'trigger_stream_name': MockGraspObjectOperator.
            action_complete_stream_name
        })
    goto_xyz_raise_op = graph.add(
        GoToXYZOperator,
        name="goto-xyz-raise",
        init_args={
            'limb_name': 'right',
            'output_stream_name': 'goto_xyz_raise'
        },
        setup_args={
            'input_stream_name': RaiseObjectOperator.stream_name,
            'output_stream_name': 'goto_xyz_raise'
        })
    graph.connect([locate_object_op, grasp_object_op], [raise_object_op])
    graph.connect([raise_object_op], [goto_xyz_raise_op])

    # Predict whether the grip was successful using the GelSight streams.
    predict_grip_op = graph.add(
        MockPredictGripOperator,
        name='predict-grip-op',
        setup_args={
            'gel_sight_a_stream_name': 'gelsight-stream-a',
            'gel_sight_b_stream_name': 'gelsight-stream-b',
            'trigger_stream_name': 'goto_xyz_raise'
        })
    graph.connect([gel_sight_a, gel_sight_b, goto_xyz_raise_op],
                  [predict_grip_op])

    # If the grip is successful, move the object to a random location.
    random_position_op = graph.add(
        RandomPositionOperator,
        name="random-pos-op",
        setup_args={
            'locate_object_stream_name': MockLocateObjectOperator.stream_name,
            'trigger_stream_name': MockPredictGripOperator.success_stream_name,
            'goto_xyz_stream_name': 'goto_random_pos'
        })
    goto_xyz_random_op = graph.add(
        GoToXYZOperator,
        name="goto-xyz-random",
        init_args={
            'limb_name': 'right',
            'output_stream_name': 'goto_random_pos'
        },
        setup_args={
            'input_stream_name': RandomPositionOperator.position_stream_name,
            'output_stream_name': 'goto_random_pos'
        })
    graph.connect([locate_object_op, predict_grip_op, goto_xyz_random_op],
                  [random_position_op])
    graph.connect([random_position_op], [goto_xyz_random_op])

    # Finally, open the gripper to release the object.
    gripper_open_op = graph.add(
        MockGripperOperator,
        name="gripper-open-op",
        init_args={
            'gripper_speed': 0.25,
            'output_stream_name': 'gripper_open_stream'
        },
        setup_args={
            'gripper_stream': MockUngraspObjectOperator.gripper_stream,
            'output_stream_name': 'gripper_open_stream'
        })
    ungrasp_object_op = graph.add(
        MockUngraspObjectOperator,
        name="ungrasp-object-op",
        setup_args={
            'trigger_stream_name': RandomPositionOperator.
            action_complete_stream_name,
            'gripper_stream_name': 'gripper_open_stream'
        })
    graph.connect([random_position_op, gripper_open_op], [ungrasp_object_op])
    graph.connect([ungrasp_object_op], [gripper_open_op])

    logger.info("Finished constructing the execution graph!")


def main(argv):
    """Build the pipeline graph and run it until interrupted."""
    graph = erdos.graph.get_current_graph()
    construct_graph(graph)
    graph.execute("ros")
    # Keep the process alive while the graph runs; Ctrl-C exits cleanly.
    try:
        while True:
            pass
    except KeyboardInterrupt:
        pass


if __name__ == "__main__":
    app.run(main)
35.245211
79
0.655941
1,044
9,199
5.391762
0.161877
0.079943
0.030201
0.021318
0.416415
0.293658
0.203766
0.107834
0.074436
0.052585
0
0.022318
0.245027
9,199
260
80
35.380769
0.788193
0.060767
0
0.298643
0
0
0.209391
0.008232
0
0
0
0
0
1
0.00905
false
0.00905
0.081448
0
0.090498
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0e5feb7c20a84c78be8423f81add0bb2c5c4589
2,686
py
Python
junction/tickets/migrations/0001_initial.py
theSage21/junction
ac713edcf56c41eb3f066da776a0a5d24e55b46a
[ "MIT" ]
192
2015-01-12T06:21:24.000Z
2022-03-10T09:57:37.000Z
junction/tickets/migrations/0001_initial.py
theSage21/junction
ac713edcf56c41eb3f066da776a0a5d24e55b46a
[ "MIT" ]
621
2015-01-01T09:19:17.000Z
2021-05-28T09:27:35.000Z
junction/tickets/migrations/0001_initial.py
theSage21/junction
ac713edcf56c41eb3f066da776a0a5d24e55b46a
[ "MIT" ]
207
2015-01-05T16:39:06.000Z
2022-02-15T13:18:15.000Z
# -*- coding: utf-8 -*-
# Initial migration for the tickets app: creates the Ticket model.
# Auto-generated style file — field definitions are kept exactly as emitted.
from __future__ import unicode_literals

import jsonfield.fields
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="Ticket",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                ("order_no", models.CharField(max_length=255)),
                ("order_cost", models.FloatField()),
                ("ticket_no", models.CharField(max_length=255)),
                ("name", models.CharField(max_length=255)),
                ("email", models.EmailField(max_length=75)),
                ("city", models.CharField(max_length=255, null=True, blank=True)),
                ("zipcode", models.IntegerField(null=True, blank=True)),
                ("address", models.CharField(max_length=255, null=True, blank=True)),
                ("status", models.CharField(max_length=255)),
                ("others", jsonfield.fields.JSONField()),
                (
                    "created_by",
                    models.ForeignKey(
                        related_name="created_ticket_set",
                        verbose_name="Created By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
                (
                    "modified_by",
                    models.ForeignKey(
                        related_name="updated_ticket_set",
                        verbose_name="Modified By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
            ],
            options={"abstract": False},
            bases=(models.Model,),
        ),
    ]
35.813333
87
0.44341
205
2,686
5.595122
0.370732
0.054926
0.094159
0.125545
0.397559
0.25109
0.200523
0.200523
0.200523
0.123801
0
0.014374
0.456069
2,686
74
88
36.297297
0.770705
0.007818
0
0.294118
0
0
0.078483
0
0
0
0
0
0
1
0
false
0
0.058824
0
0.102941
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0e63766143621d523ba6066faa521d14ec9c390
1,300
py
Python
src/bin/calc_stats.py
sw005320/PytorchWaveNetVocoder
b92d7af7d5f2794291e0d462694c0719f75ca469
[ "Apache-2.0" ]
1
2021-01-18T06:22:30.000Z
2021-01-18T06:22:30.000Z
src/bin/calc_stats.py
sw005320/PytorchWaveNetVocoder
b92d7af7d5f2794291e0d462694c0719f75ca469
[ "Apache-2.0" ]
null
null
null
src/bin/calc_stats.py
sw005320/PytorchWaveNetVocoder
b92d7af7d5f2794291e0d462694c0719f75ca469
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2017 Tomoki Hayashi (Nagoya University) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) from __future__ import print_function import argparse import numpy as np from sklearn.preprocessing import StandardScaler from utils import read_hdf5 from utils import read_txt from utils import write_hdf5 def main(): parser = argparse.ArgumentParser() parser.add_argument( "--feats", default=None, required=True, help="name of the list of hdf5 files") parser.add_argument( "--stats", default=None, required=True, help="filename of hdf5 format") args = parser.parse_args() # read list and define scaler filenames = read_txt(args.feats) scaler = StandardScaler() print("number of training utterances =", len(filenames)) # process over all of data for filename in filenames: feat = read_hdf5(filename, "/feat_org") scaler.partial_fit(feat[:, 1:]) # add uv term mean = np.zeros((feat.shape[1])) scale = np.ones((feat.shape[1])) mean[1:] = scaler.mean_ scale[1:] = scaler.scale_ # write to hdf5 write_hdf5(args.stats, "/mean", mean) write_hdf5(args.stats, "/scale", scale) if __name__ == "__main__": main()
24.074074
60
0.665385
174
1,300
4.816092
0.5
0.03222
0.053699
0.045346
0.064439
0
0
0
0
0
0
0.021505
0.213077
1,300
53
61
24.528302
0.797654
0.176154
0
0.066667
0
0
0.118532
0
0
0
0
0
0
1
0.033333
false
0
0.233333
0
0.266667
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0e9174ff5dee90055733752e0b8cd4f3423f64e
1,654
py
Python
SoftUni-Python-Programming-Course/Exam-Preparation/medicines_in_carton.py
vladislav-karamfilov/Python-Playground
ed83a693d37ff0c1565ece49d2a5d9ecd32c9aac
[ "MIT" ]
1
2019-04-07T23:10:27.000Z
2019-04-07T23:10:27.000Z
SoftUni-Python-Programming-Course/Exam-Preparation/medicines_in_carton.py
vladislav-karamfilov/Python-Playground
ed83a693d37ff0c1565ece49d2a5d9ecd32c9aac
[ "MIT" ]
null
null
null
SoftUni-Python-Programming-Course/Exam-Preparation/medicines_in_carton.py
vladislav-karamfilov/Python-Playground
ed83a693d37ff0c1565ece49d2a5d9ecd32c9aac
[ "MIT" ]
null
null
null
# Problem description: http://python3.softuni.bg/student/lecture/assignment/56b749af7e4f59b649b7e626/


class Medicine:
    """A medicine box with a name and three spatial dimensions."""

    def __init__(self, name, w, h, d):
        self.name = name
        self.w = w
        self.h = h
        self.d = d

    def can_be_put_in_carton(self, carton_w, carton_h, carton_d):
        """Return True when the box fits in the carton in some axis-aligned
        orientation: compare both dimension triples in sorted order."""
        sorted_medicine_dimensions = sorted([self.w, self.h, self.d])
        sorted_carton_dimensions = sorted([carton_w, carton_h, carton_d])
        return all(sorted_medicine_dimensions[d] <= sorted_carton_dimensions[d]
                   for d in range(3))


def read_medicines(medicines_file_path):
    """Parse lines of the form ``name,w,h,d`` into Medicine objects.

    The name itself may contain commas: the last three comma-separated
    fields are the dimensions, everything before them is the name.
    """
    result = []
    with open(medicines_file_path, encoding='utf-8') as f:
        for line in f:
            if line:
                medicine_info = line.split(',')
                # BUG FIX: rejoin the name with ',' — the original used
                # ''.join(), which silently dropped commas from names that
                # contain them.
                medicine_name = ','.join(medicine_info[:-3])
                medicine_w = float(medicine_info[-3])
                medicine_h = float(medicine_info[-2])
                medicine_d = float(medicine_info[-1])
                result.append(Medicine(medicine_name, medicine_w,
                                       medicine_h, medicine_d))
    return result


def main():
    """Read carton dimensions and a file path from stdin; print the names
    of all medicines that fit in the carton."""
    try:
        carton_w = float(input())
        carton_h = float(input())
        carton_d = float(input())
        medicines_file_path = input()
        medicines = read_medicines(medicines_file_path)
    except Exception:
        # Any malformed number or unreadable file is reported uniformly
        # (narrowed from a bare except, which also swallowed KeyboardInterrupt).
        print('INVALID INPUT')
        return

    medicines_that_can_be_put_in_carton = \
        [medicine for medicine in medicines
         if medicine.can_be_put_in_carton(carton_w, carton_h, carton_d)]

    for medicine in medicines_that_can_be_put_in_carton:
        print(medicine.name)


if __name__ == '__main__':
    main()
29.535714
107
0.638452
215
1,654
4.562791
0.274419
0.061162
0.03262
0.040775
0.217125
0.123344
0.059123
0
0
0
0
0.01876
0.258767
1,654
55
108
30.072727
0.781403
0.059855
0
0
0
0
0.017386
0
0
0
0
0
0
1
0.105263
false
0
0
0
0.210526
0.052632
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0e9bc2b96c3d8a0da5092d2ce1abf89a56a046d
858
py
Python
circuitpy_examples/week1/04_ramp_LED_brightness.py
WSU-Physics/phys150
043ebf8212b56a988ef8e41a4464400bec5a7dc1
[ "MIT" ]
null
null
null
circuitpy_examples/week1/04_ramp_LED_brightness.py
WSU-Physics/phys150
043ebf8212b56a988ef8e41a4464400bec5a7dc1
[ "MIT" ]
null
null
null
circuitpy_examples/week1/04_ramp_LED_brightness.py
WSU-Physics/phys150
043ebf8212b56a988ef8e41a4464400bec5a7dc1
[ "MIT" ]
null
null
null
# Adam Beardsley
# starting from from adafruit example
# https://learn.adafruit.com/welcome-to-circuitpython/creating-and-editing-code
#
# Software PWM: ramp the LED from off to full brightness over ramp_time
# seconds by varying the on/off duty cycle within each short period.

import board
import digitalio
import time

led = digitalio.DigitalInOut(board.LED)
led.direction = digitalio.Direction.OUTPUT

ramp_time = 3   # Time to ramp up, in seconds
period = 0.01   # Time per cycle, in seconds
step = period / ramp_time  # brightness increment per cycle

while True:
    brightness = 0  # every ramp starts fully off
    while brightness < 1:
        # The fraction of each period the LED spends on equals the
        # current brightness.
        on_time = brightness * period
        led.value = True
        time.sleep(on_time)
        led.value = False
        time.sleep(period - on_time)
        brightness += step

# Convince yourself the expression for step above is correct
# How can you *test* that step is correct?
# Can you reverse the program (start bright, get dim)
28.6
79
0.698135
124
858
4.774194
0.548387
0.015203
0.02027
0.037162
0
0
0
0
0
0
0
0.012012
0.223776
858
29
80
29.586207
0.876877
0.462704
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0ead277852aac4f9b24d58dbb1630e69b9f9cac
1,099
py
Python
__main__.py
Makeeyaf/SiteChecker
969bdedd2d5df36220ff9fcc41e44cf1db0cca00
[ "MIT" ]
1
2021-01-06T01:45:41.000Z
2021-01-06T01:45:41.000Z
__main__.py
Makeeyaf/SiteChecker
969bdedd2d5df36220ff9fcc41e44cf1db0cca00
[ "MIT" ]
2
2021-01-03T13:25:39.000Z
2021-01-03T15:57:01.000Z
__main__.py
Makeeyaf/SiteChecker
969bdedd2d5df36220ff9fcc41e44cf1db0cca00
[ "MIT" ]
null
null
null
"""Command-line entry point: parse options and run the SiteChecker."""
import argparse

from site_checker import SiteChecker


def _build_parser():
    """Construct the argument parser for the site checker CLI."""
    parser = argparse.ArgumentParser(description="Check sites text.")
    parser.add_argument("config", type=str, nargs=1,
                        help="Path to config json file.")
    parser.add_argument("-a", dest="apiKey", type=str, nargs=1,
                        required=True, help="Pushbullet API key.")
    parser.add_argument("-m", dest="maxFailCount", type=int, nargs=1,
                        help="Max fail count.")
    parser.add_argument("-u", dest="updateCycle", type=int, nargs=1,
                        help="Update cycle in second")
    parser.add_argument("-v", dest="isVerbose", action="store_true",
                        help="Verbose mode.")
    parser.add_argument("-q", dest="isQuiet", action="store_true",
                        help="Quiet mode. Does not call pushbullet")
    return parser


if __name__ == "__main__":
    args = _build_parser().parse_args()
    # Positional/required options use nargs=1, hence the [0] indexing.
    checker = SiteChecker(
        args.config[0],
        args.apiKey[0],
        args.isQuiet,
        args.isVerbose,
        args.maxFailCount,
        args.updateCycle,
    )
    checker.check()
26.166667
86
0.586897
126
1,099
4.97619
0.5
0.086124
0.162679
0.041467
0.054226
0
0
0
0
0
0
0.007538
0.275705
1,099
41
87
26.804878
0.780151
0
0
0.131579
0
0
0.214741
0
0
0
0
0
0
1
0
false
0
0.052632
0
0.052632
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0eb34e703fb20df0982cbdc1702ff56c69d7bb6
1,563
py
Python
autop-listener/autop-listener.py
yuriel-v/ansible
f6e8fcb1edfbef550da2fe217cfd84941523f692
[ "MIT" ]
null
null
null
autop-listener/autop-listener.py
yuriel-v/ansible
f6e8fcb1edfbef550da2fe217cfd84941523f692
[ "MIT" ]
null
null
null
autop-listener/autop-listener.py
yuriel-v/ansible
f6e8fcb1edfbef550da2fe217cfd84941523f692
[ "MIT" ]
null
null
null
"""Small Flask listener that triggers Ansible provisioning runs inside a
tmux session and serves the Ansible public key."""
import os
from pathlib import Path
from datetime import datetime
from json import dumps

import flask as fsk
from flask import request, jsonify, Response

app = fsk.Flask(__name__)
app.config['DEBUG'] = False
homedir = os.getenv('HOME')

# Characters allowed in request values that get interpolated into the
# shell command below (hostnames, IPs, Ansible tag names).
_SAFE_CHARS = set('abcdefghijklmnopqrstuvwxyz'
                  'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                  '0123456789.-_')


def _is_shell_safe(value):
    """True when *value* is a non-empty string of shell-safe characters."""
    return isinstance(value, str) and value != '' and set(value) <= _SAFE_CHARS


@app.route('/provision', methods=['POST'])
def auto_provision():
    """Fire an ansible-playbook run for the VM described in the JSON body.

    Expects ``type``, ``ip`` and an ``extras`` dict containing at least
    ``desc`` and ``name``. Returns 400 on malformed input.
    """
    Path(f'{homedir}/log/ansible').mkdir(parents=True, exist_ok=True)
    req = request.get_json()
    try:
        vm_type = req.pop('type')
        vm_ip = req.pop('ip')
        if not isinstance(req['extras'], dict):
            raise Exception("Invalid extras element type")
    except Exception:
        return Response('{"response": "Wrongly formatted request"}', 400)

    req['extras']['global_vm_shortname'] = req['extras'].pop('desc')
    req['extras']['global_vm_hostname'] = req['extras'].pop('name')
    hostname = req['extras']['global_vm_hostname']

    # SECURITY FIX: these request-supplied values are interpolated into an
    # os.system() shell string — reject anything outside a conservative
    # character set to block command injection.
    if not all(_is_shell_safe(v) for v in (vm_type, vm_ip, hostname)):
        return Response('{"response": "Wrongly formatted request"}', 400)

    # NOTE(review): extra_vars is still embedded with only quote-escaping;
    # consider passing it via a temp file or shlex.quote for full hardening.
    extra_vars = str(dumps(req['extras'])).replace('"', '\\"')

    ansible_command = "tmux send-keys -t autopshell "
    ansible_command += f"'ansible-playbook {homedir}/ansible/global.yml -i {vm_ip}, --tags \"{vm_type}\" --extra-vars \"{extra_vars}\" "
    ansible_command += f"| tee {homedir}/log/ansible/{req['extras']['global_vm_hostname']}-{datetime.now().isoformat()}.log' C-m"

    os.system(ansible_command)
    return jsonify({'response': 'Ansible command fired'})


@app.route('/getkey', methods=['GET'])
def get_public_key():
    """Return the Ansible SSH public key as JSON."""
    with open(f'{homedir}/.ssh/ansible/id_ansible.pub', 'r') as pkfile:
        return jsonify({'publickey': pkfile.readline().rstrip()})


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=4960)
32.5625
136
0.658989
209
1,563
4.76555
0.483254
0.063253
0.045181
0.051205
0.050201
0
0
0
0
0
0
0.008346
0.15675
1,563
47
137
33.255319
0.747344
0
0
0
0
0.029412
0.332266
0.114597
0
0
0
0
0
1
0.058824
false
0
0.176471
0
0.323529
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0ee65cec9b822e4705a0e2c457a3bbab820bf6b
1,314
py
Python
cryptographyMachine/cryptographyMachine.py
anuranjan08/CryptoMachine
5a1d68adbe88708f21902d1d44a636c043f6ed28
[ "MIT" ]
null
null
null
cryptographyMachine/cryptographyMachine.py
anuranjan08/CryptoMachine
5a1d68adbe88708f21902d1d44a636c043f6ed28
[ "MIT" ]
null
null
null
cryptographyMachine/cryptographyMachine.py
anuranjan08/CryptoMachine
5a1d68adbe88708f21902d1d44a636c043f6ed28
[ "MIT" ]
null
null
null
def machine(): keys='abcdefghijklmnopqrstuvwxyz !' values=keys[-1]+keys[0:-1] """ In encrytpDict: In decryptDict: keys Values keys Values 'a' '!' '!' 'a' 'b' 'a' 'a' 'b' . . . . . . . . . . . . """ encryptDict=dict(zip(keys,values)) decryptDict=dict(zip(values,keys)) """ Asking user for the user input and the mode. """ message=input("Please enter your secret message") mode=input("Please enter your mode: Encode(E) or Decode(D)") """ if the mode is encryption(E)/decryption(D): We will create a listin which we run a dictionary comprehension and if that particular letter is there in encrytion/decryption dictionary , we will fetch the value of that letter and we will append that to list.Similary for other letters in the message. """ if mode.upper()=='E': newMessage=''.join([encryptDict[letter] for letter in message.lower()]) elif mode.upper()=='D': newMessage=''.join([decryptDict[letter] for letter in message.lower()]) else: print("Please enter a correct choice") return newMessage print(machine())
27.375
89
0.547945
150
1,314
4.8
0.453333
0.041667
0.008333
0.055556
0.080556
0.080556
0
0
0
0
0
0.003429
0.334094
1,314
47
90
27.957447
0.819429
0
0
0
0
0
0.220257
0.041801
0
0
0
0
0
1
0.066667
false
0
0
0
0.133333
0.133333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0ee8d887762a2061e866ff6d3e72e86639288e1
645
py
Python
tests/test_ioeeg_abf.py
wonambi-python/wonambi
4e2834cdd799576d1a231ecb48dfe4da1364fe3a
[ "BSD-3-Clause" ]
63
2017-12-30T08:11:17.000Z
2022-01-28T10:34:20.000Z
tests/test_ioeeg_abf.py
wonambi-python/wonambi
4e2834cdd799576d1a231ecb48dfe4da1364fe3a
[ "BSD-3-Clause" ]
23
2017-09-08T08:29:49.000Z
2022-03-17T08:19:13.000Z
tests/test_ioeeg_abf.py
wonambi-python/wonambi
4e2834cdd799576d1a231ecb48dfe4da1364fe3a
[ "BSD-3-Clause" ]
12
2017-09-18T12:48:36.000Z
2021-09-22T07:16:07.000Z
"""Tests for reading Axon .abf recordings through the wonambi Dataset API."""
from numpy import isnan

from wonambi import Dataset

from .paths import axon_abf_file

d = Dataset(axon_abf_file)


def test_abf_read():
    """Header fields, a known sample value, and the (empty) marker list."""
    assert len(d.header['chan_name']) == 1
    assert d.header['start_time'].minute == 47

    segment = d.read_data(begtime=1, endtime=2)
    assert segment.data[0][0, 0] == 2.1972655922581912

    assert len(d.read_markers()) == 0


def test_abf_boundary():
    """Samples requested outside the recording come back as NaN."""
    # Before the first sample: the first 10 values are padding.
    segment = d.read_data(begsam=-10, endsam=5)
    assert isnan(segment.data[0][0, :10]).all()

    # Past the last sample: everything after the final 2 real values is padding.
    total = d.header['n_samples']
    segment = d.read_data(begsam=total - 2, endsam=total + 10)
    assert isnan(segment.data[0][0, 2:]).all()
21.5
59
0.662016
107
645
3.82243
0.373832
0.0489
0.066015
0.095355
0.195599
0.102689
0
0
0
0
0
0.074286
0.186047
645
29
60
22.241379
0.704762
0
0
0
0
0
0.043411
0
0
0
0
0
0.352941
1
0.117647
false
0
0.176471
0
0.294118
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0f3c7164fd5d0e03360ed4d29df99912a368e12
915
py
Python
day02/day02.py
pogross/adventofcode2021
33fc177d30e1104a6203e435f83594c4d3774cdb
[ "MIT" ]
null
null
null
day02/day02.py
pogross/adventofcode2021
33fc177d30e1104a6203e435f83594c4d3774cdb
[ "MIT" ]
null
null
null
day02/day02.py
pogross/adventofcode2021
33fc177d30e1104a6203e435f83594c4d3774cdb
[ "MIT" ]
null
null
null
def execute_command(command: str) -> tuple[int, int]:
    """Translate one ``'direction magnitude'`` command into a
    (horizontal, depth) delta.

    ``forward`` moves horizontally, ``up`` decreases depth, ``down``
    increases it. Annotation fixed: the original ``-> (int)`` just meant
    ``int``, but the function returns a 2-tuple.
    """
    direction, magnitude = command.split(" ")
    horizontal, depth = 0, 0

    if direction == "forward":
        horizontal += int(magnitude)
    elif direction == "up":
        depth -= int(magnitude)
    elif direction == "down":
        depth += int(magnitude)

    return horizontal, depth


def chain_commands(commands: list[str]) -> tuple[int, int]:
    """Sum the deltas of all commands into a final (horizontal, depth)."""
    horizontal, depth = 0, 0
    for command in commands:
        horizontal_change, depth_change = execute_command(command)
        horizontal += horizontal_change
        depth += depth_change

    return horizontal, depth


if __name__ == "__main__":
    with open("input.txt") as f:
        raw = f.read()

    # One command per line of the puzzle input.
    commands = raw.split("\n")

    horizontal, depth = chain_commands(commands)
    print(f"First answer is {horizontal*depth}")
26.911765
68
0.636066
107
915
5.280374
0.411215
0.159292
0.074336
0.060177
0
0
0
0
0
0
0
0.007184
0.239344
915
33
69
27.727273
0.804598
0.06776
0
0.173913
0
0
0.078731
0
0
0
0
0
0
1
0.086957
false
0
0
0
0.173913
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0fccc7e51abcecde4662d4c35aa618544e6087c
7,500
py
Python
Perceptual Hash -Asher/ex1/example_solution.py
kidist-amde/image-search-engine
467d022f7248a74822dd9ae938b5b86333ce417a
[ "MIT" ]
null
null
null
Perceptual Hash -Asher/ex1/example_solution.py
kidist-amde/image-search-engine
467d022f7248a74822dd9ae938b5b86333ce417a
[ "MIT" ]
null
null
null
Perceptual Hash -Asher/ex1/example_solution.py
kidist-amde/image-search-engine
467d022f7248a74822dd9ae938b5b86333ce417a
[ "MIT" ]
null
null
null
import os import cv2 from sklearn.cluster import KMeans, DBSCAN, MiniBatchKMeans from scipy import spatial from sklearn.preprocessing import StandardScaler import numpy as np from tqdm import tqdm import argparse parser = argparse.ArgumentParser(description='Challenge presentation example') parser.add_argument('--data_path', '-d', type=str, default='dataset', help='Dataset path') parser.add_argument('--output_dim', '-o', type=int, default=20, help='Descriptor length') parser.add_argument('--save_dir', '-s', type=str, default=None, help='Save or not gallery/query feats') parser.add_argument('--random', '-r', action='store_true', help='Random run') args = parser.parse_args() class Dataset(object): def __init__(self, data_path): self.data_path = data_path assert os.path.exists(self.data_path), 'Insert a valid path!' self.data_classes = os.listdir(self.data_path) self.data_mapping = {} for c, c_name in enumerate(self.data_classes): temp_path = os.path.join(self.data_path, c_name) temp_images = os.listdir(temp_path) for i in temp_images: img_tmp = os.path.join(temp_path, i) if img_tmp.endswith('.jpg'): if c_name == 'distractor': self.data_mapping[img_tmp] = -1 else: self.data_mapping[img_tmp] = int(c_name) print('Loaded {:d} from {:s} images'.format(len(self.data_mapping.keys()), self.data_path)) def get_data_paths(self): images = [] classes = [] for img_path in self.data_mapping.keys(): if img_path.endswith('.jpg'): images.append(img_path) classes.append(self.data_mapping[img_path]) return images, np.array(classes) def num_classes(self): return len(self.data_classes) class FeatureExtractor(object): def __init__(self, feature_extractor, model, out_dim=20, scale=None, subsample=100): self.feature_extractor = feature_extractor self.model = model self.scale = scale self.subsample = subsample def get_descriptor(self, img_path): img = cv2.imread(img_path) if self.gray: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) kp, descs = self.feature_extractor.detectAndCompute(img, None) 
return descs def fit_model(self, data_list): training_feats = [] # we extact SIFT descriptors for img_path in tqdm(data_list, desc='Fit extraction'): descs = self.get_descriptor(img_path) if descs is None: continue if self.subsample: # TODO: change here sub_idx = np.random.choice(np.arange(descs.shape[0]), self.subsample) descs = descs[sub_idx, :] training_feats.append(descs) training_feats = np.concatenate(training_feats) print('--> Model trained on {} features'.format(training_feats.shape)) # we fit the model self.model.fit(training_feats) print('--> Model fitted') def fit_scaler(self, data_list): features = self.extract_features(data_list) print('--> Scale trained on {}'.format(features.shape)) self.scale.fit(features) print('--> Scale fitted') def extract_features(self, data_list): # we init features features = np.zeros((len(data_list), self.model.n_clusters)) for i, img_path in enumerate(tqdm(data_list, desc='Extraction')): # get descriptor descs = self.get_descriptor(img_path) # 2220x128 descs preds = self.model.predict(descs) histo, _ = np.histogram(preds, bins=np.arange(self.model.n_clusters+1), density=True) # append histogram features[i, :] = histo return features def scale_features(self, features): # we return the normalized features return self.scale.transform(features) def topk_accuracy(gt_label, matched_label, k=1): matched_label = matched_label[:, :k] total = matched_label.shape[0] correct = 0 for q_idx, q_lbl in enumerate(gt_label): correct+= np.any(q_lbl == matched_label[q_idx, :]).item() acc_tmp = correct/total return acc_tmp def main(): data_path = 'C:/Users/21032/Desktop/dataset' # we define training dataset training_path = os.path.join(data_path, 'training') # we define validation dataset validation_path = os.path.join(data_path, 'validation') gallery_path = os.path.join(validation_path, 'gallery') query_path = os.path.join(validation_path, 'query') training_dataset = Dataset(data_path=training_path) gallery_dataset = 
Dataset(data_path=gallery_path) query_dataset = Dataset(data_path=query_path) # get training data and classes training_paths, training_classes = training_dataset.get_data_paths() # we get validation gallery and query data gallery_paths, gallery_classes = gallery_dataset.get_data_paths() query_paths, query_classes = query_dataset.get_data_paths() if not args.random: feature_extractor = cv2.SIFT_create() # we define model for clustering model = KMeans(n_clusters=args.output_dim, n_init=10, max_iter=5000, verbose=False) # model = MiniBatchKMeans(n_clusters=args.output_dim, random_state=0, batch_size=100, max_iter=100, verbose=False) scale = StandardScaler() # we define the feature extractor providing the model extractor = FeatureExtractor(feature_extractor=feature_extractor, model=model, scale=scale, out_dim=args.output_dim) # we fit the KMeans clustering model extractor.fit_model(training_paths) extractor.fit_scaler(training_paths) # now we can use features # we get query features query_features = extractor.extract_features(query_paths) query_features = extractor.scale_features(query_features) # we get gallery features gallery_features = extractor.extract_features(gallery_paths) gallery_features = extractor.scale_features(gallery_features) print(gallery_features.shape, query_features.shape) pairwise_dist = spatial.distance.cdist(query_features, gallery_features, 'minkowski', p=2.) print('--> Computed distances and got c-dist {}'.format(pairwise_dist.shape)) indices = np.argsort(pairwise_dist, axis=-1) else: indices = np.random.randint(len(gallery_paths), size=(len(query_paths), len(gallery_paths))) gallery_matches = gallery_classes[indices] print('########## RESULTS ##########') for k in [1, 3, 10]: topk_acc = topk_accuracy(query_classes, gallery_matches, k) print('--> Top-{:d} Accuracy: {:.3f}'.format(k, topk_acc)) if __name__ == '__main__': main()
34.246575
122
0.608133
878
7,500
4.973804
0.238041
0.032975
0.016487
0.016029
0.065033
0.03618
0
0
0
0
0
0.009601
0.291733
7,500
218
123
34.40367
0.8125
0.078267
0
0.040816
0
0
0.075149
0.004352
0
0
0
0.004587
0.006803
1
0.07483
false
0
0.054422
0.013605
0.183673
0.061224
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a0fd2af6803ffa9be2e8f4bfae48a6a7e68eb4ea
179,927
py
Python
cyberradiodriver/CyberRadioDriver/radio.py
CyberRadio/CyberRadioDriver
44e6fc0e805981981514e6edc18d11d5fa33e659
[ "MIT" ]
null
null
null
cyberradiodriver/CyberRadioDriver/radio.py
CyberRadio/CyberRadioDriver
44e6fc0e805981981514e6edc18d11d5fa33e659
[ "MIT" ]
null
null
null
cyberradiodriver/CyberRadioDriver/radio.py
CyberRadio/CyberRadioDriver
44e6fc0e805981981514e6edc18d11d5fa33e659
[ "MIT" ]
null
null
null
#!/usr/bin/env python ############################################################### # \package CyberRadioDriver.radio # # \brief Defines basic functionality for radio handler objects. # # \note This module defines basic behavior only. To customize # a radio handler class for a particular radio, derive a new # class from the appropriate base class. It is recommended # that behavior specific to a given radio be placed in the # module that supports that radio. # # \author NH # \author DA # \author MN # \copyright Copyright (c) 2014-2021 CyberRadio Solutions, Inc. # All rights reserved. # ############################################################### # Imports from other modules in this package from . import command from . import components from . import configKeys from . import log from . import transport # Imports from external modules # Python standard library imports import ast import copy import datetime import json import math import sys import time import traceback import threading ## # \internal # \brief Returns the MAC address and IP address for a given Ethernet interface. # # \param ifname The name of t# \author DA # \param ifname The Ethernet system interface ("eth0", for example). # \returns A 2-tuple: (MAC Address, IP Address). def getInterfaceAddresses(ifname): import socket,fcntl,struct s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15])) mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1] ip = socket.inet_ntoa(fcntl.ioctl( s.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', ifname[:15]) )[20:24]) return mac,ip ## # \internal # \brief VITA 49 interface specification class. # # The _ifSpec class describes how the VITA 49 interface is set up for # a particular radio. Each radio should have its own interface # specification, implemented as a subclass of _ifSpec. 
# # Radio handler classes need to set static member "ifSpec" to the interface # specification class that the radio uses. class _ifSpec(): ## Whether Vita 49.1 is used vita49_1 = True ## Whether Vita 49.0 is used vita49_0 = True ## Size of the VITA 49 header, in 32-byte words headerSizeWords = 0 ## Size of the payload, in 32-byte words payloadSizeWords = 0 ## Size of the VITA 49 "tail", in 32-byte words tailSizeWords = 0 ## Byte order used by the radio. byteOrder = "little" ## Whether the I/Q data in the payload are swapped iqSwapped = False @classmethod def getFrameInfoDict(cls, self): return { "headerWords": cls.headerSizeWords, "payloadWords": cls.payloadSizeWords, "tailWords": cls.tailSizeWords, "frameSize": (cls.headerSizeWords+cls.payloadSizeWords+cls.tailSizeWords)*4, "v49.1": cls.vita49_1, "v49.0": cls.vita49_0, "byteSwap": cls.byteOrder!=sys.byteorder, "iqSwap": cls.iqSwapped, } #-- Radio Handler Objects ---------------------------------------------# ## # \brief Base radio handler class. # # This class implements the CyberRadioDriver.IRadio interface. # # To add a supported radio to this driver, derive a class from # _radio and change the static members of the new class to describe the # capabilities of that particular radio. Each supported radio should # have its own module under the CyberRadioDriver.radios package tree. # # A radio handler object maintains a series of component objects, one # per component of each type (tuner, WBDDC, NBDDC, etc.). Each component # object is responsible for managing the hardware object that it represents. # Each component object is also responsible for querying the component's # current configuration and for maintaining the object's configuration # as it changes during radio operation. # # A radio handler object also maintains its own configuration, for settings # that occur at the radio level and are not managed by a component object. 
# # \note Several static members of this class have no function within the # code, but instead help CyberRadioDriver.getRadioObjectDocstring() generate # appropriate documentation for derived radio handler classes. # # \implements CyberRadioDriver::IRadio class _radio(log._logger, configKeys.Configurable): _name = "NDRgeneric" ## \brief Radio uses JSON command/response interface? json = False ## \brief VITA 49 interface specification class name (see _ifSpec class). ifSpec = _ifSpec ## \brief Dictionary of VITA 49 interface specification classes, keyed by # payload type strings, for those radios that support more than one VITA # packet format. ifSpecMap = {} ## \brief Analog-to-digital Converter clock rate adcRate = 1.0 # Tuner settings ## \brief Number of tuners numTuner = 0 ## \brief Number of tuner boards numTunerBoards = 1 ## \brief Tuner index base (what number indices start at) tunerIndexBase = 0 ## \brief Tuner component type tunerType = components._tuner ## \brief Tuner index overrides. Used for radios with # WBDDC settings ## \brief Number of WBDDCs available numWbddc = numTuner ## \brief WBDDC index base (what number indices start at) wbddcIndexBase = 1 ## \brief WBDDC component type wbddcType = components._wbddc # NBDDC settings ## \brief Number of NBDDCs numNbddc = 0 ## \brief NBDDC index base (what number indices start at) nbddcIndexBase = 1 ## \brief NBDDC component type nbddcType = components._nbddc ## \brief NBDDC index list override. This is a list of discrete indices # for radios where the indices are a subset of the full index list. # This should be set to None otherwise. 
nbddcIndexOverride = None # FFT Processor Settings ## \brief Number of FFT Channels numFftStream = 0 ## \brief FFT stream index base (what number indices start at) fftStreamIndexBase = 0 ## \brief FFT stream component type fftStreamType = None # Transmitter settings ## \brief Number of transmitters numTxs = 0 ## \brief Transmitter index base (what number indices start at) txIndexBase = 1 ## \brief Transmitter component type txType = None # WBDUC Settings ## \brief Number of WBDUC numWbduc = 0 ## \brief WBDUC index base (what number indices start at) wbducIndexBase = 1 ## \brief WBDUC component type wbducType = None # NBDUC Settings ## \brief Number of NBDUC numNbduc = 0 ## \brief NBDUC index base (what number indices start at) nbducIndexBase = 1 ## \brief NBDUC component type nbducType = None # WBDDC Group settings ## \brief Number of WBDDC groups available numWbddcGroups = 0 ## \brief WBDDC group index base (what number indices start at) wbddcGroupIndexBase = 1 ## \brief WBDDC Group component type wbddcGroupType = None # NBDDC Group settings ## \brief Number of NBDDC groups available numNbddcGroups = 0 ## \brief NBDDC group index base (what number indices start at) nbddcGroupIndexBase = 1 ## \brief NBDDC Group component type nbddcGroupType = None # Combined DDC Group settings ## \brief Number of combined DDC groups available numCddcGroups = 0 ## \brief Combined DDC group index base (what number indices start at) cddcGroupIndexBase = 1 ## \brief Combined DDC Group component type cddcGroupType = None # WBDUC Group settings ## \brief Number of WBDUC groups available numWbducGroups = 0 ## \brief WBDUC group index base (what number indices start at) wbducGroupIndexBase = 1 ## \brief WBDUC Group component type wbducGroupType = None # Tuner Group settings ## \brief Number of tuner groups available numTunerGroups = 0 ## \brief Tuner group index base (what number indices start at) tunerGroupIndexBase = 1 ## \brief Tuner Group component type tunerGroupType = None # UDP 
destination information ## \brief What the UDP destination setting represents for this radio udpDestInfo = "" ## \brief Number of Gigabit Ethernet ports numGigE = 0 ## \brief Gigabit Ethernet port index base (what number indices start at) gigEIndexBase = 1 ## \brief Number of destination IP table entries for each Gigabit Ethernet port numGigEDipEntries = 0 ## \brief Gigabit Ethernet destination IP table index base (what number indices start at) gigEDipEntryIndexBase = 0 # Supported command set. Each member listed here is either a # command class (one derived from command._commandBase) or None # if the command is not supported for a given radio. ## \brief Command: Identity query idnQry = command.idn ## \brief Command: Version query verQry = command.ver ## Command: Hardware revision query hrevQry = command.hrev ## \brief Command: Status query statQry = command.stat ## \brief Command: Tuner status query tstatQry = command.tstat ## \brief Command: Time adjustment set/query tadjCmd = None ## \brief Command: Reset resetCmd = command.reset ## \brief Command: Configuration mode set/query cfgCmd = command.cfg ## \brief Command: Pulse-per-second (PPS) set/query ppsCmd = None ## \brief Command: UTC time set/query utcCmd = None ## \brief Command: Reference mode set/query refCmd = command.ref ## \brief Command: Reference bypass mode set/query rbypCmd = None ## \brief Command: Source IP address set/query sipCmd = command.sip ## \brief Command: Destination IP address set/query dipCmd = command.dip ## \brief Command: Source MAC address set/query # # \note Most radios support \e querying the source MAC address, but few # support \e setting it. 
smacCmd = command.smac ## \brief Command: Destination MAC address set/query dmacCmd = command.dmac ## \brief Command: Calibration frequency set/query calfCmd = None ## \brief Command: Narrowband source selection set/query nbssCmd = None ## \brief Command: Frequency normalization mode set/query fnrCmd = None ## \brief Command: GPS receiver enable set/query gpsCmd = None ## \brief Command: GPS position query gposCmd = None ## \brief Command: Reference tuning voltage set/query rtvCmd = None ## \brief Command: Radio temperature query tempCmd = None ## \brief Command: GPIO output (static) set/query gpioStaticCmd = None ## \brief Command: GPIO output (sequence) set/query gpioSeqCmd = None ## \brief Command: Gigabit Ethernet interface flow control set/query tgfcCmd = None ## \brief Coherent tuning command cohTuneCmd = None ## \brief FPGA state selection command fpgaStateCmd = None ## \brief Radio function (mode) selection command funCmd = None ## \brief Radio Cntrl command cntrlCmd = None # Mode settings ## \brief Supported reference modes refModes = {} ## \brief Supported reference bypass modes rbypModes = {} ## \brief Supported VITA 49 enabling options vitaEnableOptions = {} ## \brief Supported connection modes connectionModes = ["tty"] ## \brief Default baud rate (has no effect if radio does not use TTY) defaultBaudrate = 921600 ## \brief Default port number (has no effect if radio does not use network connections) defaultPort = 8617 ## \brief Default timeout for communications over the radio transport defaultTimeout = transport.radio_transport.defaultTimeout ## \brief Does this radio support setting the tuner bandwidth? tunerBandwithSettable = False ## \brief Tuner bandwidth (Hz) for radios that do not support setting it tunerBandwidthConstant = 40e6 ## # \brief The list of valid configuration keywords supported by this # object. Override in derived classes as needed. 
validConfigurationKeywords = [configKeys.CONFIG_MODE, configKeys.REFERENCE_MODE, configKeys.BYPASS_MODE, configKeys.CALIB_FREQUENCY, configKeys.FNR_MODE, configKeys.GPS_ENABLE, configKeys.REF_TUNING_VOLT, configKeys.GIGE_FLOW_CONTROL, ] ## \brief Default "set time" value setTimeDefault = False ## # \brief Constructs a radio handler object. # # \copydetails CyberRadioDriver::IRadio::\_\_init\_\_() def __init__(self, *args, **kwargs): self._setConfigLock = threading.RLock() # Set up configuration capability configKeys.Configurable.__init__(self) # Consume keyword arguments "verbose" and "logFile" for logging support log._logger.__init__(self, *args, **kwargs) # Now consume our own self.setTime = kwargs.get("setTime",self.setTimeDefault) self.logCtrl = kwargs.get("logCtrl",None) self.transportTimeout = kwargs.get("timeout", None) self.clientId = kwargs.get("clientId", None) if self.transportTimeout is None: self.transportTimeout = self.defaultTimeout self.name = "%s%s"%(self._name,"-%s"%kwargs.get("name") if "name" in kwargs else "",) self.logIfVerbose("Verbose mode!") # Communication transport in use self.transport = None self.tunerDict = {} self.wbddcDict = {} self.nbddcDict = {} self.fftStreamDict = {} self.txDict = {} self.wbducDict = {} self.nbducDict = {} self.wbddcGroupDict = {} self.nbddcGroupDict = {} self.cddcGroupDict = {} self.wbducGroupDict = {} self.tunerGroupDict = {} self.componentList = [] # Little hack to ensure numWbddc is always set (we didn't always have this attribute). if self.numWbddc is None: self.numWbddc = self.numTuner # Form the actual index lists for the different components. Now that certain components # have discrete index values rather than a full sequence, we need to store these for # later use. 
self.tunerIndexList = list(range(self.tunerIndexBase, self.tunerIndexBase + self.numTuner)) self.wbddcIndexList = list(range(self.wbddcIndexBase, self.wbddcIndexBase + self.numWbddc)) self.nbddcIndexList = self.nbddcIndexOverride if self.nbddcIndexOverride is not None else \ list(range(self.nbddcIndexBase, self.nbddcIndexBase + self.numNbddc)) self.fftStreamIndexList = list(range(self.fftStreamIndexBase, self.fftStreamIndexBase + self.numFftStream)) self.txIndexList = list(range(self.txIndexBase, self.txIndexBase + self.numTxs)) self.wbducIndexList = list(range(self.wbducIndexBase, self.wbddcIndexBase + self.numWbduc)) self.nbducIndexList = list(range(self.nbducIndexBase, self.nbddcIndexBase + self.numNbduc)) self.wbddcGroupIndexList = list(range(self.wbddcGroupIndexBase, self.wbddcGroupIndexBase + self.numWbddcGroups)) self.nbddcGroupIndexList = list(range(self.nbddcGroupIndexBase, self.nbddcGroupIndexBase + self.numNbddcGroups)) self.cddcGroupIndexList = list(range(self.cddcGroupIndexBase, self.cddcGroupIndexBase + self.numCddcGroups)) self.wbducGroupIndexList = list(range(self.wbducGroupIndexBase, self.wbducGroupIndexBase + self.numWbducGroups)) self.tunerGroupIndexList = list(range(self.tunerGroupIndexBase, self.tunerGroupIndexBase + self.numTunerGroups)) self.gigEIndexList = list(range(self.gigEIndexBase, self.gigEIndexBase + self.numGigE)) self.gigEDipEntryIndexList = list(range(self.gigEDipEntryIndexBase, self.gigEDipEntryIndexBase + self.numGigEDipEntries)) self.txToneGenIndexList = [] if self.numTxs == 0 else \ list(range(self.txType.toneGenIndexBase, self.txType.toneGenIndexBase + self.txType.numToneGen)) # Make component objects for objRange,objType,objDict in ( \ (self.tunerIndexList,self.tunerType,self.tunerDict), \ (self.wbddcIndexList,self.wbddcType,self.wbddcDict), \ (self.nbddcIndexList,self.nbddcType,self.nbddcDict), \ (self.fftStreamIndexList,self.fftStreamType,self.fftStreamDict), \ (self.txIndexList,self.txType,self.txDict), \ 
(self.wbducIndexList,self.wbducType,self.wbducDict), \ (self.nbducIndexList,self.nbducType,self.nbducDict), \ (self.wbddcGroupIndexList,self.wbddcGroupType,self.wbddcGroupDict), \ (self.nbddcGroupIndexList,self.nbddcGroupType,self.nbddcGroupDict), \ (self.cddcGroupIndexList,self.cddcGroupType,self.cddcGroupDict), \ (self.wbducGroupIndexList,self.wbducGroupType,self.wbducGroupDict), \ (self.tunerGroupIndexList,self.tunerGroupType,self.tunerGroupDict), \ ): if objType is not None: for objInd in objRange: objDict[objInd] = objType(parent=self, transport=None, index=objInd, verbose=self.verbose, logFile=self.logFile) self.componentList.append( objDict[objInd] ) self.sipTable = {} self.dipTable = {} self.versionInfo = {} # State variables # -- is the radio connected through crdd? self.isCrddConnection = False # -- crdd command prefix, which tells crdd that this isn't a pass-through # radio command. Set this to four vertical bars, because no NDR-class # radio uses them. self.crddCommandPrefix = "||||" # Set the time on the radio self.setTime = False self.connectError = "" ## # \brief Destroys a radio handler object. # # \copydetails CyberRadioDriver::IRadio::\_\_del\_\_() def __del__(self): if self.isConnected(): self.disconnect() ## # \brief Indicates whether the radio is connected. # # \copydetails CyberRadioDriver::IRadio::isConnected() def isConnected(self,): return (self.transport is not None and self.transport.connected) ## # \brief Returns version information for the radio. # # \copydetails CyberRadioDriver::IRadio::getVersionInfo() def getVersionInfo(self): # If this is a crdd connection, try to get the version info from # crdd's radio handler rather than through direct radio commands if self.isCrddConnection: # Get the radio's version information from crdd rsp = self._crddSendCommand(cmd="GETVERINFO") if rsp is not None: # Set the version info by running the first response string (the # version info dict) through ast.literal_eval(). 
self.versionInfo = ast.literal_eval(rsp[0]) # Query hardware for details if we don't have them already if not all([key in self.versionInfo for key in \ [configKeys.VERINFO_MODEL, configKeys.VERINFO_SN]]): cmd = self.idnQry(parent=self, query=True, verbose=self.verbose, logFile=self.logFile) cmd.send( self.sendCommand, ) self._addLastCommandErrorInfo(cmd) rspInfo = cmd.getResponseInfo() if rspInfo is not None: self.versionInfo.update(rspInfo) if not all([key in self.versionInfo for key in [configKeys.VERINFO_SW, configKeys.VERINFO_FW, configKeys.VERINFO_REF]]): cmd = self.verQry(parent=self, query=True, verbose=self.verbose, logFile=self.logFile) cmd.send( self.sendCommand, ) self._addLastCommandErrorInfo(cmd) rspInfo = cmd.getResponseInfo() if rspInfo is not None: self.versionInfo.update(rspInfo) if not all([key in self.versionInfo for key in [configKeys.VERINFO_MODEL, configKeys.VERINFO_SN, configKeys.VERINFO_UNITREV, configKeys.VERINFO_HW]]): cmd = self.hrevQry(parent=self, query=True, verbose=self.verbose, logFile=self.logFile) cmd.send( self.sendCommand, ) self._addLastCommandErrorInfo(cmd) rspInfo = cmd.getResponseInfo() if rspInfo is not None: # Don't mask previously determined model and S/N information! for key in [configKeys.VERINFO_MODEL, configKeys.VERINFO_SN]: if key in self.versionInfo and key in rspInfo: del rspInfo[key] self.versionInfo.update(rspInfo) for key in [configKeys.VERINFO_MODEL, configKeys.VERINFO_SN, configKeys.VERINFO_SW, configKeys.VERINFO_FW, configKeys.VERINFO_REF, configKeys.VERINFO_UNITREV, configKeys.VERINFO_HW]: if key not in self.versionInfo: self.versionInfo[key] = "N/A" return self.versionInfo ## # \brief Returns connection information for the radio. 
# # \copydetails CyberRadioDriver::IRadio::getConnectionInfo() def getConnectionInfo(self): connectionInfo = {} # Connection information connectionInfo["mode"] = self.mode if self.mode in ("tcp","udp","https"): connectionInfo["hostname"] = self.host_or_dev connectionInfo["port"] = "%d" % self.port_or_baudrate elif self.mode == "tty": connectionInfo["device"] = self.host_or_dev connectionInfo["baudrate"] = "%d" % self.port_or_baudrate return connectionInfo ## # \brief Connects to a given radio. # # \copydetails CyberRadioDriver::IRadio::connect() def connect(self,mode,host_or_dev,port_or_baudrate=None,setTime=False,initDdc=False, reset=False, fcState=None): if mode in self.connectionModes: self.mode = mode self.host_or_dev = host_or_dev self.port_or_baudrate = port_or_baudrate if self.port_or_baudrate is None: self.port_or_baudrate = self.defaultBaudrate if mode == "tty" else \ self.defaultPort self.logIfVerbose("USING PORT %r"%(self.port_or_baudrate)) if self.transport is not None: self.transport.disconnect() self.transport = None self.transport = transport.radio_transport(parent=self,verbose=self.verbose, logFile=self.logFile, logCtrl=self.logCtrl, json=self.json, timeout=self.transportTimeout) if self.transport.connect(mode, self.host_or_dev, self.port_or_baudrate): if self.isCrddConnection: self._crddInitialize() # Query the configuration if we didn't already have it if self.configuration == {}: self._queryConfiguration() for obj in self.componentList: obj.addTransport(self.transport, self.sendCommand, not self.isCrddConnection) self.getVersionInfo() if reset: self.sendReset() if setTime: self.setTimeNextPps() if initDdc: self.setDdcConfiguration(wideband=True,) self.setDdcConfiguration(wideband=False,) if fcState is not None: try: self.setTenGigFlowControlStatus(fcState) except: pass return True else: self.connectError = self.transport.connectError self.disconnect() return False else: self.log("Unsupported connection mode: %s", str(mode)) return False ## # 
\brief Disconnects from the radio. # # \copydetails CyberRadioDriver::IRadio::disconnect() def disconnect(self): try: self.transport.disconnect() except: self.logIfVerbose(traceback.format_exc()) #traceback.print_exc() for obj in self.componentList: obj.delTransport() self.configuration = {} ## # \brief Sends a command to the radio. # # \copydetails CyberRadioDriver::IRadio::sendCommand() def sendCommand(self,cmdString,timeout=None): # Sanity-check: Don't bother trying if we don't have a # transport object, or if it's disconnected if self.transport is None or not self.transport.isConnected(): return None # Check if this is an outgoing crdd command. These commands don't # use JSON framing, so we want to avoid trying to run it through # the JSON layer (which won't work). isCrddCommand = cmdString.startswith(self.crddCommandPrefix) try: if not isCrddCommand and self.json: if isinstance(cmdString, str): jsonCmd = json.loads(cmdString) elif isinstance(cmdString, dict): jsonCmd = cmdString jsonCmd["msg"] = command.jsonConfig.newMessageId() x = self.transport.sendCommandAndReceive(json.dumps(jsonCmd),timeout) else: x = self.transport.sendCommandAndReceive(cmdString, timeout, useJson=False) if not self.transport.connected: self.transport.disconnect() return None else: return x except: self.logIfVerbose(traceback.format_exc()) #traceback.print_exc() self.transport.disconnect() return None ## # \brief Sets the radio configuration. # # \copydetails CyberRadioDriver::IRadio::setConfiguration() def setConfiguration(self, configDict={}): if self.isCrddConnection: return self._crddSetConfiguration(configDict) else: with self._setConfigLock: self.cmdErrorInfo = [] # Normalize the incoming configuration dictionary before trying to process it. 
configDict2 = self._normalizeConfigDict(configDict) success = configKeys.Configurable.setConfiguration(self, **configDict2) # Tuner configuration tunerConfDict = configDict2.get(configKeys.CONFIG_TUNER,{}) for index in self.tunerIndexList: if index in tunerConfDict: confDict = tunerConfDict[index] confDict[configKeys.TUNER_INDEX] = index success &= self.setTunerConfigurationNew(**confDict) # DDC configuration for ddcType in [configKeys.CONFIG_WBDDC, configKeys.CONFIG_NBDDC]: isWideband = (ddcType == configKeys.CONFIG_WBDDC) ddcConfDict = configDict2.get(configKeys.CONFIG_DDC,{}).get(ddcType,{}) ddcIndexRange = self.wbddcIndexList if isWideband else self.nbddcIndexList for index in ddcIndexRange: if index in ddcConfDict: confDict = ddcConfDict[index] confDict[configKeys.DDC_INDEX] = index success &= self.setDdcConfigurationNew(wideband=isWideband, **confDict) # IP configuration success &= self.setIpConfigurationNew(configDict2.get(configKeys.CONFIG_IP, {})) # Transmitter configuration txConfDict = configDict2.get(configKeys.CONFIG_TX,{}) for index in self.getTransmitterIndexRange(): if index in txConfDict: confDict = txConfDict[index] confDict[configKeys.TX_INDEX] = index success &= self.setTxConfigurationNew(**confDict) for ducType in [configKeys.CONFIG_WBDUC, configKeys.CONFIG_NBDUC]: isWideband = (ducType == configKeys.CONFIG_WBDUC) ducConfDict = configDict2.get(configKeys.CONFIG_DUC,{}).get(ducType,{}) ducIndexRange = self.wbducIndexList if isWideband else self.nbducIndexList for index in ducIndexRange: if index in ducConfDict: confDict = ducConfDict[index] confDict[configKeys.DUC_INDEX] = index success &= self.setDucConfigurationNew(wideband=isWideband, **confDict) # DDC group configuration for ddcType in [configKeys.CONFIG_WBDDC_GROUP, configKeys.CONFIG_NBDDC_GROUP]: # Flag for forcing the driver to query DDCs for status information forceDdcQuery = False isWideband = (ddcType == configKeys.CONFIG_WBDDC_GROUP) ddcGroupConfDict = 
configDict2.get(configKeys.CONFIG_DDC_GROUP,{}).get(ddcType,{}) ddcGroupIndexRange = self.wbddcGroupIndexList if isWideband else self.nbddcGroupIndexList for index in ddcGroupIndexRange: if index in ddcGroupConfDict: confDict = ddcGroupConfDict[index] confDict[configKeys.INDEX] = index success &= self.setDdcGroupConfigurationNew(wideband=isWideband, **confDict) # Force DDC query if DDC grouping configuration gets changed forceDdcQuery = True # This section forces hardware queries to update the corresponding DDC # and DDC group configurations. if forceDdcQuery: ddcDict = self.wbddcDict if isWideband else self.nbddcDict for i in self._getIndexList(None, ddcDict): ddcDict[i]._queryConfiguration() ddcGroupDict = self.wbddcGroupDict if isWideband else self.nbddcGroupDict for i in self._getIndexList(None, ddcGroupDict): ddcGroupDict[i]._queryConfiguration() # Combined DDC group configuration for ddcType in [configKeys.CONFIG_COMBINED_DDC_GROUP]: #self.logIfVerbose("[ndr551][setConfiguration()] Configure combined DDCs") # Flag for forcing the driver to query DDCs for status information forceDdcQuery = False ddcGroupConfDict = configDict2.get(configKeys.CONFIG_DDC_GROUP,{}).get(ddcType,{}) ddcGroupIndexRange = self.cddcGroupIndexList for index in ddcGroupIndexRange: if index in ddcGroupConfDict: confDict = ddcGroupConfDict[index] confDict[configKeys.INDEX] = index #self.logIfVerbose("[ndr551][setConfiguration()] Combined DDC", index) #self.logIfVerbose("[ndr551][setConfiguration()] %s" % confDict) success &= self.setCombinedDdcGroupConfigurationNew(**confDict) # Force DDC query if DDC grouping configuration gets changed forceDdcQuery = True # This section forces hardware queries to update the corresponding DDC # and DDC group configurations. 
if forceDdcQuery: for isWideband in [True, False]: ddcDict = self.wbddcDict if isWideband else self.nbddcDict for i in self._getIndexList(None, ddcDict): ddcDict[i]._queryConfiguration() ddcGroupDict = self.cddcGroupDict for i in self._getIndexList(None, ddcGroupDict): ddcGroupDict[i]._queryConfiguration() # DUC configuration for ducType in [configKeys.CONFIG_WBDUC_GROUP]: # Flag for forcing the driver to query DUCs for status information forceDucQuery = False isWideband = (ducType == configKeys.CONFIG_WBDUC_GROUP) ducGroupConfDict = configDict2.get(configKeys.CONFIG_DUC_GROUP,{}).get(ducType,{}) ducGroupIndexRange = self.wbducGroupIndexList if isWideband else self.nbducGroupIndexList for index in ducGroupIndexRange: if index in ducGroupConfDict: confDict = ducGroupConfDict[index] confDict[configKeys.INDEX] = index success &= self.setDucGroupConfigurationNew(wideband=isWideband, **confDict) # Force DUC query if DUC grouping configuration gets changed forceDucQuery = True # This section forces hardware queries to update the corresponding DUC # and DUC group configurations. 
if forceDucQuery: ducDict = self.wbducDict if isWideband else self.nbducDict for i in self._getIndexList(None, ducDict): ducDict[i]._queryConfiguration() ducGroupDict = self.wbducGroupDict if isWideband else self.nbducGroupDict for i in self._getIndexList(None, ducGroupDict): ducGroupDict[i]._queryConfiguration() # FFT streaming configuration fftStreamConfDict = configDict2.get(configKeys.CONFIG_FFT,{}) for index in self.fftStreamIndexList: if index in fftStreamConfDict: confDict = fftStreamConfDict[index] confDict[configKeys.FFT_INDEX] = index success &= self.setFftStreamConfiguration(**confDict) # Tuner group configuration forceTunerQuery = False tunerGroupConfDict = configDict2.get(configKeys.CONFIG_TUNER_GROUP,{}) tunerGroupIndexRange = self.tunerGroupIndexList for index in tunerGroupIndexRange: if index in tunerGroupConfDict: confDict = tunerGroupConfDict[index] confDict[configKeys.INDEX] = index success &= self.setTunerGroupConfigurationNew(**confDict) # Force tuner query if tuner grouping configuration gets changed forceTunerQuery = True if forceTunerQuery: for i in self._getIndexList(None, self.tunerDict): self.tunerDict[i]._queryConfiguration() for i in self._getIndexList(None, self.tunerGroupDict): self.tunerGroupDict[i]._queryConfiguration() return success ## # \brief Sets the radio configuration based on a sequence of configuration # dictionary keys. # # \copydetails CyberRadioDriver::IRadio::setConfigurationByKeys() def setConfigurationByKeys(self, value=None, *keys): configDict = {} self._dictEnsureEntrySet(configDict, value, *keys) return self.setConfiguration(configDict) ## # \brief Gets the radio configuration. 
#
# \copydetails CyberRadioDriver::IRadio::getConfiguration()
def getConfiguration(self):
    # For crdd connections, the daemon holds the authoritative configuration;
    # otherwise assemble it from the per-component configuration getters.
    ret = None
    if self.isCrddConnection:
        ret = self._crddGetConfiguration()
    else:
        self.cmdErrorInfo = []
        ret = configKeys.Configurable.getConfiguration(self)
        # Get tuner configuration
        if self.numTuner > 0:
            ret[configKeys.CONFIG_TUNER] = self.getTunerConfigurationNew()
        # Get DDC configuration
        if self.numWbddc > 0:
            ret[configKeys.CONFIG_DDC] = {}
            # -- Wideband
            ret[configKeys.CONFIG_DDC][configKeys.CONFIG_WBDDC] = self.getDdcConfigurationNew(wideband=True)
            if self.numNbddc > 0:
                # -- Narrowband
                ret[configKeys.CONFIG_DDC][configKeys.CONFIG_NBDDC] = self.getDdcConfigurationNew(wideband=False)
        if self.numFftStream > 0:
            ret[configKeys.CONFIG_FFT] = self.getFftStreamConfiguration()
        # Get transmitter configuration
        if self.numTxs > 0:
            ret[configKeys.CONFIG_TX] = self.getTxConfigurationNew()
        # Get DUC configuration
        if self.numTxs > 0:
            ret[configKeys.CONFIG_DUC] = {}
            # -- Wideband
            ret[configKeys.CONFIG_DUC][configKeys.CONFIG_WBDUC] = self.getDucConfigurationNew(wideband=True)
            if self.numNbduc > 0:
                # -- Narrowband
                # BUGFIX: narrowband DUC settings were previously stored under
                # CONFIG_DDC; they belong under CONFIG_DUC alongside the wideband
                # DUC entry (and CONFIG_DDC may not even exist when numWbddc == 0,
                # which would have raised KeyError here).
                ret[configKeys.CONFIG_DUC][configKeys.CONFIG_NBDUC] = self.getDucConfigurationNew(wideband=False)
        # Get DDC group configuration
        if self.numWbddcGroups > 0:
            ret[configKeys.CONFIG_DDC_GROUP] = {}
            # -- Wideband
            ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_WBDDC_GROUP] = \
                self.getDdcGroupConfigurationNew(wideband=True)
            if self.numNbddcGroups > 0:
                # -- Narrowband
                ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_NBDDC_GROUP] = \
                    self.getDdcGroupConfigurationNew(wideband=False)
        elif self.numCddcGroups > 0:
            # -- Combined (wideband + narrowband)
            ret[configKeys.CONFIG_DDC_GROUP] = {}
            ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_COMBINED_DDC_GROUP] = \
                self.getCombinedDdcGroupConfigurationNew()
        # Get DUC group configuration
        if self.numWbducGroups > 0:
            ret[configKeys.CONFIG_DUC_GROUP] = {}
            # -- Wideband
            ret[configKeys.CONFIG_DUC_GROUP][configKeys.CONFIG_WBDUC_GROUP] = \
                self.getDucGroupConfigurationNew(wideband=True)
            #if self.numNbducGroups > 0:
            #    # -- Narrowband
            #    ret[configKeys.CONFIG_DUC_GROUP][configKeys.CONFIG_NBDUC_GROUP] = \
            #        self.getDucGroupConfigurationNew(wideband=False)
        # Get tuner group configuration
        if self.numTunerGroups > 0:
            ret[configKeys.CONFIG_TUNER_GROUP] = \
                self.getTunerGroupConfigurationNew()
    return ret

##
# \brief Gets radio configuration information based on a sequence of configuration
# dictionary keys.
#
# \copydetails CyberRadioDriver::IRadio::getConfigurationByKeys()
def getConfigurationByKeys(self, *keys):
    # Drill into the nested configuration dict; None if the path is absent.
    return self._dictSafeGet(self.getConfiguration(), None, *keys)

##
# \brief Queries the radio hardware to get its configuration.
#
# \copydetails CyberRadioDriver::IRadio::queryConfiguration()
def queryConfiguration(self):
    # Full query == keyed query with no keys.
    return self.queryConfigurationByKeys()

##
# \brief Queries the radio hardware to get a subset of configuration information,
# based on a sequence of configuration dictionary keys.
#
# \copydetails CyberRadioDriver::IRadio::queryConfigurationByKeys()
def queryConfigurationByKeys(self, *keys):
    # keys[0] selects a top-level section (tuner/ddc/duc/...); keys[1] a
    # sub-section or index; keys[2] an index within a sub-section.
    self.cmdErrorInfo = []
    ret = {}
    if self.isCrddConnection:
        ret = self._crddQueryConfigurationByKeys(*keys)
    else:
        if len(keys) == 0:
            ret = configKeys.Configurable.queryConfiguration(self)
        # Get tuner configuration
        if self.numTuner > 0:
            if len(keys) == 0:
                ret[configKeys.CONFIG_TUNER] = self.queryTunerConfigurationNew(tunerIndex=None)
            elif len(keys) > 0 and keys[0] == configKeys.CONFIG_TUNER:
                tunerIndex = None if len(keys) == 1 else int(keys[1])
                ret[configKeys.CONFIG_TUNER] = self.queryTunerConfigurationNew(tunerIndex=tunerIndex)
        # Get DDC configuration
        if self.numWbddc > 0:
            if len(keys) == 0 or keys[0] == configKeys.CONFIG_DDC:
                ret[configKeys.CONFIG_DDC] = {}
                # -- Wideband
                if len(keys) < 2:
                    ret[configKeys.CONFIG_DDC][configKeys.CONFIG_WBDDC] = self.queryDdcConfigurationNew(
                            wideband=True, ddcIndex=None)
                elif keys[1] == configKeys.CONFIG_WBDDC:
                    ddcIndex = None if len(keys) == 2 else int(keys[2])
                    ret[configKeys.CONFIG_DDC][configKeys.CONFIG_WBDDC] = self.queryDdcConfigurationNew(
                            wideband=True, ddcIndex=ddcIndex)
                # -- Narrowband
                if self.numNbddc > 0:
                    if len(keys) < 2:
                        ret[configKeys.CONFIG_DDC][configKeys.CONFIG_NBDDC] = self.queryDdcConfigurationNew(
                                wideband=False, ddcIndex=None)
                    elif keys[1] == configKeys.CONFIG_NBDDC:
                        ddcIndex = None if len(keys) == 2 else int(keys[2])
                        ret[configKeys.CONFIG_DDC][configKeys.CONFIG_NBDDC] = self.queryDdcConfigurationNew(
                                wideband=False, ddcIndex=ddcIndex)
        # Get FFT Stream configuration
        if self.numFftStream > 0:
            if len(keys) == 0:
                ret[configKeys.CONFIG_FFT] = self.queryFftStreamConfiguration(fftStreamIndex=None)
            elif len(keys) > 0 and keys[0] == configKeys.CONFIG_FFT:
                fftStreamIndex = None if len(keys) == 1 else int(keys[1])
                ret[configKeys.CONFIG_FFT] = self.queryFftStreamConfiguration(fftStreamIndex=fftStreamIndex)
        # Get transmitter configuration
        if self.numTxs > 0:
            if len(keys) == 0:
                ret[configKeys.CONFIG_TX] = self.queryTxConfigurationNew(txIndex=None)
            elif len(keys) > 0 and keys[0] == configKeys.CONFIG_TX:
                txIndex = None if len(keys) == 1 else int(keys[1])
                ret[configKeys.CONFIG_TX] = self.queryTxConfigurationNew(txIndex=txIndex)
        # Get DUC configuration
        if self.numTxs > 0:
            if len(keys) == 0 or keys[0] == configKeys.CONFIG_DUC:
                ret[configKeys.CONFIG_DUC] = {}
                # -- Wideband
                if len(keys) < 2:
                    ret[configKeys.CONFIG_DUC][configKeys.CONFIG_WBDUC] = self.queryDucConfigurationNew(
                            wideband=True, ducIndex=None)
                elif keys[1] == configKeys.CONFIG_WBDUC:
                    ducIndex = None if len(keys) == 2 else int(keys[2])
                    ret[configKeys.CONFIG_DUC][configKeys.CONFIG_WBDUC] = self.queryDucConfigurationNew(
                            wideband=True, ducIndex=ducIndex)
                # -- Narrowband
                if self.numNbduc > 0:
                    if len(keys) < 2:
                        ret[configKeys.CONFIG_DUC][configKeys.CONFIG_NBDUC] = self.queryDucConfigurationNew(
                                wideband=False, ducIndex=None)
                    elif keys[1] == configKeys.CONFIG_NBDUC:
                        ducIndex = None if len(keys) == 2 else int(keys[2])
                        ret[configKeys.CONFIG_DUC][configKeys.CONFIG_NBDUC] = self.queryDucConfigurationNew(
                                wideband=False, ducIndex=ducIndex)
        # Get DDC group configuration
        if any( [self.numWbddcGroups > 0, self.numNbddcGroups > 0,
                 self.numCddcGroups > 0] ):
            if len(keys) == 0 or keys[0] == configKeys.CONFIG_DDC_GROUP:
                ret[configKeys.CONFIG_DDC_GROUP] = {}
                # -- Wideband
                if self.numWbddcGroups > 0:
                    if len(keys) < 2:
                        ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_WBDDC_GROUP] = \
                            self.queryDdcGroupConfigurationNew(wideband=True, ddcGroupIndex=None)
                    elif keys[1] == configKeys.CONFIG_WBDDC_GROUP:
                        ddcGroupIndex = None if len(keys) == 2 else int(keys[2])
                        ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_WBDDC_GROUP] = \
                            self.queryDdcGroupConfigurationNew(wideband=True, ddcGroupIndex=ddcGroupIndex)
                # -- Narrowband
                if self.numNbddcGroups > 0:
                    if len(keys) < 2:
                        ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_NBDDC_GROUP] = \
                            self.queryDdcGroupConfigurationNew(wideband=False, ddcGroupIndex=None)
                    elif keys[1] == configKeys.CONFIG_NBDDC_GROUP:
                        ddcGroupIndex = None if len(keys) == 2 else int(keys[2])
                        ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_NBDDC_GROUP] = \
                            self.queryDdcGroupConfigurationNew(wideband=False, ddcGroupIndex=ddcGroupIndex)
                # -- Combined (wideband + narrowband)
                if self.numCddcGroups > 0:
                    if len(keys) < 2:
                        ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_COMBINED_DDC_GROUP] = \
                            self.queryCombinedDdcGroupConfigurationNew(ddcGroupIndex=None)
                    elif keys[1] == configKeys.CONFIG_COMBINED_DDC_GROUP:
                        ddcGroupIndex = None if len(keys) == 2 else int(keys[2])
                        ret[configKeys.CONFIG_DDC_GROUP][configKeys.CONFIG_COMBINED_DDC_GROUP] = \
                            self.queryCombinedDdcGroupConfigurationNew(ddcGroupIndex=ddcGroupIndex)
        # Get DUC group configuration
        if any( [self.numWbducGroups > 0] ):
            if len(keys) == 0 or keys[0] == configKeys.CONFIG_DUC_GROUP:
                ret[configKeys.CONFIG_DUC_GROUP] = {}
                # -- Wideband
                if self.numWbducGroups > 0:
                    if len(keys) < 2:
                        ret[configKeys.CONFIG_DUC_GROUP][configKeys.CONFIG_WBDUC_GROUP] = \
                            self.queryDucGroupConfigurationNew(wideband=True, ducGroupIndex=None)
                    elif keys[1] == configKeys.CONFIG_WBDUC_GROUP:
                        ducGroupIndex = None if len(keys) == 2 else int(keys[2])
                        ret[configKeys.CONFIG_DUC_GROUP][configKeys.CONFIG_WBDUC_GROUP] = \
                            self.queryDucGroupConfigurationNew(wideband=True, ducGroupIndex=ducGroupIndex)
        # Get tuner group configuration
        if self.numTunerGroups > 0:
            if len(keys) == 0:
                ret[configKeys.CONFIG_TUNER_GROUP] = self.queryTunerGroupConfigurationNew(
                        tunerGroupIndex=None)
            elif len(keys) > 0 and keys[0] == configKeys.CONFIG_TUNER_GROUP:
                tunerGroupIndex = None if len(keys) == 1 else int(keys[1])
                ret[configKeys.CONFIG_TUNER_GROUP] = self.queryTunerGroupConfigurationNew(
                        tunerGroupIndex=tunerGroupIndex)
        # Query IP configuration
        if len(keys) == 0 or keys[0] == configKeys.CONFIG_IP:
            if len(keys) == 0:
                ret[configKeys.CONFIG_IP] = self.queryIpConfigurationNew(gigEPortIndex=None)
            elif len(keys) > 0 and keys[0] == configKeys.CONFIG_IP:
                gigEPortIndex = None if len(keys) == 1 else int(keys[1])
                ret[configKeys.CONFIG_IP] = self.queryIpConfigurationNew(gigEPortIndex=gigEPortIndex)
    # Update the internal configuration dictionary based on query results
    self.configuration.update(ret)
    # Return the result
    return ret

##
# \protected
# \brief Queries hardware to determine the object's current configuration.
def _queryConfiguration(self):
    # Call the base-class implementation
    configKeys.Configurable._queryConfiguration(self)
    # Override: query each radio-level setting for which this radio class
    # defines a command class.  Note that the same command class may serve
    # two keys (refCmd covers both REFERENCE_MODE and STATUS_PPS_SOURCE).
    for cmdClass, configKey in [ \
            (self.cfgCmd, configKeys.CONFIG_MODE), \
            (self.refCmd, configKeys.REFERENCE_MODE), \
            (self.rbypCmd, configKeys.BYPASS_MODE), \
            (self.calfCmd, configKeys.CALIB_FREQUENCY), \
            (self.fnrCmd, configKeys.FNR_MODE), \
            (self.gpsCmd, configKeys.GPS_ENABLE), \
            (self.rtvCmd, configKeys.REF_TUNING_VOLT), \
            (self.fpgaStateCmd, configKeys.FPGA_STATE), \
            (self.funCmd, configKeys.RADIO_FUNCTION), \
            (self.refCmd, configKeys.STATUS_PPS_SOURCE), \
            # (self.cntrlCmd, configKeys.CNTRL_IF_OUT), \
        ]:
        # A radio class leaves a command set to None when the hardware does
        # not support that setting; skip those.
        if cmdClass is not None:
            cmd = cmdClass(parent=self, query=True,
                           verbose=self.verbose, logFile=self.logFile)
            cmd.send( self.sendCommand, )
            self._addLastCommandErrorInfo(cmd)
            rspInfo = cmd.getResponseInfo()
            #self.logIfVerbose("DEBUG:", cmd.mnemonic, "rspInfo=", rspInfo)
            if rspInfo is not None:
                # Cache the queried value; default to 0 when the response
                # omits the key.
                self.configuration[configKey] = rspInfo.get(configKey, 0)
    # IP configuration query -- The format of this section depends on whether
    # the radio has Gigabit Ethernet ports on it or not.
    if configKeys.CONFIG_IP not in self.configuration:
        self.configuration[configKeys.CONFIG_IP] = {}
    self.configuration[configKeys.CONFIG_IP].update( self.queryIpConfigurationNew() )

##
# \protected
# \brief Issues hardware commands to set the object's current configuration.
def _setConfiguration(self, confDict): ret = True for cmdClass, configKey in [ \ (self.cfgCmd, configKeys.CONFIG_MODE), \ (self.refCmd, configKeys.REFERENCE_MODE), \ (self.rbypCmd, configKeys.BYPASS_MODE), \ (self.calfCmd, configKeys.CALIB_FREQUENCY), \ (self.fnrCmd, configKeys.FNR_MODE), \ (self.gpsCmd, configKeys.GPS_ENABLE), \ (self.rtvCmd, configKeys.REF_TUNING_VOLT), \ (self.fpgaStateCmd, configKeys.FPGA_STATE), \ (self.refCmd, configKeys.STATUS_PPS_SOURCE), \ (self.cntrlCmd, configKeys.CNTRL_IF_OUT), \ ]: cDict = { "parent": self, \ "verbose": self.verbose, \ "logFile": self.logFile, \ configKey: confDict.get(configKey, 0) } if configKey in confDict and cmdClass is not None and \ cmdClass.settable: cmd = cmdClass(**cDict) ret &= cmd.send( self.sendCommand, ) ret &= cmd.success self._addLastCommandErrorInfo(cmd) if ret: self.configuration[configKey] = getattr(cmd, configKey) pass return ret ## # \protected # \brief Gets whether or not the given (nested) dictionary has an entry for the given keys. # # \param dicty The dictionary to search. # \param keys A number of comma-separated search keys, each pointing to a deeper level # of the dictionary hierarchy. # \return True if the dictionary has the entry, False otherwise. def _dictHasEntry(self, dicty, *keys): ret = True keysOk = [ q != "" for q in keys ] if all(keysOk): tmp = dicty for key in keys: if key not in tmp: ret = False break else: tmp = tmp[key] else: ret = False return ret ## # \protected # \brief Ensures that we make an entry in the given dictionary with the specified keys, using # the provided default for the entry. # # @param dicty The dictionary to manipulate. # @param default The default value to use for the entry if it does not already exist. # @param keys A number of comma-separated search keys, each pointing to a deeper level # of the dictionary hierarchy. 
def _dictEnsureEntry(self, dicty, default, *keys): tmp = dicty # Create intermediate sub-dicts if needed for i, key in enumerate(keys): if i < len(keys)-1: if key not in tmp: #print "[DBG] sub-dict key", key, "not present" tmp[key] = {} else: #print "[DBG] sub-dict key", key, "present" pass tmp = tmp[key] else: if key not in tmp: #print "[DBG] value key", key, "not present" tmp[key] = default else: #print "[DBG] value key", key, "present" pass pass ## # \protected # \brief Ensures that a given nested dictionary item is set to the provided value, # even if the item does not already exist. # \param dicty The dictionary to manipulate. # \param value The value to set the entry to. # \param keys A number of comma-separated search keys, each pointing to a deeper level # of the dictionary hierarchy. def _dictEnsureEntrySet(self, dicty, value, *keys): self._dictEnsureEntry(dicty, value, *keys) tmp = dicty for i, key in enumerate(keys): if i < len(keys)-1: tmp = tmp[key] else: try: tmp[key] = copy.deepcopy(value) except: tmp[key] = value ## # \protected # \brief Gets a value from a dictionary in a "safe" way, using a default in case there is # no entry for the given set of keys. # # \param dicty The dictionary to query. # \param default The default value to use if the keys do not point to a valid entry. # \param keys A number of comma-separated search keys, each pointing to a deeper level # of the dictionary hierarchy. # \return The entry from the dictionary, or the default if the entry does not exist. def _dictSafeGet(self, dicty, default, *keys): ret = default if len(keys) > 0 else dicty if self._dictHasEntry(dicty, *keys): tmp = dicty for i, key in enumerate(keys): if i < len(keys)-1: tmp = tmp[key] else: ret = tmp[key] return ret ## # \internal # \brief Initializes the radio handler object after connecting to crdd. 
# def _crddInitialize(self): # Optionally, send crdd our client ID if self.clientId is not None: rsp = self._crddSendCommand(cmd="CLIENTID", data=self.clientId) # Get the radio's current configuration from crdd self._crddGetConfiguration() pass ## # \internal # \brief Sends a command to crdd. # \note This capability does not depend on whether the radio is JSON or not. # \param cmd Command mnemonic # \param data Data to send as a command parameter. What actually gets sent # over the link is this object's string representation. Can be None, in # which case only the command gets sent. # \returns Either a list of response strings (if the command completed # successfully), or None (if it did not). def _crddSendCommand(self, cmd, data=None): outCmd = self.crddCommandPrefix + str(cmd) if data is not None: outCmd += " " + str(data) outCmd += "\n" return self.sendCommand(outCmd) ## # \internal # \brief Unpacks the provided configuration dictionary, setting the # configuration of all components. # \param configuration Fully-specified configuration dictionary. 
def _crddUnpackConfiguration(self, configuration):
    # Distribute a full configuration dict (as returned by crdd) to the
    # per-component handler objects, popping each section off a deep copy
    # as it is consumed; whatever remains becomes this object's own
    # radio-level configuration.
    # Unpack the full configuration
    fullConfiguration = copy.deepcopy(configuration)
    # -- Tuner configuration
    cDict = fullConfiguration.pop(configKeys.CONFIG_TUNER, {})
    for index in list(cDict.keys()):
        self.tunerDict[index].configuration = cDict[index]
    # -- DDC configuration (sub-keyed by DDC type; anything other than
    #    "narrowband" is treated as wideband)
    cDict = fullConfiguration.pop(configKeys.CONFIG_DDC, {})
    for ddcType in list(cDict.keys()):
        ddcDict = self.wbddcDict
        if ddcType == "narrowband":
            ddcDict = self.nbddcDict
        for index in list(cDict[ddcType].keys()):
            ddcDict[index].configuration = cDict[ddcType][index]
    # -- FFT streams
    cDict = fullConfiguration.pop(configKeys.CONFIG_FFT, {})
    for index in list(cDict.keys()):
        self.fftStreamDict[index].configuration = cDict[index]
    # -- TX configuration (CW tone generator settings are nested inside
    #    each transmitter's entry and are popped out first)
    cDict = fullConfiguration.pop(configKeys.CONFIG_TX, {})
    for index in list(cDict.keys()):
        cDict2 = cDict[index].pop(configKeys.CONFIG_CW, {})
        for index2 in list(cDict2.keys()):
            self.txDict[index].toneGenDict[index2].configuration = cDict2[index2]
        self.txDict[index].configuration = cDict[index]
    # -- DUC configuration
    cDict = fullConfiguration.pop(configKeys.CONFIG_DUC, {})
    for ducType in list(cDict.keys()):
        ducDict = self.wbducDict
        if ducType == "narrowband":
            ducDict = self.nbducDict
        for index in list(cDict[ducType].keys()):
            ducDict[index].configuration = cDict[ducType][index]
    # -- DDC group configuration (wideband / narrowband / combined)
    cDict = fullConfiguration.pop(configKeys.CONFIG_DDC_GROUP, {})
    for ddcType in list(cDict.keys()):
        ddcDict = self.wbddcGroupDict
        if ddcType == "narrowband":
            ddcDict = self.nbddcGroupDict
        elif ddcType == "combined":
            ddcDict = self.cddcGroupDict
        for index in list(cDict[ddcType].keys()):
            ddcDict[index].configuration = cDict[ddcType][index]
    # -- WBDUC groups (narrowband DUC groups deliberately disabled below)
    cDict = fullConfiguration.pop(configKeys.CONFIG_DUC_GROUP, {})
    for ducType in list(cDict.keys()):
        ducDict = self.wbducGroupDict
        #if ducType == "narrowband":
        #    ducDict = self.nbducGroupDict
        for index in list(cDict[ducType].keys()):
            ducDict[index].configuration = cDict[ducType][index]
    # -- Tuner groups
    cDict = fullConfiguration.pop(configKeys.CONFIG_TUNER_GROUP, {})
    for index in list(cDict.keys()):
        self.tunerGroupDict[index].configuration = cDict[index]
    # -- What is left after all the popping are the radio-specific
    #    config items, and the IP config
    self.configuration = fullConfiguration
    pass

##
# \internal
# \brief Gets the radio's current configuration from crdd.
# \note This capability does not depend on whether the radio is JSON or not.
# \returns Either the returned configuration dictionary (if the command
# completed successfully), or an empty dictionary (if it did not).
def _crddGetConfiguration(self):
    ret = {}
    # Get the radio's current configuration from crdd
    rsp = self._crddSendCommand(cmd="GETCFG", data=None)
    # Deal with out-of-bound conditions; any parsing/indexing failure is
    # deliberately swallowed and leaves ret == {} (best-effort protocol).
    try:
        if all( [ rsp is not None,
                  rsp != "Empty Read",
                  rsp[0] != "TIMEOUT" ] ):
            # Get the returned full configuration by running the first response
            # string (the config dict) through ast.literal_eval().
            ret = ast.literal_eval(rsp[0])
            # Unpack the full configuration
            self._crddUnpackConfiguration(ret)
    except:
        pass
    return ret

##
# \internal
# \brief Sets the radio's current configuration using crdd.
# \note This capability does not depend on whether the radio is JSON or not.
# \return True if all commands completed successfully, False otherwise.
# Use getLastCommandErrorInfo() to retrieve any error information.
def _crddSetConfiguration(self, configDict={}): ret = False # Get the radio's current configuration from crdd rsp = self._crddSendCommand(cmd="SETCFG", data=configDict) # Deal with out-of-bound conditions try: if all( [ rsp is not None, rsp != "Empty Read", rsp[0] != "TIMEOUT" ] ): #self.log("[DBG] rsp =", str(rsp)) # First response string: SUCCESS or ERROR (plus error info) ret = ( rsp[0] == "SUCCESS" ) if not ret: # Grab the error info (serialized as a list of strings) self.cmdErrorInfo = ast.literal_eval(rsp[0].replace("ERROR: ", "")) # Second response string: Updated configuration dictionary string. # Run this through ast.literal_eval(). configuration = ast.literal_eval(rsp[1]) # Unpack the full configuration self._crddUnpackConfiguration(configuration) except: pass return ret ## # \internal # \brief Queries the radio's current configuration from crdd. # \note This capability does not depend on whether the radio is JSON or not. # \param keys List of keys used to specify which configuration values to query. # \returns Either the returned configuration dictionary (if the command # completed successfully), or an empty dictionary (if it did not). def _crddQueryConfigurationByKeys(self, *keys): ret = {} # Query the radio's current configuration from crdd rsp = self._crddSendCommand(cmd="QUERYCFGK", data=list(keys)) # Deal with out-of-bound conditions try: if all( [ rsp is not None, rsp != "Empty Read", rsp[0] != "TIMEOUT" ] ): # Get the returned configuration by running the first response # string (the config dict) through ast.literal_eval(). ret = ast.literal_eval(rsp[0]) except: pass return ret ## # \internal # \brief Gets the list of currently connected data port indices from crdd. # \note This capability does not depend on whether the radio is JSON or not. # \returns Either the returned data port list (if the command # completed successfully), or an empty list (if it did not). 
def _crddGetConnectedDataPortIndices(self): ret = [] # Get the radio's current configuration from crdd rsp = self._crddSendCommand(cmd="QUERYCDPS", data=None) # Deal with out-of-bound conditions try: if all( [ rsp is not None, rsp != "Empty Read", rsp[0] != "TIMEOUT" ] ): # Get the returned list by running the first response # string (the data port list) through ast.literal_eval(). ret = ast.literal_eval(rsp[0]) except: pass return ret ## # \internal # \brief Helper method for converting Unicode strings to ASCII strings # during the JSON conversion process. # # The JSON-formatted string will have elements whose names # correspond to the names of this entity's attributes. # # \param data The entity being encoded as JSON. @staticmethod def encodeJsonAsAscii(data): def _foo(item): ret = item if isinstance(item, str): ret = item.encode('ascii') elif isinstance(item, list): ret = [ _foo(q) for q in item ] elif isinstance(item, dict): ret = { _foo(key): _foo(value) for key, value in item.items() } return ret adjPairs = [] for pair in data: adjPairs.append( (_foo(pair[0]), _foo(pair[1])) ) return dict(adjPairs) ## # \brief Resets the radio. # # \copydetails CyberRadioDriver::IRadio::sendReset() def sendReset(self, resetType=None): if self.resetCmd is not None: cDict = { "parent": self, "verbose": self.verbose, "logFile": self.logFile, configKeys.RESET_TYPE: resetType, } cmd = self.resetCmd(**cDict) cmd.send( self.sendCommand, ) return cmd.success else: return False #time.sleep(20) #self.connect(self.mode,self.host_or_dev,self.port_or_baudrate) ## # \brief Gets the pulse-per-second (PPS) rising edge from the radio. # # \copydetails CyberRadioDriver::IRadio::getPps() def getPps(self): if self.ppsCmd is not None: cmd = command.pps(parent=self,query=True, verbose=self.verbose, logFile=self.logFile) cmd.send(self.sendCommand, timeout=cmd.timeout) return cmd.success else: return False ## # \brief Sets the time for the next PPS rising edge on the radio. 
# # \copydetails CyberRadioDriver::IRadio::setTimeNextPps() def setTimeNextPps(self,checkTime=False,useGpsTime=False,newPpsTime=None): if self.ppsCmd is not None and self.utcCmd is not None: if self.getPps(): if newPpsTime is not None: nextSecond = int( _radio.timeFromString(newPpsTime, utc=True) ) cmd = self.utcCmd( parent=self, utcTime=str(nextSecond), verbose=self.verbose, logFile=self.logFile ) elif useGpsTime: cmd = self.utcCmd( parent=self, utcTime="g" ) else: nextSecond = int( math.floor( time.time() ) )+1 cmd = self.utcCmd( parent=self, utcTime=str(nextSecond), verbose=self.verbose, logFile=self.logFile ) cmd.send( self.sendCommand, timeout=cmd.timeout ) if checkTime: radioUtc = self.getTimeNextPps() self.logIfVerbose("Set time = %d & Query time = %d" % (nextSecond,radioUtc)) return radioUtc==nextSecond else: return cmd.success else: self.log("ERROR, ERROR, ERROR".center(80,"!")) return False else: return False ## # \brief Gets the current radio time. # # \copydetails CyberRadioDriver::IRadio::getTimeNow() def getTimeNow(self): if self.utcCmd is not None: cmd = self.utcCmd( parent=self, query=True, verbose=self.verbose, logFile=self.logFile ) cmd.send( self.sendCommand, timeout=cmd.timeout ) return cmd.getResponseInfo().get(configKeys.TIME_UTC, None) else: return None ## # \brief Gets the time for the next PPS rising edge on the radio. # # \copydetails CyberRadioDriver::IRadio::getTimeNextPps() def getTimeNextPps(self): if self.ppsCmd is not None and self.utcCmd is not None: if self.getPps(): cmd = self.utcCmd( parent=self, query=True, verbose=self.verbose, logFile=self.logFile ) cmd.send( self.sendCommand, timeout=cmd.timeout ) return cmd.getResponseInfo().get(configKeys.TIME_UTC, None) else: return None else: return None ## # \brief Gets the status from the radio. 
# # \copydetails CyberRadioDriver::IRadio::getStatus() def getStatus(self): if self.statQry is not None: cmd = self.statQry(parent=self, verbose=self.verbose, logFile=self.logFile) cmd.send( self.sendCommand ) return cmd.getResponseInfo() else: self.log("No status query available.") return None ## # \brief Gets the RF tuner status from the radio. # # \copydetails CyberRadioDriver::IRadio::getTstatus() def getTstatus(self): if self.tstatQry is not None: cmd = self.tstatQry(parent=self, verbose=self.verbose, logFile=self.logFile) cmd.send( self.sendCommand ) return cmd.getResponseInfo() else: self.log("No tuner status query available.") return None ## # \brief Sets the reference mode on the radio. # # \copydetails CyberRadioDriver::IRadio::setReferenceMode() def setReferenceMode(self,mode): try: modeInt = int(mode) if int(mode) in list(self.refModes.keys()) else None except: modeInt = None if modeInt is not None and self.refCmd is not None: self.logIfVerbose("Setting reference mode %d (%s)"%(modeInt,self.refModes.get(modeInt))) cmd = self.refCmd(parent=self, referenceMode=modeInt, verbose=self.verbose, logFile=self.logFile) ret = cmd.send( self.sendCommand ) if ret and cmd.success: self.configuration[configKeys.REFERENCE_MODE] = getattr(cmd, configKeys.REFERENCE_MODE) return cmd.success else: return False ## # \brief Sets the reference bypass mode on the radio. 
# # \copydetails CyberRadioDriver::IRadio::setBypassMode() def setBypassMode(self,mode): try: modeInt = int(mode) if int(mode) in list(self.rbypModes.keys()) else None except: modeInt = None if modeInt is not None and self.rbypCmd is not None: self.logIfVerbose("Setting bypass mode %d (%s)"%(modeInt,self.rbypModes.get(modeInt))) cmd = self.rbypCmd(parent=self, bypassMode=modeInt, verbose=self.verbose, logFile=self.logFile) ret = cmd.send( self.sendCommand ) if ret and cmd.success: self.configuration[configKeys.BYPASS_MODE] = getattr(cmd, configKeys.BYPASS_MODE) return cmd.success else: return False ## # \brief Sets the time adjustment for tuners on the radio. # # \copydetails CyberRadioDriver::IRadio::setTimeAdjustment() def setTimeAdjustment(self, tunerIndex=None, timeAdjustValue=0): if self.tadjCmd is not None: success = True for i in self._getIndexList(tunerIndex, self.tunerDict): # cmd = self.tadjCmd(parent=self,index=i, timingAdjustment=timeAdjustValue, # verbose=self.verbose, logFile=self.logFile) # success &= cmd.send( self.sendCommand ) success &= self.setConfiguration( { configKeys.CONFIG_TUNER : { i: { configKeys.TUNER_TIMING_ADJ: timeAdjustValue, } } } ) return success else: return False ## # \brief Sets the calibration frequency on the radio. # # \copydetails CyberRadioDriver::IRadio::setCalibrationFrequency() def setCalibrationFrequency(self, calibFrequency=0): if self.calfCmd is not None: cmd = self.calfCmd(parent=self, calibFrequency=calibFrequency, verbose=self.verbose, logFile=self.logFile) ret = cmd.send( self.sendCommand ) if ret and cmd.success: self.configuration[configKeys.CALIB_FREQUENCY] = getattr(cmd, configKeys.CALIB_FREQUENCY) return cmd.success else: return False ## # \brief Gets the current GPS position. 
# # \copydetails CyberRadioDriver::IRadio::getGpsPosition() def getGpsPosition(self): # Helper function that converts GPS coordinates from the NMEA # format to decimal degrees def degMinToDecimalDeg(coordinate): # Converts from [NESW](d)ddmm.mmmm(mm) format to decimal degrees # degDigits == number of digits used for degrees (2 for lat, 3 for lon) # Last (decimal places + 3) characters == Minutes ret = 0.0 # -- Get the sign from the directional indicator sgn = (-1 if coordinate[0] in ["W", "S"] else 1) # -- Find the decimal point position coord = coordinate[1:] dotPos = coord.find(".") minLen = len(coord) - dotPos + 2 min = float( coord[-minLen:] ) deg = float( coord[:-minLen] ) if deg < 0.0: ret = deg - min / 60.0 else: ret = deg + min / 60.0 ret = ret * sgn return ret if self.gposCmd is not None: cmd = self.gposCmd( parent=self, query=True, verbose=self.verbose, logFile=self.logFile ) cmd.send( self.sendCommand, timeout=cmd.timeout ) latStr = cmd.getResponseInfo().get(configKeys.GPS_LATITUDE, "N0000.000000") lonStr = cmd.getResponseInfo().get(configKeys.GPS_LONGITUDE, "E0000.000000") return ( degMinToDecimalDeg(latStr), degMinToDecimalDeg(lonStr) ) else: return (0.0, 0.0) ## # \brief Gets the current radio temperature. # # \copydetails CyberRadioDriver::IRadio::getTemperature() def getTemperature(self): if self.tempCmd is not None: cmd = self.tempCmd( parent=self, query=True, verbose=self.verbose, logFile=self.logFile ) cmd.send( self.sendCommand, timeout=cmd.timeout ) return cmd.getResponseInfo().get(configKeys.TEMPERATURE, 0) else: return 0 ## # \brief Gets the current GPIO output bits. 
# # \copydetails CyberRadioDriver::IRadio::getGpioOutput() def getGpioOutput(self): if self.gpioStaticCmd is not None: cmd = self.gpioStaticCmd( parent=self, query=True, verbose=self.verbose, logFile=self.logFile ) cmd.send( self.sendCommand, timeout=cmd.timeout ) return cmd.getResponseInfo().get(configKeys.GPIO_VALUE, 0) else: return 0 ## # \brief Gets the GPIO output settings for a given sequence index. # # \copydetails CyberRadioDriver::IRadio::getGpioOutputByIndex() def getGpioOutputByIndex(self, index): if self.gpioSeqCmd is not None: cmd = self.gpioSeqCmd( parent=self, query=True, index=index, verbose=self.verbose, logFile=self.logFile ) cmd.send( self.sendCommand, timeout=cmd.timeout ) return ( cmd.getResponseInfo().get(configKeys.GPIO_VALUE, 0), cmd.getResponseInfo().get(configKeys.GPIO_DURATION, 0), cmd.getResponseInfo().get(configKeys.GPIO_LOOP, 0) ) else: return (0, 0, 0) ## # \brief Sets the current GPIO output bits. # # \copydetails CyberRadioDriver::IRadio::setGpioOutput() def setGpioOutput(self, value): if self.gpioStaticCmd is not None: cmd = self.gpioStaticCmd(parent=self, value=value, verbose=self.verbose, logFile=self.logFile) ret = cmd.send( self.sendCommand ) return cmd.success else: return False ## # \brief Sets the GPIO output settings for a given sequence index. # # \copydetails CyberRadioDriver::IRadio::setGpioOutputByIndex() def setGpioOutputByIndex(self, index, value, duration, loop, go): if self.gpioSeqCmd is not None: cmd = self.gpioSeqCmd(parent=self, index=index, value=value, duration=duration, loop=loop, go=go, verbose=self.verbose, logFile=self.logFile) ret = cmd.send( self.sendCommand ) return cmd.success else: return False ## # \brief Gets the current bandwith of the given tuner. 
# \copydetails CyberRadioDriver::IRadio::getTunerBandwidth() def getTunerBandwidth(self, tuner): if tuner not in self.getTunerIndexRange(): raise ValueError("Invalid tuner specified") ret = self.tunerBandwidthConstant if self.tunerBandwithSettable: ifFilter = self.getConfigurationByKeys( configKeys.CONFIG_TUNER, tuner, configKeys.TUNER_IF_FILTER ) if ifFilter is not None: ret = ifFilter * 1e6 return ret ## # \brief Gets the name of the radio. # # \copydetails CyberRadioDriver::IRadio::getName() @classmethod def getName(cls): return cls._name ## # \brief Gets the number of tuners on the radio. # # \copydetails CyberRadioDriver::IRadio::getNumTuner() @classmethod def getNumTuner(cls): return len(cls.getTunerIndexRange()) ## # \brief Gets the number of tuner boards on the radio. # # \copydetails CyberRadioDriver::IRadio::getNumTunerBoards() @classmethod def getNumTunerBoards(cls): return cls.numTunerBoards ## # \brief Gets the index range for the tuners on the radio. # # \copydetails CyberRadioDriver::IRadio::getTunerIndexRange() @classmethod def getTunerIndexRange(cls): return list(range(cls.tunerIndexBase, cls.tunerIndexBase + cls.numTuner, 1)) ## # \brief Gets the frequency range for the tuners on the radio. # # \copydetails CyberRadioDriver::IRadio::getTunerFrequencyRange() @classmethod def getTunerFrequencyRange(cls): return cls.tunerType.frqRange ## # \brief Gets the frequency resolution for tuners on the radio. # # \copydetails CyberRadioDriver::IRadio::getTunerFrequencyRes() @classmethod def getTunerFrequencyRes(cls): return cls.tunerType.frqRes ## # \brief Gets the frequency unit for tuners on the radio. # # \copydetails CyberRadioDriver::IRadio::getTunerFrequencyUnit() @classmethod def getTunerFrequencyUnit(cls): return cls.tunerType.frqUnits ## # \brief Gets the attenuation range for the tuners on the radio. 
# # \copydetails CyberRadioDriver::IRadio::getTunerAttenuationRange() @classmethod def getTunerAttenuationRange(cls): return cls.tunerType.attRange ## # \brief Gets the attenuation resolution for tuners on the radio. # # \copydetails CyberRadioDriver::IRadio::getTunerAttenuationRes() @classmethod def getTunerAttenuationRes(cls): return cls.tunerType.attRes ## # \brief Gets the ifFilter list for the tuners of the radio # # \copydetails CyberRadioDriver::IRadio::getTunerIfFilterList() @classmethod def getTunerIfFilterList(cls): return cls.tunerType.ifFilters ## # \brief Gets whether or not the radio supports setting tuner # bandwidth # # \copydetails CyberRadioDriver::IRadio::isTunerBandwidthSettable() @classmethod def isTunerBandwidthSettable(cls): return cls.tunerBandwithSettable ## # \brief Gets the number of wideband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getNumWbddc() @classmethod def getNumWbddc(cls): return len(cls.getWbddcIndexRange()) ## # \brief Gets whether the DDCs on the radio have selectable sources. # # \copydetails CyberRadioDriver::IRadio::isDdcSelectableSource() @classmethod def isDdcSelectableSource(cls, wideband): ddcType = cls.wbddcType if wideband else cls.nbddcType return False if ddcType is None else ddcType.selectableSource ## # \brief Gets whether the wideband or narrowband DDCs on the radio are tunable. # # \copydetails CyberRadioDriver::IRadio::isNbddcTunable() @classmethod def isDdcTunable(cls, wideband): ddcType = cls.wbddcType if wideband else cls.nbddcType return False if ddcType is None else ddcType.tunable ## # \brief Gets the index range for the wideband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getWbddcIndexRange() @classmethod def getWbddcIndexRange(cls): return list(range(cls.wbddcIndexBase, cls.wbddcIndexBase + cls.numWbddc, 1)) ## # \brief Gets whether the wideband DDCs on the radio are tunable. 
# # \copydetails CyberRadioDriver::IRadio::isWbddcSelectableSource() @classmethod def isWbddcSelectableSource(cls): return False if cls.wbddcType is None else cls.wbddcType.selectableSource ## # \brief Gets whether the wideband DDCs on the radio have selectable # sources. # # \copydetails CyberRadioDriver::IRadio::isWbddcTunable() @classmethod def isWbddcTunable(cls): return False if cls.wbddcType is None else cls.wbddcType.tunable ## # \brief Gets the frequency offset range for the wideband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getWbddcFrequencyRange() @classmethod def getWbddcFrequencyRange(cls): return (0.0,0.0) if cls.wbddcType is None else cls.wbddcType.frqRange ## # \brief Gets the frequency offset resolution for wideband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getWbddcFrequencyRes() @classmethod def getWbddcFrequencyRes(cls): return 0.0 if cls.wbddcType is None else cls.wbddcType.frqRes ## # \brief Gets the allowed rate set for the wideband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getWbddcRateSet() @classmethod def getWbddcRateSet(cls, index=None): return cls.getDdcRateSet(True, index) ## # \brief Gets the allowed rate list for the wideband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getWbddcRateList() @classmethod def getWbddcRateList(cls, index=None): return cls.getDdcRateList(True, index) ## # \brief Gets the allowed rate set for the wideband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getWbddcBwSet() @classmethod def getWbddcBwSet(cls, index=None): return cls.getDdcBwSet(True, index) ## # \brief Gets the allowed rate list for the wideband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getWbddcBwList() @classmethod def getWbddcBwList(cls, index=None): return cls.getDdcBwList(True, index) ## # \brief Gets the number of narrowband DDCs on the radio. 
# # \copydetails CyberRadioDriver::IRadio::getNumNbddc() @classmethod def getNumNbddc(cls): return len(cls.getNbddcIndexRange()) ## # \brief Gets the index range for the narrowband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getNbddcIndexRange() @classmethod def getNbddcIndexRange(cls): if cls.numNbddc == 0: return [] elif cls.nbddcIndexOverride is not None: return cls.nbddcIndexOverride else: return list(range(cls.nbddcIndexBase, cls.nbddcIndexBase + cls.numNbddc, 1)) ## # \brief Gets whether the narrowband DDCs on the radio are tunable. # # \copydetails CyberRadioDriver::IRadio::isNbddcTunable() @classmethod def isNbddcTunable(cls): return False if cls.nbddcType is None else cls.nbddcType.tunable ## # \brief Gets whether the narrowband DDCs on the radio have selectable # sources. # # \copydetails CyberRadioDriver::IRadio::isNbddcSelectableSource() @classmethod def isNbddcSelectableSource(cls): return False if cls.nbddcType is None else cls.nbddcType.selectableSource ## # \brief Gets the frequency offset range for the narrowband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getNbddcFrequencyRange() @classmethod def getNbddcFrequencyRange(cls): return (0.0,0.0) if cls.nbddcType is None else cls.nbddcType.frqRange ## # \brief Gets the frequency offset resolution for narrowband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getNbddcFrequencyRes() @classmethod def getNbddcFrequencyRes(cls): return 0.0 if cls.nbddcType is None else cls.nbddcType.frqRes ## # \brief Gets the allowed rate set for the narrowband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getNbddcRateSet() @classmethod def getNbddcRateSet(cls, index=None): return cls.getDdcRateSet(False, index) ## # \brief Gets the allowed rate list for the narrowband DDCs on the radio. 
# # \copydetails CyberRadioDriver::IRadio::getNbddcRateList() @classmethod def getNbddcRateList(cls, index=None): return cls.getDdcRateList(False, index) ## # \brief Gets the allowed rate set for the narrowband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getNbddcBwSet() @classmethod def getNbddcBwSet(cls, index=None): return cls.getDdcBwSet(False, index) ## # \brief Gets the allowed rate list for the narrowband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getNbddcBwList() @classmethod def getNbddcBwList(cls, index=None): return cls.getDdcBwList(False, index) ## # \brief Gets the number of narrowband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getNumFftStream() @classmethod def getNumFftStream(cls): return len(cls.getFftStreamIndexRange()) ## # \brief Gets the index range for the narrowband DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getFftStreamIndexRange() @classmethod def getFftStreamIndexRange(cls): return [] if cls.numFftStream == 0 else \ list(range(cls.fftStreamIndexBase, cls.fftStreamIndexBase + cls.numFftStream, 1)) ## # \brief Gets the allowed rate set for the FFTs on the radio. # # \copydetails CyberRadioDriver::IRadio::getFftStreamRateSet() @classmethod def getFftStreamRateSet(cls,): return cls.fftStreamType.getDdcRateSet() if cls.fftStreamType is not None else {} ## # \brief Gets the allowed rate list for the FFTs on the radio. # # \copydetails CyberRadioDriver::IRadio::getFftStreamRateList() @classmethod def getFftStreamRateList(cls,): return cls.fftStreamType.getDdcRateList() if cls.fftStreamType is not None else [] ## # \brief Gets the allowed window set for the FFTs on the radio. # # \copydetails CyberRadioDriver::IRadio::getFftStreamWindowSet() @classmethod def getFftStreamWindowSet(cls,): return cls.fftStreamType.getWindowSet() if cls.fftStreamType is not None else {} ## # \brief Gets the allowed window list for the FFTs on the radio. 
# # \copydetails CyberRadioDriver::IRadio::getFftStreamWindowList() @classmethod def getFftStreamWindowList(cls,): return sorted(cls.fftStreamType.getWindowSet().keys()) if cls.fftStreamType is not None else [] ## # \brief Gets the allowed size set for the FFTs on the radio. # # \copydetails CyberRadioDriver::IRadio::getFftStreamSizeSet() @classmethod def getFftStreamSizeSet(cls,): return cls.fftStreamType.getSizeSet() if cls.fftStreamType is not None else {} ## # \brief Gets the allowed size list for the FFTs on the radio. # # \copydetails CyberRadioDriver::IRadio::getFftStreamSizeList() @classmethod def getFftStreamSizeList(cls,): return sorted(cls.fftStreamType.getSizeSet().keys()) if cls.fftStreamType is not None else [] ## # \brief Gets the ADC sample rate for the radio. # # \copydetails CyberRadioDriver::IRadio::getAdcRate() @classmethod def getAdcRate(cls): return cls.adcRate ## # \brief Gets the VITA 49 header size for the radio. # # \copydetails CyberRadioDriver::IRadio::getVitaHeaderSize() @classmethod def getVitaHeaderSize(cls, payloadType=None): return 4 * cls.ifSpecMap.get(payloadType, cls.ifSpec).headerSizeWords ## # \brief Gets the VITA 49 payload size for the radio. # # \copydetails CyberRadioDriver::IRadio::getVitaPayloadSize() @classmethod def getVitaPayloadSize(cls, payloadType=None): return 4 * cls.ifSpecMap.get(payloadType, cls.ifSpec).payloadSizeWords ## # \brief Gets the VITA 49 tail size for the radio. # # \copydetails CyberRadioDriver::IRadio::getVitaTailSize() @classmethod def getVitaTailSize(cls, payloadType=None): return 4 * cls.ifSpecMap.get(payloadType, cls.ifSpec).tailSizeWords ## # \brief Gets dictionary with information about VITA 49 framing. 
# # \copydetails CyberRadioDriver::IRadio::getVitaFrameInfoDict() @classmethod def getVitaFrameInfoDict(cls, payloadType=None): return cls.ifSpecMap.get(payloadType, cls.ifSpec).getVitaFrameInfoDict() # \brief Gets whether data coming from the radio is byte-swapped with # respect to the endianness of the host operating system. # # \copydetails CyberRadioDriver::IRadio::isByteswapped() @classmethod def isByteswapped(cls, payloadType=None): return (cls.ifSpecMap.get(payloadType, cls.ifSpec).byteOrder != sys.byteorder) ## # \brief Gets whether data coming from the radio has I and Q data swapped. # # \copydetails CyberRadioDriver::IRadio::isIqSwapped() @classmethod def isIqSwapped(cls, payloadType=None): return cls.ifSpecMap.get(payloadType, cls.ifSpec).iqSwapped ## # \brief Gets the byte order for data coming from the radio. # # \copydetails CyberRadioDriver::IRadio::getByteOrder() @classmethod def getByteOrder(cls, payloadType=None): return cls.ifSpecMap.get(payloadType, cls.ifSpec).byteOrder ## # \brief Gets the number of Gigabit Ethernet interfaces on the radio. # # \copydetails CyberRadioDriver::IRadio::getNumGigE() @classmethod def getNumGigE(cls): return len(cls.getGigEIndexRange()) ## # \brief Gets the index range for the Gigabit Ethernet interfaces on the radio. # # \copydetails CyberRadioDriver::IRadio::getGigEIndexRange() @classmethod def getGigEIndexRange(cls): return [] if cls.numGigE == 0 else \ list(range(cls.gigEIndexBase, cls.gigEIndexBase + cls.numGigE, 1)) ## # \brief Gets the number of destination IP address table entries available for # each Gigabit Ethernet interface on the radio. # # \copydetails CyberRadioDriver::IRadio::getNumGigEDipEntries() @classmethod def getNumGigEDipEntries(cls): return len(cls.getGigEDipEntryIndexRange()) ## # \brief Gets the index range for the destination IP address table entries # available for the Gigabit Ethernet interfaces on the radio. 
# # \copydetails CyberRadioDriver::IRadio::getGigEDipEntryIndexRange() @classmethod def getGigEDipEntryIndexRange(cls): return [] if cls.numGigE == 0 else \ list(range(cls.gigEDipEntryIndexBase, \ cls.gigEDipEntryIndexBase + cls.numGigEDipEntries, 1)) ## # \brief Gets the list of connection modes that the radio supports. # # \copydetails CyberRadioDriver::IRadio::getConnectionModeList() @classmethod def getConnectionModeList(cls): return [] if cls.connectionModes is None else cls.connectionModes ## # \brief Gets whether the radio supports a given connection mode. # # \copydetails CyberRadioDriver::IRadio::isConnectionModeSupported() @classmethod def isConnectionModeSupported(cls, mode): return mode in cls.getConnectionModeList() ## # \brief Gets the radio's default baud rate. # # \copydetails CyberRadioDriver::IRadio::getDefaultBaudrate() @classmethod def getDefaultBaudrate(cls): return cls.defaultBaudrate ## # \brief Gets the radio's default control port. # # \copydetails CyberRadioDriver::IRadio::getDefaultControlPort() @classmethod def getDefaultControlPort(cls): return cls.defaultPort ## # \brief Gets the allowed VITA enable options set for the radio. # # \copydetails CyberRadioDriver::IRadio::getVitaEnableOptionSet() @classmethod def getVitaEnableOptionSet(cls): return {} if cls.vitaEnableOptions is None else cls.vitaEnableOptions ## # \brief Gets the number of transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getNumTransmitters() @classmethod def getNumTransmitters(cls): return len(cls.getTransmitterIndexRange()) ## # \brief Gets the index range for the transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterIndexRange() @classmethod def getTransmitterIndexRange(cls): return [] if cls.numTxs == 0 else \ list(range(cls.txIndexBase, \ cls.txIndexBase + cls.numTxs, 1)) ## # \brief Gets the frequency range for the transmitters on the radio. 
# # \copydetails CyberRadioDriver::IRadio::getTransmitterFrequencyRange() @classmethod def getTransmitterFrequencyRange(cls): return (0.0,0.0) if cls.numTxs == 0 else cls.txType.frqRange ## # \brief Gets the frequency resolution for transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterFrequencyRes() @classmethod def getTransmitterFrequencyRes(cls): return None if cls.numTxs == 0 else cls.txType.frqRes ## # \brief Gets the frequency unit for transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterFrequencyUnit() @classmethod def getTransmitterFrequencyUnit(cls): return None if cls.numTxs == 0 else cls.txType.frqUnits ## # \brief Gets the attenuation range for the transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterAttenuationRange() @classmethod def getTransmitterAttenuationRange(cls): return (0.0,0.0) if cls.numTxs == 0 else cls.txType.attRange ## # \brief Gets the attenuation resolution for transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterAttenuationRes() @classmethod def getTransmitterAttenuationRes(cls): return None if cls.numTxs == 0 else cls.txType.attRes ## # \brief Gets whether transmitters on the radio support continuous-wave # (CW) tone generation. # # \copydetails CyberRadioDriver::IRadio::transmitterSupportsCW() @classmethod def transmitterSupportsCW(cls): return (cls.numTxs > 0 and issubclass(cls.txType.toneGenType, components._cwToneGen)) ## # \brief Gets the number of CW tone generators for each transmitter. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWNum() @classmethod def getTransmitterCWNum(cls): return len(cls.getTransmitterCWIndexRange()) ## # \brief Gets the CW tone generator index range for transmitters on the radio. 
# # \copydetails CyberRadioDriver::IRadio::getTransmitterCWIndexRange() @classmethod def getTransmitterCWIndexRange(cls): return [] if not cls.transmitterSupportsCW() else \ list(range(cls.txType.toneGenIndexBase, \ cls.txType.toneGenIndexBase + cls.txType.numToneGen, 1)) ## # \brief Gets the CW tone generator frequency range for transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWFrequencyRange() @classmethod def getTransmitterCWFrequencyRange(cls): return (0.0,0.0) if not cls.transmitterSupportsCW() else cls.txType.toneGenType.frqRange ## # \brief Gets the CW tone generator frequency resolution for transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWFrequencyRes() @classmethod def getTransmitterCWFrequencyRes(cls): return None if not cls.transmitterSupportsCW() else cls.txType.toneGenType.frqRes ## # \brief Gets the CW tone generator amplitude range for transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWAmplitudeRange() @classmethod def getTransmitterCWAmplitudeRange(cls): return (0.0,0.0) if not cls.transmitterSupportsCW() else cls.txType.toneGenType.ampRange ## # \brief Gets the CW tone generator amplitude resolution for transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWAmplitudeRes() @classmethod def getTransmitterCWAmplitudeRes(cls): return None if not cls.transmitterSupportsCW() else cls.txType.toneGenType.ampRes ## # \brief Gets the CW tone generator phase range for transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWPhaseRange() @classmethod def getTransmitterCWPhaseRange(cls): return (0.0,0.0) if not cls.transmitterSupportsCW() else cls.txType.toneGenType.phaseRange ## # \brief Gets the CW tone generator phase resolution for transmitters on the radio. 
# # \copydetails CyberRadioDriver::IRadio::getTransmitterCWPhaseRes() @classmethod def getTransmitterCWPhaseRes(cls): return None if not cls.transmitterSupportsCW() else cls.txType.toneGenType.phaseRes ## # \brief Gets whether transmitters on the radio support sweep functions # during continuous-wave (CW) tone generation. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWPhaseRes() @classmethod def transmitterSupportsCWSweep(cls): return cls.transmitterSupportsCW() and cls.txType.toneGenType.sweepCmd is not None ## # \brief Gets the CW tone generator sweep start frequency range for # transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStartRange() @classmethod def getTransmitterCWSweepStartRange(cls): return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \ else cls.txType.toneGenType.startRange ## # \brief Gets the CW tone generator sweep start frequency resolution for # transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStartRes() @classmethod def getTransmitterCWSweepStartRes(cls): return None if not cls.transmitterSupportsCWSweep() \ else cls.txType.toneGenType.startRes ## # \brief Gets the CW tone generator sweep stop frequency range for # transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStopRange() @classmethod def getTransmitterCWSweepStopRange(cls): return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \ else cls.txType.toneGenType.stopRange ## # \brief Gets the CW tone generator sweep stop frequency resolution for # transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStopRes() @classmethod def getTransmitterCWSweepStopRes(cls): return None if not cls.transmitterSupportsCWSweep() \ else cls.txType.toneGenType.stopRes ## # \brief Gets the CW tone generator sweep step frequency range for # transmitters on the radio. 
# # \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStepRange() @classmethod def getTransmitterCWSweepStepRange(cls): return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \ else cls.txType.toneGenType.stepRange ## # \brief Gets the CW tone generator sweep step frequency resolution for # transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStepRes() @classmethod def getTransmitterCWSweepStepRes(cls): return None if not cls.transmitterSupportsCWSweep() \ else cls.txType.toneGenType.stepRes ## # \brief Gets the CW tone generator sweep dwell time range for # transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepDwellRange() @classmethod def getTransmitterCWSweepDwellRange(cls): return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \ else cls.txType.toneGenType.dwellRange ## # \brief Gets the CW tone generator sweep dwell time resolution for # transmitters on the radio. # # \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepDwellRes() @classmethod def getTransmitterCWSweepDwellRes(cls): return None if not cls.transmitterSupportsCWSweep() \ else cls.txType.toneGenType.dwellRes ## # \brief Gets the number of wideband DUCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getNumWbduc() @classmethod def getNumWbduc(cls): return len(cls.getWbducIndexRange()) ## # \brief Gets the index range for the wideband DUCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getWbducIndexRange() @classmethod def getWbducIndexRange(cls): return list(range(cls.wbducIndexBase, cls.wbducIndexBase + cls.numWbduc, 1)) ## # \brief Gets the frequency offset range for the wideband DUCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getWbducFrequencyRange() @classmethod def getWbducFrequencyRange(cls): return (0.0,0.0) if cls.wbducType is None else cls.wbducType.frqRange ## # \brief Gets the frequency resolution for wideband DUCs on the radio. 
#
# \copydetails CyberRadioDriver::IRadio::getWbducFrequencyRes()
@classmethod
def getWbducFrequencyRes(cls):
    if cls.wbducType is None:
        return 0.0
    return cls.wbducType.frqRes

##
# \brief Gets the frequency unit for wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducFrequencyUnit()
@classmethod
def getWbducFrequencyUnit(cls):
    if cls.wbducType is None:
        return 0.0
    return cls.wbducType.frqUnits

##
# \brief Gets the attenuation range for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducAttenuationRange()
@classmethod
def getWbducAttenuationRange(cls):
    if cls.wbducType is None:
        return (0.0, 0.0)
    return cls.wbducType.attRange

##
# \brief Gets the attenuation resolution for wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducAttenuationRes()
@classmethod
def getWbducAttenuationRes(cls):
    if cls.wbducType is None:
        return 0.0
    return cls.wbducType.attRes

##
# \brief Gets the allowed rate set for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducRateSet()
@classmethod
def getWbducRateSet(cls):
    if cls.wbducType is None:
        return {}
    return cls.wbducType.rateSet

##
# \brief Gets the allowed rate list for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducRateList()
@classmethod
def getWbducRateList(cls):
    if cls.wbducType is None:
        return []
    # Rates are listed in ascending order of their rate-set index.
    rates = cls.wbducType.rateSet
    return [rates[key] for key in sorted(rates)]

##
# \brief Gets whether or not the wideband DUCs on the radio support loading
# sample snapshots.
#
# \copydetails CyberRadioDriver::IRadio::wbducSupportsSnapshotLoad()
@classmethod
def wbducSupportsSnapshotLoad(cls):
    if cls.wbducType is None:
        return False
    return cls.wbducType.snapshotLoadCmd is not None

##
# \brief Gets whether or not the wideband DUCs on the radio support
# transmitting sample snapshots.
#
# \copydetails CyberRadioDriver::IRadio::wbducSupportsSnapshotTransmit()
@classmethod
def wbducSupportsSnapshotTransmit(cls):
    if cls.wbducType is None:
        return False
    return cls.wbducType.snapshotTxCmd is not None

##
# \brief Gets the index range for the DDC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcGroupIndexRange()
@classmethod
def getDdcGroupIndexRange(cls, wideband):
    # Dispatch to the wideband or narrowband variant.
    if wideband:
        return cls.getWbddcGroupIndexRange()
    return cls.getNbddcGroupIndexRange()

##
# \brief Gets the number of wideband DDC groups on the radio.
# \copydetails CyberRadioDriver::IRadio::getNumWbddcGroups()
@classmethod
def getNumWbddcGroups(cls):
    indices = cls.getWbddcGroupIndexRange()
    return len(indices)

##
# \brief Gets the index range for the wideband DDC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcGroupIndexRange()
@classmethod
def getWbddcGroupIndexRange(cls):
    first = cls.wbddcGroupIndexBase
    return list(range(first, first + cls.numWbddcGroups))

##
# \brief Gets the number of narrowband DDC groups on the radio.
# \copydetails CyberRadioDriver::IRadio::getNumNbddcGroups()
@classmethod
def getNumNbddcGroups(cls):
    indices = cls.getNbddcGroupIndexRange()
    return len(indices)

##
# \brief Gets the index range for the narrowband DDC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcGroupIndexRange()
@classmethod
def getNbddcGroupIndexRange(cls):
    first = cls.nbddcGroupIndexBase
    return list(range(first, first + cls.numNbddcGroups))

##
# \brief Gets the number of combined DDC groups on the radio.
# \copydetails CyberRadioDriver::IRadio::getNumCombinedDdcGroups()
@classmethod
def getNumCombinedDdcGroups(cls):
    indices = cls.getCombinedDdcGroupIndexRange()
    return len(indices)

##
# \brief Gets the index range for the combined DDC groups on the radio.
# # \copydetails CyberRadioDriver::IRadio::getCombinedDdcGroupIndexRange() @classmethod def getCombinedDdcGroupIndexRange(cls): return list(range(cls.cddcGroupIndexBase, cls.cddcGroupIndexBase + cls.numCddcGroups, 1)) ## # \brief Gets the number of wideband DUC groups on the radio. # # \copydetails CyberRadioDriver::IRadio::getNumWbducGroups() @classmethod def getNumWbducGroups(cls): return len(cls.getWbducGroupIndexRange()) ## # \brief Gets the index range for the wideband DUC groups on the radio. # # \copydetails CyberRadioDriver::IRadio::getWbducGroupIndexRange() @classmethod def getWbducGroupIndexRange(cls): return list(range(cls.wbducGroupIndexBase, cls.wbducGroupIndexBase + cls.numWbducGroups, 1)) # ------------- Deprecated/Helper Methods ----------------- # ## # \internal # \brief Define this object's string representation. def __str__(self): return self.name ## # \internal # \brief Helper function that returns an index list. def _getIndexList(self,objIndex,objDict): if objIndex is None: return list(objDict.keys()) elif type(objIndex) is int: return [objIndex,] if objIndex in list(objDict.keys()) else [] elif type(objIndex) is list: return [i for i in objIndex if i in list(objDict.keys())] else: return [] ## # \internal # \brief Helper function that "normalizes" an input configuration dictionary # section by doing the following: # <ul> # <li> Ensuring that keys for any enumerated entries are integers # <li> Expanding sub-dictionaries with the special "all" key # <li> Performing specialization for individual entries # # \param configDict The incoming configuration dictionary. # \param entryIndexList The list of entry indices (used in expanding "all" keys). # \return The new configuration dictionary. 
def _normalizeConfigDictSection(self, configDict, entryIndexList): newConfigDict = {} # Fix keys in config dictionary convertKeys = [] invalidKeys = [] for key in configDict: try: tmp = int(key) if tmp != key: convertKeys.append(key) except: if key != configKeys.ALL: invalidKeys.append(key) for key in invalidKeys: configDict.pop(key) for key in convertKeys: configDict[int(key)] = configDict.pop(key) if configKeys.ALL in configDict: tmpDict = configDict.pop(configKeys.ALL) for entryNum in entryIndexList: newConfigDict[entryNum] = copy.deepcopy(tmpDict) for entryNum in configDict: if entryNum in newConfigDict: self._dictUpdate(newConfigDict[entryNum], \ configDict[entryNum], \ newConfigDict[entryNum], \ list(configDict[entryNum].keys())) else: newConfigDict[entryNum] = copy.deepcopy(configDict[entryNum]) return newConfigDict ## # \internal # \brief Helper function that "normalizes" an input configuration dictionary # by doing the following: # <ul> # <li> Ensuring that keys for component enumerations are integers # <li> Expanding sub-dictionaries with the special "all" key # <li> Performing specialization for individual components or entries # \param configDict The incoming configuration dictionary. # \return The new configuration dictionary. 
def _normalizeConfigDict(self, configDict):
    """Normalize a full radio configuration dictionary.

    Each recognized top-level configuration key gets its section run
    through _normalizeConfigDictSection() against the appropriate index
    list; unrecognized keys are deep-copied through unchanged.
    """
    newConfigDict = {}
    for configKey in configDict:
        if configKey == configKeys.CONFIG_TUNER:
            # Tuner section: normalize against the tuner index list.
            newConfigDict[configKeys.CONFIG_TUNER] = self._normalizeConfigDictSection(
                    configDict[configKeys.CONFIG_TUNER],
                    self.tunerIndexList)
        elif configKey == configKeys.CONFIG_DDC:
            # DDC section: wideband and narrowband sub-sections each get
            # their own index list.
            newConfigDict[configKeys.CONFIG_DDC] = {}
            for ddcType in [configKeys.CONFIG_WBDDC, configKeys.CONFIG_NBDDC]:
                isWideband = (ddcType == configKeys.CONFIG_WBDDC)
                ddcConfDict = configDict[configKeys.CONFIG_DDC].get(ddcType,{})
                ddcIndexRange = self.wbddcIndexList if isWideband else self.nbddcIndexList
                newConfigDict[configKeys.CONFIG_DDC][ddcType] = self._normalizeConfigDictSection(
                        ddcConfDict, ddcIndexRange)
        elif self.numGigE > 0 and configKey == configKeys.CONFIG_IP:
            # IP section (only when GigE ports exist): normalize per-port,
            # then normalize each port's destination-IP table per DIP entry.
            # A deep copy is taken because _normalizeConfigDictSection
            # mutates its input.
            tmpDict = copy.deepcopy(configDict[configKeys.CONFIG_IP])
            newConfigDict[configKeys.CONFIG_IP] = self._normalizeConfigDictSection(
                    tmpDict, self.gigEIndexList)
            for gigEPortNum in self.gigEIndexList:
                if gigEPortNum in newConfigDict[configKeys.CONFIG_IP] and \
                   configKeys.IP_DEST in newConfigDict[configKeys.CONFIG_IP][gigEPortNum]:
                    tmpDict = copy.deepcopy(newConfigDict[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_DEST])
                    newConfigDict[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_DEST] = \
                            self._normalizeConfigDictSection(tmpDict,
                                                             self.gigEDipEntryIndexList)
        elif self.numTxs > 0 and configKey == configKeys.CONFIG_TX:
            # Transmitter section (only when transmitters exist): normalize
            # per-transmitter, then normalize each transmitter's CW
            # sub-section per tone-generator index.
            tmpDict = copy.deepcopy(configDict[configKeys.CONFIG_TX])
            newConfigDict[configKeys.CONFIG_TX] = self._normalizeConfigDictSection(
                    tmpDict,
                    self.txIndexList)
            for txNum in self.getTransmitterIndexRange():
                if txNum in newConfigDict[configKeys.CONFIG_TX]:
                    if configKeys.CONFIG_CW in newConfigDict[configKeys.CONFIG_TX][txNum]:
                        newConfigDict[configKeys.CONFIG_TX][txNum][configKeys.CONFIG_CW] = \
                                self._normalizeConfigDictSection( newConfigDict[configKeys.CONFIG_TX][txNum][configKeys.CONFIG_CW],
                                                                  self.txToneGenIndexList)
        elif configKey == configKeys.CONFIG_DUC:
            # DUC section: same wideband/narrowband split as the DDCs.
            newConfigDict[configKeys.CONFIG_DUC] = {}
            for ducType in [configKeys.CONFIG_WBDUC, configKeys.CONFIG_NBDUC]:
                isWideband = (ducType == configKeys.CONFIG_WBDUC)
                ducConfDict = configDict[configKeys.CONFIG_DUC].get(ducType,{})
                ducIndexRange = self.wbducIndexList if isWideband else self.nbducIndexList
                newConfigDict[configKeys.CONFIG_DUC][ducType] = self._normalizeConfigDictSection(
                        ducConfDict, ducIndexRange)
            pass
        elif configKey == configKeys.CONFIG_DDC_GROUP:
            # DDC group section: wideband, narrowband, and combined groups,
            # each against its own group index list.
            newConfigDict[configKeys.CONFIG_DDC_GROUP] = {}
            for ddcType in [configKeys.CONFIG_WBDDC_GROUP, configKeys.CONFIG_NBDDC_GROUP,
                            configKeys.CONFIG_COMBINED_DDC_GROUP]:
                isWideband = (ddcType == configKeys.CONFIG_WBDDC_GROUP)
                ddcGroupConfDict = configDict[configKeys.CONFIG_DDC_GROUP].get(ddcType,{})
                ddcGroupIndexRange = self.wbddcGroupIndexList if isWideband else self.nbddcGroupIndexList
                if ddcType == configKeys.CONFIG_COMBINED_DDC_GROUP:
                    ddcGroupIndexRange = self.cddcGroupIndexList
                newConfigDict[configKeys.CONFIG_DDC_GROUP][ddcType] = self._normalizeConfigDictSection(
                        ddcGroupConfDict, ddcGroupIndexRange)
        elif configKey == configKeys.CONFIG_FFT:
            # FFT stream section: normalize against the FFT stream index list.
            newConfigDict[configKeys.CONFIG_FFT] = self._normalizeConfigDictSection(
                    configDict[configKeys.CONFIG_FFT],
                    self.fftStreamIndexList)
        else:
            # Anything unrecognized passes through as a deep copy.
            newConfigDict[configKey] = copy.deepcopy(configDict[configKey])
    return newConfigDict

##
# \brief Gets the radio configuration.
#
# \deprecated Use getConfiguration() instead.
#
# \return The dictionary of radio settings.
def getAll(self):
    return self.getConfiguration()

##
# \internal
# \brief Helper function for setting the tuner configuration.
#
# Deprecated in favor of setConfiguration().
def setTunerConfigurationNew(self, *args, **kwargs): success = True tunerIndex = kwargs.get(configKeys.TUNER_INDEX, None) for i in self._getIndexList(tunerIndex, self.tunerDict): success &= self.tunerDict[i].setConfiguration(*args, **kwargs) self.cmdErrorInfo.extend(self.tunerDict[i].getLastCommandErrorInfo()) return success ## # \internal # \brief Helper function for getting the tuner configuration. # # Deprecated in favor of getConfiguration(). def getTunerConfigurationNew(self, tunerIndex=None): config = {} for i in self._getIndexList(tunerIndex, self.tunerDict): config[i] = self.tunerDict[i].getConfiguration() self.cmdErrorInfo.extend(self.tunerDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for querying the tuner configuration. # # Deprecated in favor of queryConfiguration(). def queryTunerConfigurationNew(self, tunerIndex=None): config = {} for i in self._getIndexList(tunerIndex, self.tunerDict): config[i] = self.tunerDict[i].queryConfiguration() self.cmdErrorInfo.extend(self.tunerDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for setting the DDC configuration. # # Deprecated in favor of setConfiguration(). def setDdcConfigurationNew(self, wideband=True, *args, **kwargs): success = True ddcDict = self.wbddcDict if wideband else self.nbddcDict ddcIndex = kwargs.get(configKeys.DDC_INDEX, None) for i in self._getIndexList(ddcIndex, ddcDict): success &= ddcDict[i].setConfiguration(*args, **kwargs) self.cmdErrorInfo.extend(ddcDict[i].getLastCommandErrorInfo()) return success ## # \internal # \brief Helper function for getting the DDC configuration. # # Deprecated in favor of getConfiguration(). 
def getDdcConfigurationNew(self, wideband=True, ddcIndex=None):
    # Collect the cached configuration of one or all (wideband or
    # narrowband) DDCs, accumulating any command errors.
    config = {}
    ddcDict = self.wbddcDict if wideband else self.nbddcDict
    for i in self._getIndexList(ddcIndex, ddcDict):
        config[i] = ddcDict[i].getConfiguration()
        self.cmdErrorInfo.extend(ddcDict[i].getLastCommandErrorInfo())
    return config

##
# \internal
# \brief Helper function for querying the DDC configuration.
#
# Deprecated in favor of queryConfiguration().
def queryDdcConfigurationNew(self, wideband=True, ddcIndex=None):
    # Same as getDdcConfigurationNew(), but queries the hardware.
    config = {}
    ddcDict = self.wbddcDict if wideband else self.nbddcDict
    for i in self._getIndexList(ddcIndex, ddcDict):
        config[i] = ddcDict[i].queryConfiguration()
        self.cmdErrorInfo.extend(ddcDict[i].getLastCommandErrorInfo())
    return config

##
# \internal
# \brief Helper function for setting the IP configuration.
#
# Deprecated in favor of setConfiguration().
def setIpConfigurationNew(self, confDict):
    success = True
    # IP configuration set -- The format of the configuration dictionary
    # depends on whether the radio has Gigabit Ethernet ports on it or not.
    # -- No GigE ports
    if self.numGigE == 0:
        # Each (command class, config key) pair sets one flat IP/MAC value.
        for cmdClass, configKey in [
                (self.sipCmd, configKeys.IP_SOURCE),
                (self.dipCmd, configKeys.IP_DEST),
                (self.smacCmd, configKeys.MAC_SOURCE),
                (self.dmacCmd, configKeys.MAC_DEST),
            ]:
            cDict = { "parent": self,
                      "verbose": self.verbose,
                      "logFile": self.logFile,
                      configKey: confDict.get(configKey, 0) }
            if configKey in confDict and cmdClass is not None and \
               cmdClass.settable:
                cmd = cmdClass(**cDict)
                success &= cmd.send( self.sendCommand, )
                if success and cmd.success:
                    # Cache the value the command actually applied.
                    self.configuration[configKeys.CONFIG_IP][configKey] = \
                            getattr(cmd, configKey)
                else:
                    # NOTE(review): unlike the GigE branches below, errorInfo
                    # is not checked for None here -- confirm cmd.errorInfo is
                    # always a list on failure.
                    self.cmdErrorInfo.extend(cmd.errorInfo)
                pass
            pass
    # -- Has GigE ports
    else:
        for gigEPortNum in self.gigEIndexList:
            if gigEPortNum in confDict:
                # Set source IP address for this GigE port
                if self.sipCmd is not None and self.sipCmd.settable and \
                   configKeys.IP_SOURCE in confDict[gigEPortNum]:
                    # What we do here depends on what "sourceIP" points to --
                    # either a string (NDR308-class) or a dictionary (NDR551-class)
                    if isinstance(confDict[gigEPortNum][configKeys.IP_SOURCE], str):
                        # Do it the NDR308 way: a single source IP string.
                        cDict = { "parent": self,
                                  "verbose": self.verbose,
                                  "logFile": self.logFile,
                                  configKeys.GIGE_PORT_INDEX: gigEPortNum,
                                  configKeys.IP_SOURCE: confDict[gigEPortNum][configKeys.IP_SOURCE],
                                }
                        cmd = self.sipCmd(**cDict)
                        success &= cmd.send( self.sendCommand, )
                        if success and cmd.success:
                            self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_SOURCE] = \
                                    getattr(cmd, configKeys.IP_SOURCE)
                        else:
                            self.cmdErrorInfo.extend(cmd.errorInfo)
                    else:
                        # Do it the NDR551 way: a dictionary of MAC/IP/netmask/
                        # port values, each forwarded only if present.
                        cDict = { "parent": self,
                                  "verbose": self.verbose,
                                  "logFile": self.logFile,
                                  configKeys.GIGE_PORT_INDEX: gigEPortNum,
                                }
                        if configKeys.GIGE_MAC_ADDR in confDict[gigEPortNum][configKeys.IP_SOURCE]:
                            cDict[configKeys.GIGE_MAC_ADDR] = confDict[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_MAC_ADDR]
                        if configKeys.GIGE_IP_ADDR in confDict[gigEPortNum][configKeys.IP_SOURCE]:
                            cDict[configKeys.GIGE_IP_ADDR] = confDict[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_IP_ADDR]
                        if configKeys.GIGE_NETMASK in confDict[gigEPortNum][configKeys.IP_SOURCE]:
                            cDict[configKeys.GIGE_NETMASK] = confDict[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_NETMASK]
                        if configKeys.GIGE_SOURCE_PORT in confDict[gigEPortNum][configKeys.IP_SOURCE]:
                            cDict[configKeys.GIGE_SOURCE_PORT] = confDict[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_SOURCE_PORT]
                        cmd = self.sipCmd(**cDict)
                        success &= cmd.send( self.sendCommand, )
                        if success and cmd.success:
                            #self.logIfVerbose("[setIpConfigurationNew()] cmd attributes = %s" % \
                            #                  cmd.attributeDump())
                            # Cache only the attributes the command object
                            # actually carries after the send.
                            if configKeys.GIGE_MAC_ADDR in cmd.__dict__:
                                self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_MAC_ADDR] = \
                                        getattr(cmd, configKeys.GIGE_MAC_ADDR)
                            if configKeys.GIGE_IP_ADDR in cmd.__dict__:
                                self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_IP_ADDR] = \
                                        getattr(cmd, configKeys.GIGE_IP_ADDR)
                            if configKeys.GIGE_NETMASK in cmd.__dict__:
                                self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_NETMASK] = \
                                        getattr(cmd, configKeys.GIGE_NETMASK)
                            if configKeys.GIGE_SOURCE_PORT in cmd.__dict__:
                                self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_SOURCE_PORT] = \
                                        getattr(cmd, configKeys.GIGE_SOURCE_PORT)
                        else:
                            if cmd.errorInfo is not None:
                                self.cmdErrorInfo.extend(cmd.errorInfo)
                # Set destination IP table info for this GigE port
                if self.dipCmd is not None and self.dipCmd.settable and \
                   configKeys.IP_DEST in confDict[gigEPortNum]:
                    for gigEDipEntryNum in self.gigEDipEntryIndexList:
                        if gigEDipEntryNum in confDict[gigEPortNum][configKeys.IP_DEST]:
                            cDict = { "parent": self,
                                      "verbose": self.verbose,
                                      "logFile": self.logFile,
                                      configKeys.GIGE_PORT_INDEX: gigEPortNum,
                                      configKeys.GIGE_DIP_INDEX: gigEDipEntryNum,
                                    }
                            keys = [configKeys.GIGE_IP_ADDR, configKeys.GIGE_MAC_ADDR,
                                    configKeys.GIGE_SOURCE_PORT, configKeys.GIGE_DEST_PORT,
                                    configKeys.GIGE_ARP]
                            # Merge requested values over the cached entry.
                            self._dictUpdate(cDict,
                                             confDict[gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum],
                                             self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum],
                                             keys)
                            # Don't send along MAC address if there is an ARP setting
                            # and the ARP setting is True.  This prevents errors being
                            # triggered on radios with less permissive configurations
                            # (like the NDR551).
                            if configKeys.GIGE_ARP in cDict and cDict[configKeys.GIGE_ARP]:
                                cDict.pop(configKeys.GIGE_MAC_ADDR, None)
                            cmd = self.dipCmd(**cDict)
                            success &= cmd.send( self.sendCommand, )
                            if success and cmd.success:
                                for key in keys:
                                    if hasattr(cmd, key):
                                        self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum][key] = \
                                                getattr(cmd, key)
                            else:
                                if cmd.errorInfo is not None:
                                    self.cmdErrorInfo.extend(cmd.errorInfo)
                            pass
                # Set flow control for this GigE port
                if self.tgfcCmd is not None and self.tgfcCmd.settable and \
                   configKeys.GIGE_FLOW_CONTROL in confDict[gigEPortNum]:
                    cDict = { "parent": self,
                              "verbose": self.verbose,
                              "logFile": self.logFile,
                              configKeys.GIGE_PORT_INDEX: gigEPortNum,
                              configKeys.GIGE_FLOW_CONTROL: confDict[gigEPortNum][configKeys.GIGE_FLOW_CONTROL],
                            }
                    cmd = self.tgfcCmd(**cDict)
                    success &= cmd.send( self.sendCommand, )
                    if success and cmd.success:
                        self.configuration[configKeys.CONFIG_IP][gigEPortNum][configKeys.GIGE_FLOW_CONTROL] = \
                                getattr(cmd, configKeys.GIGE_FLOW_CONTROL)
                    else:
                        if cmd.errorInfo is not None:
                            self.cmdErrorInfo.extend(cmd.errorInfo)
                    pass
    return success

##
# \internal
# \brief Helper function for querying the IP configuration.
# \param gigEPortIndex 10-Gig data port index, or None to query all data ports.
def queryIpConfigurationNew(self, gigEPortIndex=None):
    # IP configuration query -- The format of this section depends on whether
    # the radio has Gigabit Ethernet ports on it or not.
    ret = {}
    # -- No GigE ports
    if self.numGigE == 0:
        ret = self._queryIpConfigurationNo10Gig()
    # -- Has GigE ports
    else:
        ret = self._queryIpConfiguration10Gig(gigEPortIndex=gigEPortIndex)
    return ret

##
# \internal
# \brief Helper function for querying the IP configuration for radios without
# 10-Gig Ethernet interfaces.
def _queryIpConfigurationNo10Gig(self):
    ret = {}
    # One flat value per (command class, config key) pair; None when the
    # command is unsupported or not queryable.
    for cmdClass, configKey in [
            (self.sipCmd, configKeys.IP_SOURCE),
            (self.dipCmd, configKeys.IP_DEST),
            (self.smacCmd, configKeys.MAC_SOURCE),
            (self.dmacCmd, configKeys.MAC_DEST),
        ]:
        ret[configKey] = None
        if cmdClass is not None and cmdClass.queryable:
            cmd = cmdClass(parent=self, query=True, verbose=self.verbose, logFile=self.logFile)
            cmd.send( self.sendCommand, )
            self._addLastCommandErrorInfo(cmd)
            rspInfo = cmd.getResponseInfo()
            if rspInfo is not None:
                ret[configKey] = rspInfo.get(configKey, "")
    return ret

##
# \internal
# \brief Helper function for querying the IP configuration for radios with
# 10-Gig Ethernet interfaces.
# \param gigEPortIndex 10-Gig data port index, or None to query all data ports.
def _queryIpConfiguration10Gig(self, gigEPortIndex=None):
    # Build {port: {IP_SOURCE: ..., IP_DEST: {...}}} for one or all
    # 10-Gig data ports.
    ret = {}
    gigEPortIndexRange = self.getGigEIndexRange() if gigEPortIndex is None else [gigEPortIndex]
    for gigEPortNum in gigEPortIndexRange:
        ret[gigEPortNum] = {}
        # Query source IP address for this GigE port
        if self.sipCmd is not None and self.sipCmd.queryable:
            # Default source IP info (shape depends on the radio's protocol)
            if self.json:
                ret[gigEPortNum][configKeys.IP_SOURCE] = {
                        configKeys.GIGE_MAC_ADDR: None,
                        configKeys.GIGE_IP_ADDR: None,
                        configKeys.GIGE_NETMASK: None,
                        configKeys.GIGE_SOURCE_PORT: None,
                    }
            else:
                ret[gigEPortNum][configKeys.IP_SOURCE] = None
            cDict = { "parent": self,
                      "query": True,
                      "verbose": self.verbose,
                      "logFile": self.logFile,
                      configKeys.GIGE_PORT_INDEX: gigEPortNum,
                    }
            cmd = self.sipCmd(**cDict)
            cmd.send( self.sendCommand, )
            self._addLastCommandErrorInfo(cmd)
            rspInfo = cmd.getResponseInfo()
            if rspInfo is not None:
                # How to parse this depends on whether the radio is JSON or not
                if self.json:
                    # Do it NDR551-style: a dict of MAC/IP/netmask/port.
                    ret[gigEPortNum][configKeys.IP_SOURCE] = {}
                    ret[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_MAC_ADDR] = \
                            rspInfo.get(configKeys.GIGE_MAC_ADDR, "")
                    ret[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_IP_ADDR] = \
                            rspInfo.get(configKeys.GIGE_IP_ADDR, "")
                    ret[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_NETMASK] = \
                            rspInfo.get(configKeys.GIGE_NETMASK, "")
                    ret[gigEPortNum][configKeys.IP_SOURCE][configKeys.GIGE_SOURCE_PORT] = \
                            rspInfo.get(configKeys.GIGE_SOURCE_PORT, 0)
                else:
                    # Do it NDR308-style: a single source IP string.
                    ret[gigEPortNum][configKeys.IP_SOURCE] = \
                            rspInfo.get(configKeys.IP_SOURCE, "")
        # Query destination IP table for this GigE port
        if self.dipCmd is not None and self.dipCmd.queryable:
            ret[gigEPortNum][configKeys.IP_DEST] = {}
            for gigEDipEntryNum in self.gigEDipEntryIndexList:
                ret[gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum] = {}
                # Construct an argument-less command just to inspect which
                # config keys this command class can report, so the result
                # dict gets None placeholders for them.
                cmd = self.dipCmd(**{})
                for configKey in [configKeys.GIGE_IP_ADDR,
                                  configKeys.GIGE_MAC_ADDR,
                                  configKeys.GIGE_SOURCE_PORT,
                                  configKeys.GIGE_DEST_PORT,
                                  configKeys.GIGE_ARP]:
                    if hasattr(cmd, "queryParamMap") and configKey in cmd.queryParamMap:
                        ret[gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum][configKey] = None
                    elif hasattr(cmd, "queryResponseData") and configKey in [q[0] for q in cmd.queryResponseData]:
                        ret[gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum][configKey] = None
                cDict = { "parent": self,
                          "query": True,
                          "verbose": self.verbose,
                          "logFile": self.logFile,
                          configKeys.GIGE_PORT_INDEX: gigEPortNum,
                          configKeys.GIGE_DIP_INDEX: gigEDipEntryNum,
                        }
                cmd = self.dipCmd(**cDict)
                cmd.send( self.sendCommand, )
                rspInfo = cmd.getResponseInfo()
                self._addLastCommandErrorInfo(cmd)
                if rspInfo is not None:
                    # Fill in whichever keys the response actually carries.
                    for configKey in [configKeys.GIGE_IP_ADDR,
                                      configKeys.GIGE_MAC_ADDR,
                                      configKeys.GIGE_SOURCE_PORT,
                                      configKeys.GIGE_DEST_PORT,
                                      configKeys.GIGE_ARP]:
                        if configKey in rspInfo:
                            ret[gigEPortNum][configKeys.IP_DEST][gigEDipEntryNum][configKey] = \
                                    rspInfo[configKey]
    return ret

##
# \internal
# \brief Helper function for setting the transmitter configuration.
#
# Deprecated in favor of setConfiguration().
def setTxConfigurationNew(self, *args, **kwargs):
    success = True
    txIndex = kwargs.get(configKeys.TX_INDEX, None)
    for i in self._getIndexList(txIndex, self.txDict):
        success &= self.txDict[i].setConfiguration(*args, **kwargs)
        self.cmdErrorInfo.extend(self.txDict[i].getLastCommandErrorInfo())
    return success

##
# \internal
# \brief Helper function for getting the transmitter configuration.
#
# Deprecated in favor of getConfiguration().
def getTxConfigurationNew(self, txIndex=None):
    config = {}
    for i in self._getIndexList(txIndex, self.txDict):
        config[i] = self.txDict[i].getConfiguration()
        self.cmdErrorInfo.extend(self.txDict[i].getLastCommandErrorInfo())
    return config

##
# \internal
# \brief Helper function for querying the transmitter configuration.
#
# Deprecated in favor of queryConfiguration().
def queryTxConfigurationNew(self, txIndex=None): config = {} for i in self._getIndexList(txIndex, self.txDict): config[i] = self.txDict[i].queryConfiguration() self.cmdErrorInfo.extend(self.txDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for setting the DUC configuration. # # Deprecated in favor of setConfiguration(). def setDucConfigurationNew(self, wideband=True, *args, **kwargs): success = True ducDict = self.wbducDict if wideband else self.nbducDict ducIndex = kwargs.get(configKeys.DUC_INDEX, None) for i in self._getIndexList(ducIndex, ducDict): success &= ducDict[i].setConfiguration(*args, **kwargs) self.cmdErrorInfo.extend(ducDict[i].getLastCommandErrorInfo()) return success ## # \internal # \brief Helper function for getting the DUC configuration. # # Deprecated in favor of getConfiguration(). def getDucConfigurationNew(self, wideband=True, ducIndex=None): config = {} ducDict = self.wbducDict if wideband else self.nbducDict for i in self._getIndexList(ducIndex, ducDict): config[i] = ducDict[i].getConfiguration() self.cmdErrorInfo.extend(ducDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for querying the DUC configuration. # # Deprecated in favor of getConfiguration(). def queryDucConfigurationNew(self, wideband=True, ducIndex=None): config = {} ducDict = self.wbducDict if wideband else self.nbducDict for i in self._getIndexList(ducIndex, ducDict): config[i] = ducDict[i].queryConfiguration() self.cmdErrorInfo.extend(ducDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for getting the DDC group configuration. # # Deprecated in favor of getConfiguration(). 
def getDdcGroupConfigurationNew(self, wideband=True, ddcGroupIndex=None): config = {} ddcGroupDict = self.wbddcGroupDict if wideband else self.nbddcGroupDict for i in self._getIndexList(ddcGroupIndex, ddcGroupDict): config[i] = ddcGroupDict[i].getConfiguration() self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for querying the DDC group configuration. # # Deprecated in favor of queryConfiguration(). def queryDdcGroupConfigurationNew(self, wideband=True, ddcGroupIndex=None): config = {} ddcGroupDict = self.wbddcGroupDict if wideband else self.nbddcGroupDict for i in self._getIndexList(ddcGroupIndex, ddcGroupDict): config[i] = ddcGroupDict[i].queryConfiguration() self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for setting the DDC group configuration. # # Deprecated in favor of setConfiguration(). def setDdcGroupConfigurationNew(self, wideband=True, *args, **kwargs): success = True ddcGroupDict = self.wbddcGroupDict if wideband else self.nbddcGroupDict ddcGroupIndex = kwargs.get(configKeys.INDEX, None) for i in self._getIndexList(ddcGroupIndex, ddcGroupDict): success &= ddcGroupDict[i].setConfiguration(*args, **kwargs) self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo()) return success ## # \internal # \brief Helper function for getting the combined DDC group configuration. # # Deprecated in favor of getConfiguration(). def getCombinedDdcGroupConfigurationNew(self, ddcGroupIndex=None): config = {} ddcGroupDict = self.cddcGroupDict for i in self._getIndexList(ddcGroupIndex, ddcGroupDict): config[i] = ddcGroupDict[i].getConfiguration() self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for querying the combined DDC group configuration. # # Deprecated in favor of queryConfiguration(). 
def queryCombinedDdcGroupConfigurationNew(self, ddcGroupIndex=None): config = {} ddcGroupDict = self.cddcGroupDict for i in self._getIndexList(ddcGroupIndex, ddcGroupDict): config[i] = ddcGroupDict[i].queryConfiguration() self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for setting the combined DDC group configuration. # # Deprecated in favor of setConfiguration(). def setCombinedDdcGroupConfigurationNew(self, *args, **kwargs): success = True #self.logIfVerbose("[ndr551][setCombinedDdcGroupConfigurationNew()] begin") ddcGroupDict = self.cddcGroupDict ddcGroupIndex = kwargs.get(configKeys.INDEX, None) for i in self._getIndexList(ddcGroupIndex, ddcGroupDict): success &= ddcGroupDict[i].setConfiguration(*args, **kwargs) self.cmdErrorInfo.extend(ddcGroupDict[i].getLastCommandErrorInfo()) #self.logIfVerbose("[ndr551][setCombinedDdcGroupConfigurationNew()] end") return success ## # \internal # \brief Helper function for getting the DUC group configuration. # # Deprecated in favor of getConfiguration(). def getDucGroupConfigurationNew(self, wideband=True, ducGroupIndex=None): config = {} ducGroupDict = self.wbducGroupDict if wideband else self.nbducGroupDict for i in self._getIndexList(ducGroupIndex, ducGroupDict): config[i] = ducGroupDict[i].getConfiguration() self.cmdErrorInfo.extend(ducGroupDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for querying the DUC group configuration. # # Deprecated in favor of queryConfiguration(). def queryDucGroupConfigurationNew(self, wideband=True, ducGroupIndex=None): config = {} ducGroupDict = self.wbducGroupDict if wideband else self.nbducGroupDict for i in self._getIndexList(ducGroupIndex, ducGroupDict): config[i] = ducGroupDict[i].queryConfiguration() self.cmdErrorInfo.extend(ducGroupDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for setting the DUC group configuration. 
# # Deprecated in favor of setConfiguration(). def setDucGroupConfigurationNew(self, wideband=True, *args, **kwargs): success = True ducGroupDict = self.wbducGroupDict if wideband else self.nbducGroupDict ducGroupIndex = kwargs.get(configKeys.INDEX, None) for i in self._getIndexList(ducGroupIndex, ducGroupDict): success &= ducGroupDict[i].setConfiguration(*args, **kwargs) self.cmdErrorInfo.extend(ducGroupDict[i].getLastCommandErrorInfo()) return success ## # \internal # \brief Helper function for getting the tuner group configuration. # # Deprecated in favor of getConfiguration(). def getTunerGroupConfigurationNew(self, tunerGroupIndex=None): config = {} for i in self._getIndexList(tunerGroupIndex, self.tunerGroupDict): config[i] = self.tunerGroupDict[i].getConfiguration() self.cmdErrorInfo.extend(self.tunerGroupDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for querying the tuner group configuration. # # Deprecated in favor of queryConfiguration(). def queryTunerGroupConfigurationNew(self, tunerGroupIndex=None): config = {} for i in self._getIndexList(tunerGroupIndex, self.tunerGroupDict): config[i] = self.tunerGroupDict[i].queryConfiguration() self.cmdErrorInfo.extend(self.tunerGroupDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for setting the tuner group configuration. # # Deprecated in favor of setConfiguration(). def setTunerGroupConfigurationNew(self, *args, **kwargs): success = True tunerGroupIndex = kwargs.get(configKeys.INDEX, None) for i in self._getIndexList(tunerGroupIndex, self.tunerGroupDict): success &= self.tunerGroupDict[i].setConfiguration(*args, **kwargs) self.cmdErrorInfo.extend(self.tunerGroupDict[i].getLastCommandErrorInfo()) return success ## # \internal # \brief Helper function for setting the FFT stream configuration. # # Deprecated in favor of setConfiguration(). 
# def setFftStreamConfiguration(self, *args, **kwargs): success = True index = kwargs.get(configKeys.FFT_INDEX, None) for i in self._getIndexList(index, self.fftStreamDict): success &= self.fftStreamDict[i].setConfiguration(**kwargs) self.cmdErrorInfo.extend(self.fftStreamDict[i].getLastCommandErrorInfo()) return success ## # \internal # \brief Helper function for getting the FFT stream configuration. # # Deprecated in favor of getConfiguration(). def getFftStreamConfiguration(self, fftStreamIndex=None): config = {} for i in self._getIndexList(fftStreamIndex, self.fftStreamDict): config[i] = self.fftStreamDict[i].getConfiguration() self.cmdErrorInfo.extend(self.fftStreamDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for querying the FFT stream configuration. # # Deprecated in favor of queryConfiguration(). def queryFftStreamConfiguration(self, fftStreamIndex=None): config = {} for i in self._getIndexList(fftStreamIndex, self.fftStreamDict): config[i] = self.fftStreamDict[i].queryConfiguration() self.cmdErrorInfo.extend(self.fftStreamDict[i].getLastCommandErrorInfo()) return config ## # \internal # \brief Helper function for configuring the IP addresses. 
def configureIp(self,iface,udpBase=41000,maxUdp=None): success = True self.logIfVerbose( "configureIP CALLED" ) if type(iface) is list and len(iface)>1: self.logIfVerbose( "configuring dual interfaces %s"%repr(iface) ) maxUdp = 32 udpList = [] if type(udpBase) in (int,float): udpBase = [udpBase,udpBase] elif type(udpBase) is list: if len(udpBase)==1: udpBase.append(udpBase[0]) for index,interface in enumerate(iface): udpList.append( list(range(udpBase[index]+index*100,udpBase[index]+maxUdp+index*100)) ) mac,dip = getInterfaceAddresses(iface[index]) x = [ int(i) for i in dip.split(".") ] x[-1]+=10 sip = ".".join( [str(i) for i in x] ) sipCmd = command.radio_command( parent=self, cmdString="SIP %d,%s"%(index+1,sip), verbose=self.verbose, logFile=self.logFile ) success &= sipCmd.send( self.sendCommand ) for i in range(maxUdp): args = ", ".join( [str(i) for i in (index+1,i,dip,mac,udpList[index][i],udpList[index][i])] ) dipCmd = command.radio_command( parent=self, cmdString="DIP %s"%args, verbose=self.verbose, logFile=self.logFile ) success &= dipCmd.send( self.sendCommand ) else: self.logIfVerbose("configuring single interface %s"%repr(iface)) if type(iface) is list: iface = iface[0] if maxUdp is None: maxUdp = self.numWbddc+self.numNbddc self.udpList = [list(range(udpBase,udpBase+maxUdp)),] mac,dip = getInterfaceAddresses(iface) x = [ int(i) for i in dip.split(".") ] x[-1]+=10 sip = ".".join( [str(i) for i in x] ) for cmd in ( command.radio_command(parent=self, cmdString="SIP %s"%sip, verbose=self.verbose, logFile=self.logFile), \ command.radio_command(parent=self, cmdString="DIP %s"%dip, verbose=self.verbose, logFile=self.logFile), \ command.radio_command(parent=self, cmdString="TDMAC %s"%mac, verbose=self.verbose, logFile=self.logFile), \ ): success &= cmd.send( self.sendCommand ) return success ## # \brief Gets the number of DDCs on the radio. 
# # \copydetails CyberRadioDriver::IRadio::getNumDdc() @classmethod def getNumDdc(cls, wideband): return len(cls.getDdcIndexRange(wideband)) ## # \brief Gets the allowed rate set for the DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getDdcRateSet() @classmethod def getDdcRateSet(cls, wideband, index=None): ddcObj = cls.wbddcType if wideband else cls.nbddcType return ddcObj.getDdcRateSet(index) if ddcObj is not None else {} ## # \brief Gets the allowed rate list for the DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getDdcRateList() @classmethod def getDdcRateList(cls, wideband, index=None): ddcObj = cls.wbddcType if wideband else cls.nbddcType return ddcObj.getDdcRateList(index) if ddcObj is not None else [] ## # \brief Gets the allowed bandwidth set for the DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getDdcBwSet() @classmethod def getDdcBwSet(cls, wideband, index=None): ddcObj = cls.wbddcType if wideband else cls.nbddcType return ddcObj.getDdcBwSet(index) if ddcObj is not None else {} ## # \brief Gets the allowed bandwidth list for the DDCs on the radio. # # \copydetails CyberRadioDriver::IRadio::getDdcBwList() @classmethod def getDdcBwList(cls, wideband, index=None): ddcObj = cls.wbddcType if wideband else cls.nbddcType return ddcObj.getDdcBwList(index) if ddcObj is not None else [] ## # \brief Gets the set of available DDC data formats. # # \copydetails CyberRadioDriver::IRadio::getDdcDataFormat() @classmethod def getDdcDataFormat(cls, wideband): ddcObj = cls.wbddcType if wideband else cls.nbddcType return ddcObj.getDdcDataFormat() if ddcObj is not None else {} ## # \brief Gets the frequency offset range for the narrowband DDCs on the radio. 
# # \copydetails CyberRadioDriver::IRadio::getNbddcFrequencyRange() @classmethod def getDdcFrequencyRange(cls, wideband, index=None): ddcType = cls.wbddcType if wideband else cls.nbddcType return (0.0,0.0) if ddcType is None else ddcType.frqRange ## # \brief Gets the list of DDC indexes for a specified type. # # \copydetails CyberRadioDriver::IRadio::getDdcIndexRange() @classmethod def getDdcIndexRange(cls, wideband): return cls.getWbddcIndexRange() if wideband else cls.getNbddcIndexRange() ## # \internal # \brief Convenience method for configuring the Ethernet addresses on a radio that does not # have Gigabit Ethernet ports. # # \param sip The source IP address. If this is None, the source IP address will not # be changed. # \param dip The destination IP address. If this is None, the destination IP address # will not be changed. # \param dmac The destination MAC address. If this is None, the destination MAC address # will not be changed. # \return True if the configuration succeeded, False otherwise. def setIpConfiguration(self, sip=None, dip=None, dmac=None): configDict = { configKeys.CONFIG_IP: { } } if sip is not None: configDict[configKeys.CONFIG_IP][configKeys.IP_SOURCE] = copy.deepcopy(sip) if dip is not None: configDict[configKeys.CONFIG_IP][configKeys.IP_DEST] = copy.deepcopy(dip) if dmac is not None: configDict[configKeys.CONFIG_IP][configKeys.MAC_DEST] = copy.deepcopy(dmac) return self._setConfiguration(configDict) ## # \internal def setDip(self,udp,dip="255.255.255.255",dmac="ff:ff:ff:ff:ff:ff",ifIndex=None,subIndex=None): pass ## # \internal # \brief Sets tuner configuration (old-style). # # \deprecated Use setConfiguration() instead. # # \param frequency Tuner frequency. # \param attenuation Tuner attenuation. # \param tunerIndex Either None (configure all tuners), an index number (configure # a specific tuner), or a list of index numbers (configure a set of tuners). # \return True if successful, False otherwise. 
def setTunerConfiguration(self,frequency,attenuation,tunerIndex=None): success = True for i in self._getIndexList(tunerIndex, self.tunerDict): # success &= self.tunerDict[i].setConfiguration(frequency,attenuation) success &= self.tunerDict[i].setConfiguration( **{ configKeys.TUNER_FREQUENCY: frequency, configKeys.TUNER_ATTENUATION: attenuation, } ) return success ## # \internal # \brief Gets tuner configuration (old-style). # # \deprecated Use getConfiguration() instead. # # \param tunerIndex Either None (get for all tuners), an index number (get for # a specific tuner), or a list of index numbers (get for a set of tuners). # \return A dictionary with configuration information. def getTunerConfiguration(self,tunerIndex=None): config = {} for i in self._getIndexList(tunerIndex, self.tunerDict): config[i] = self.tunerDict[i].getConfiguration() return config ## # \internal # \brief Sets tuner frequency (old-style). # # \deprecated Use setConfiguration() instead. # # \param frequency Tuner frequency. # \param tunerIndex Either None (configure all tuners), an index number (configure # a specific tuner), or a list of index numbers (configure a set of tuners). # \return True if successful, False otherwise. def setTunerFrequency(self,frequency,tunerIndex=None): success = True for i in self._getIndexList(tunerIndex, self.tunerDict): # success &= self.tunerDict[i].setFrequency(frequency) success &= self.tunerDict[i].setConfiguration( **{ configKeys.TUNER_FREQUENCY: frequency, } ) return success ## # \internal # \brief Gets tuner frequency information (old-style). # # \deprecated Use getConfiguration() instead. # # \param tunerIndex Either None (get for all tuners), an index number (get for # a specific tuner), or a list of index numbers (get for a set of tuners). # \return A dictionary with frequency information. 
def getTunerFrequency(self,tunerIndex=None,): frqDict = {} for i in self._getIndexList(tunerIndex, self.tunerDict): #frqDict[i] = self.tunerDict[i].getFrequency() frqDict[i] = self.tunerDict[i].configuration.get(configKeys.TUNER_FREQUENCY, None) return frqDict ## # \internal # \brief Sets tuner attenuation (old-style). # # \deprecated Use setConfiguration() instead. # # \param attenuation Tuner attenuation. # \param tunerIndex Either None (configure all tuners), an index number (configure # a specific tuner), or a list of index numbers (configure a set of tuners). # \return True if successful, False otherwise. def setTunerAttenuation(self,attenuation,tunerIndex=None): success = True for i in self._getIndexList(tunerIndex, self.tunerDict): # success &= self.tunerDict[i].setAttenuation(attenuation) success &= self.tunerDict[i].setConfiguration( **{ configKeys.TUNER_ATTENUATION: attenuation, } ) return success ## # \internal # \brief Gets tuner attenuation information (old-style). # # \deprecated Use getConfiguration() instead. # # \param tunerIndex Either None (get for all tuners), an index number (get for # a specific tuner), or a list of index numbers (get for a set of tuners). # \return A dictionary with attenuation information. def getTunerAttenuation(self,tunerIndex=None,): att = {} for i in self._getIndexList(tunerIndex, self.tunerDict): # att[i] = self.tunerDict[i].getAttenuation() att[i] = self.tunerDict[i].configuration.get(configKeys.TUNER_ATTENUATION, None) return att ## # \internal # \brief Sets DDC configuration (old-style). # # \deprecated Use setConfiguration() instead. # # \param wideband Whether the DDC is a wideband DDC. # \param ddcIndex Either None (configure all DDCs), an index number (configure # a specific DDC), or a list of index numbers (configure a set of DDCs). # \param rfIndex DDC RF index number. # \param rateIndex DDC rate index number. # \param udpDest UDP destination. # \param frequency Frequency offset. 
# \param enable 1 if DDC is enabled, 0 if not. # \param vitaEnable VITA 49 streaming option, as appropriate for the radio. # \param streamId VITA 49 stream ID. # \return True if successful, False otherwise. def setDdcConfiguration(self,wideband,ddcIndex=None,rfIndex=1,rateIndex=0,udpDest=0,frequency=0,enable=0,vitaEnable=0,streamId=0): success = True ddcDict = self.wbddcDict if wideband else self.nbddcDict for i in self._getIndexList(ddcIndex,ddcDict): # ddcDict[i].setConfiguration(rfIndex=rfIndex,rateIndex=rateIndex,udpDest=udpDest,frequency=frequency,enable=enable,vitaEnable=vitaEnable,streamId=streamId) success &= ddcDict[i].setConfiguration( **{ configKeys.NBDDC_RF_INDEX: rfIndex, configKeys.DDC_RATE_INDEX: rateIndex, configKeys.DDC_UDP_DESTINATION: udpDest, configKeys.DDC_FREQUENCY_OFFSET: frequency, configKeys.ENABLE: enable, configKeys.DDC_VITA_ENABLE: vitaEnable, configKeys.DDC_STREAM_ID: streamId, } ) return success ## # \brief Disables ethernet flow control on the radio. # # \copydetails CyberRadioDriver::IRadio::disableTenGigFlowControl() def disableTenGigFlowControl(self,): return self.setTenGigFlowControlStatus(False) ## # \brief Enables ethernet flow control on the radio. # # \copydetails CyberRadioDriver::IRadio::enableTenGigFlowControl() def enableTenGigFlowControl(self,): return self.setTenGigFlowControlStatus(True) ## # \brief method to enable or disable ethernet flow control on the radio. # # \copydetails CyberRadioDriver::IRadio::getTenGigFlowControlStatus() def setTenGigFlowControlStatus(self,enable=False): return False ## # \brief Queries status of flow control handling. # # \copydetails CyberRadioDriver::IRadio::getTenGigFlowControlStatus() def getTenGigFlowControlStatus(self,): return {} ## # \brief Performs coherent tuning. 
# # \copydetails CyberRadioDriver::IRadio::coherentTune() def coherentTune(self, cohGroup, freq): ret = True if self.cohTuneCmd is not None: cDict = { "parent": self, \ "verbose": self.verbose, \ "logFile": self.logFile, \ configKeys.TUNER_COHERENT_GROUP: cohGroup, configKeys.TUNER_FREQUENCY: freq, } cmd = self.cohTuneCmd(**cDict) ret &= cmd.send( self.sendCommand, ) self.logIfVerbose("coherentTune send result =", ret) ret &= cmd.success self.logIfVerbose("coherentTune success result =", ret) self._addLastCommandErrorInfo(cmd) if ret: self.logIfVerbose("force tuner requery") self.queryTunerConfigurationNew(tunerIndex=None) pass else: ret = False return ret ## # \brief Gets the current FPGA state. # # \copydetails CyberRadioDriver::IRadio::getFpgaState() def getFpgaState(self): ret = None if self.fpgaStateCmd is not None: ret = self.getConfigurationByKeys("fpgaState") return ret ## # \brief Sets the current FPGA state. # # \copydetails CyberRadioDriver::IRadio::setFpgaState() def setFpgaState(self, state): ret = False if self.fpgaStateCmd is not None: ret = self.setConfiguration({"fpgaState": state}) return ret # OVERRIDE ## # \brief Sets whether or not the object is in verbose mode. # # \copydetails CyberRadioDriver::IRadio::setVerbose() def setVerbose(self, verbose): # Set this object's verbose mode log._logger.setVerbose(self, verbose) # Set verbose mode on all components for obj in self.componentList: obj.setVerbose(verbose) ## # \brief Sets the log file. # # \copydetails CyberRadioDriver::IRadio::setLogFile() def setLogFile(self, logFile): # Set this object's log file log._logger.setLogFile(self, logFile) # Set log file on all components for obj in self.componentList: obj.setLogFile(logFile) ## # \brief Gets the list of connected data port interface indices. 
# # \copydetails CyberRadioDriver::IRadio::getConnectedDataPorts() def getConnectedDataPorts(self): ret = [] if self.isCrddConnection: ret = self._crddGetConnectedDataPortIndices() return ret ## # \internal # \brief Converts a user-specified time string into a number of seconds # since 1/1/70. # # The time string can be either: # \li Absolute time, in any supported format # \li Relative time specified as now{-n}, where n is a number of seconds # \li Relative time specified as now{-[[H:]MM:]SS} # \li "begin", which is the beginning of known time (1/1/70) # \li "end", which is the end of trackable time and far beyond the # useful life of this utility (01/18/2038) # # \throws RuntimeException if the time string format cannot be understood. # \param timestr The time string. # \param utc Whether or not the user's time string is in UTC time. # \return The time, in number of seconds since the Epoch @staticmethod def timeFromString(timestr, utc=True): ret = 0 tm = None tstr = timestr.strip() if tstr == "": ret = 0 elif tstr == "begin": ret = 0 elif tstr == "end": ret = sys.maxsize else: if tstr.find('now') != -1: tm = datetime.datetime.utcnow() if utc else datetime.datetime.now() i = tstr.find('-') if i != -1: tmp = tstr[i+1:] tm = tm - datetime.timedelta(seconds=_radio.timeSecsFromString(tmp)) else: # Replace strings "today" and "yesterday" tmToday = datetime.datetime.utcnow() if utc else datetime.datetime.now() tmYesterday = tmToday - datetime.timedelta(days=1) dateStrToday = tmToday.strftime("%Y%m%d") dateStrYesterday = tmYesterday.strftime("%Y%m%d") tstr = tstr.replace("today", dateStrToday).replace("yesterday", dateStrYesterday) # Try a series of known formats # -- Formats are 5-tuples: (format string, width, needs year, needs month, needs day) supportedFmts = [ \ ('%Y-%m-%dT%H:%M:%S%z', 24, False, False, False), \ ('%Y-%m-%dT%H:%M:%S', 19, False, False, False), \ ('%Y%m%d:%H%M%S', 15, False, False, False), \ ('%a %b %d %H:%M:%S %Y', 24, False, False, False), \ ('%b 
%d %H:%M:%S', 15, True, False, False), \ ('%b %d, %Y %I:%M:%S %p', 24, False, False, False), \ ('%Y-%m-%d %H:%M:%S', 19, False, False, False), \ ('%Y/%m/%d %H:%M:%S', 19, False, False, False), \ ('%Y%m%d_%H%M%S', 15, False, False, False), \ ('%m/%d/%Y %H:%M', 16, False, False, False), \ ('%m/%d/%y %H:%M:%S', 17, False, False, False), \ ('%Y%m%d', 8, False, False, False), \ ('%Y-%m-%d', 10, False, False, False), \ ('%H:%M:%S', 8, True, True, True), \ ('%H%M%S', 6, True, True, True), \ ] for fmt in supportedFmts: try: tmp = tstr[:fmt[1]].strip() #print "[DBG][timeFromString] Convert" #print "[DBG][timeFromString] -- string:", tmp #print "[DBG][timeFromString] -- format:", fmt[0] tm = datetime.datetime.strptime(tmp, fmt[0]) #print "[DBG][timeFromString] -- SUCCESS" # Replace date items from today's date as needed by the format # -- Day if fmt[4]: tm = tm.replace(day=tmToday.day) # -- Month if fmt[3]: tm = tm.replace(month=tmToday.month) # -- Year if fmt[2]: tm = tm.replace(year=tmToday.year) # But if the resulting date is in the future, then we need to dial it # back a year if tm > tmToday: tm = tm.replace(year=tmToday.year-1) break except: #print "[DBG][timeFromString] -- FAILURE" tm = None if tm is not None: ret = time.mktime(tm.timetuple()) else: raise RuntimeError("Improperly formatted time: \"" + tstr + "\"") return ret ## # Converts a time string ([+-][[H:]M:]S) to a time in seconds. # # \note Hours and minutes are not bounded in any way. These strings provide the # same result: # \li "7200" # \li "120:00" # \li "2:00:00" # # \throws RuntimeError if the time is formatted improperly. # \param timeStr The time string. # \return The number of seconds. 
@staticmethod def timeSecsFromString(timeStr): hrs = 0 mins = 0 secs = 0 sgn = 1 try: if "-" in timeStr: sgn = -1 tmp = timeStr.strip().translate(None, " +-") if tmp != "": vec = tmp.split(":") if vec[-1] != "": secs = int(vec[-1]) else: raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"") if len(vec) > 1: if vec[-2] != "": mins = int(vec[-2]) else: raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"") if len(vec) > 2: if vec[-3] != "": hrs = int(vec[-3]) else: raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"") except: raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"") return ( sgn * (hrs * 3600 + mins * 60 + secs) ) ## # \internal # \brief Radio handler class that supports nothing more complicated than # identifying a connected radio. # # Used internally to support radio auto-detection. # # This class implements the CyberRadioDriver.IRadio interface. # class _radio_identifier(_radio): _name = "Radio Identifier" json = False ifSpec = _ifSpec adcRate = 1.0 numTuner = 0 numTunerBoards = 0 tunerType = None numWbddc = 0 wbddcType = None numNbddc = 0 nbddcType = None numTxs = 0 txType = None numWbduc = 0 wbducType = None numNbduc = 0 nbducType = None numWbddcGroups = 0 wbddcGroupType = None numNbddcGroups = 0 nbddcGroupType = None numTunerGroups = 0 tunerGroupType = None numGigE = 0 numGigEDipEntries = 0 idnQry = command.idn verQry = command.ver hrevQry = command.hrev statQry = None tstatQry = None tadjCmd = None resetCmd = None cfgCmd = None ppsCmd = None utcCmd = None refCmd = None rbypCmd = None sipCmd = None dipCmd = None smacCmd = None dmacCmd = None calfCmd = None nbssCmd = None fnrCmd = None gpsCmd = None gposCmd = None rtvCmd = None tempCmd = None gpioStaticCmd = None gpioSeqCmd = None tgfcCmd = None refModes = {} rbypModes = {} vitaEnableOptions = {} connectionModes = ["https", "tcp", "udp", "tty"] validConfigurationKeywords = [] setTimeDefault = False # OVERRIDE ## # \protected # \brief Queries 
# hardware to determine the object's current configuration.
def _queryConfiguration(self):
    """Query hardware for the current configuration (nothing to do here)."""
    # Call the base-class implementation
    configKeys.Configurable._queryConfiguration(self)
    # This radio has nothing further that it can configure

##
# \brief Radio function (mode) command used by JSON-based radios.
#
class funJSON(command._jsonCommandBase):
    """JSON "fun" command: query/set the radio function (mode) state."""
    mnemonic = "fun"
    queryParamMap = {
        configKeys.RADIO_FUNCTION: "state",
    }
    setParamMap = {
        configKeys.RADIO_FUNCTION: "state",
    }

##
# \internal
# \brief Radio handler class that supports nothing more complicated than
# identifying a connected radio.
#
# Used internally to support radio auto-detection.
#
# This class implements the CyberRadioDriver.IRadio interface.
#
class _radio_identifier_json(_radio):
    """Minimal JSON-protocol radio handler used only for auto-detection."""
    _name = "Radio Identifier"
    json = True
    ifSpec = _ifSpec
    adcRate = 1.0
    numTuner = 0
    numTunerBoards = 0
    tunerType = None
    numWbddc = 0
    wbddcType = None
    numNbddc = 0
    nbddcType = None
    numTxs = 0
    txType = None
    numWbduc = 0
    wbducType = None
    numNbduc = 0
    nbducType = None
    numWbddcGroups = 0
    wbddcGroupType = None
    numNbddcGroups = 0
    nbddcGroupType = None
    numTunerGroups = 0
    tunerGroupType = None
    numGigE = 0
    numGigEDipEntries = 0
    # JSON radios identify themselves through the status query and the
    # "fun" command; the classic ASCII queries are disabled.
    idnQry = None
    verQry = None
    hrevQry = None
    statQry = command.status_json
    tstatQry = None
    tadjCmd = None
    resetCmd = None
    cfgCmd = None
    ppsCmd = None
    utcCmd = None
    refCmd = None
    rbypCmd = None
    sipCmd = None
    dipCmd = None
    smacCmd = None
    dmacCmd = None
    calfCmd = None
    nbssCmd = None
    fnrCmd = None
    gpsCmd = None
    gposCmd = None
    rtvCmd = None
    tempCmd = None
    gpioStaticCmd = None
    gpioSeqCmd = None
    tgfcCmd = None
    funCmd = funJSON
    refModes = {}
    rbypModes = {}
    vitaEnableOptions = {}
    connectionModes = ["https", "tcp", "udp", "tty"]
    validConfigurationKeywords = [
        configKeys.RADIO_FUNCTION
    ]
    setTimeDefault = False

    # OVERRIDE
    ##
    # \brief Returns version information for the radio.
    #
    # \copydetails CyberRadioDriver::IRadio::getVersionInfo()
    def getVersionInfo(self):
        """Return (and lazily populate) the radio's version information."""
        # Query hardware for details if we don't have them already
        keys = [configKeys.VERINFO_MODEL, configKeys.VERINFO_SN,
                configKeys.VERINFO_SW, configKeys.VERINFO_FW,
                configKeys.VERINFO_REF, configKeys.VERINFO_UNITREV,
                configKeys.VERINFO_HW]
        if not all([key in self.versionInfo for key in keys]):
            cmd = self.statQry(parent=self, query=True,
                               verbose=self.verbose, logFile=self.logFile)
            cmd.send(self.sendCommand, )
            self._addLastCommandErrorInfo(cmd)
            rspInfo = cmd.getResponseInfo()
            if rspInfo is not None:
                self._dictUpdate(self.versionInfo, rspInfo, {}, keys)
        # Fill in placeholders for anything the hardware did not report.
        for key in keys:
            if key not in self.versionInfo:
                self.versionInfo[key] = "N/A"
        return self.versionInfo

    # OVERRIDE
    ##
    # \protected
    # \brief Queries hardware to determine the object's current configuration.
    def _queryConfiguration(self):
        # Call the base-class implementation
        configKeys.Configurable._queryConfiguration(self)
        # Call the radio function command
        if self.funCmd is not None:
            cmd = self.funCmd(parent=self, query=True,
                              verbose=self.verbose, logFile=self.logFile)
            cmd.send(self.sendCommand, )
            self._addLastCommandErrorInfo(cmd)
            rspInfo = cmd.getResponseInfo()
            if rspInfo is not None:
                for key in self.validConfigurationKeywords:
                    val = rspInfo.get(key, None)
                    if val is not None:
                        self.configuration[key] = val
        # This radio has nothing further that it can configure

#-- End Radio Handler Objects --------------------------------------------------#

#-- NOTE: Radio handler objects for supporting specific radios need to be
#   implemented under the CyberRadioDriver.radios package tree.
43.884634
168
0.591156
17,347
179,927
6.084799
0.081051
0.028649
0.048146
0.039241
0.565299
0.519341
0.479371
0.428325
0.382547
0.34018
0
0.005405
0.324454
179,927
4,099
169
43.89534
0.862994
0.276668
0
0.472431
0
0
0.013
0
0
0
0.000094
0
0
1
0.092732
false
0.010443
0.006266
0.045948
0.291145
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d006b0d7e89fe26f4e43d422a80339277272355
3,836
py
Python
synthdid/variance.py
MasaAsami/pysynthdid
01afe33ae22f513c65f9cfdec56a4b21ca547c28
[ "Apache-2.0" ]
null
null
null
synthdid/variance.py
MasaAsami/pysynthdid
01afe33ae22f513c65f9cfdec56a4b21ca547c28
[ "Apache-2.0" ]
null
null
null
synthdid/variance.py
MasaAsami/pysynthdid
01afe33ae22f513c65f9cfdec56a4b21ca547c28
[ "Apache-2.0" ]
2
2022-03-11T03:13:36.000Z
2022-03-20T22:55:13.000Z
import pandas as pd
import numpy as np
from tqdm import tqdm


class Variance(object):
    """Mixin adding placebo-based variance estimation for synthdid models.

    Relies on attributes/methods supplied by the host estimator class:
    Y_pre_c, Y_post_c, n_treat, random_seed, pre_term, post_term,
    est_zeta, est_omega, est_lambda, est_omega_ADH.
    """

    def estimate_variance(self, algo="placebo", replications=200):
        """
        # algo
        - placebo
        ## The following algorithms are omitted because they are not practical.
        - bootstrap
        - jackknife
        """
        if algo == "placebo":
            Y_pre_c = self.Y_pre_c.copy()
            Y_post_c = self.Y_post_c.copy()
            # Need at least one control left over after drawing the placebo set.
            assert self.n_treat < Y_pre_c.shape[1]
            control_names = Y_pre_c.columns

            result_tau_sdid = []
            result_tau_sc = []
            result_tau_did = []
            for i in tqdm(range(replications)):
                # setup: draw a placebo "treated" group from the controls
                np.random.seed(seed=self.random_seed + i)
                placebo_t = np.random.choice(control_names, self.n_treat, replace=False)
                placebo_c = [col for col in control_names if col not in placebo_t]

                pla_Y_pre_t = Y_pre_c[placebo_t]
                pla_Y_post_t = Y_post_c[placebo_t]
                pla_Y_pre_c = Y_pre_c[placebo_c]
                pla_Y_post_c = Y_post_c[placebo_c]

                pla_result = pd.DataFrame(
                    {"pla_actual_y": pd.concat([pla_Y_pre_t, pla_Y_post_t]).mean(axis=1)}
                )
                post_placebo_treat = pla_result.loc[
                    self.post_term[0]:, "pla_actual_y"
                ].mean()

                # estimation
                ## sdid
                pla_zeta = self.est_zeta(pla_Y_pre_c)
                pla_hat_omega = self.est_omega(pla_Y_pre_c, pla_Y_pre_t, pla_zeta)
                pla_hat_lambda = self.est_lambda(pla_Y_pre_c, pla_Y_post_c)
                ## sc
                pla_hat_omega_ADH = self.est_omega_ADH(pla_Y_pre_c, pla_Y_pre_t)

                # prediction
                ## sdid: drop the intercept weight, then rebuild it explicitly
                pla_hat_omega = pla_hat_omega[:-1]
                pla_Y_c = pd.concat([pla_Y_pre_c, pla_Y_post_c])
                n_features = pla_Y_pre_c.shape[1]
                start_w = np.repeat(1 / n_features, n_features)
                _intercept = (start_w - pla_hat_omega) @ pla_Y_pre_c.T @ pla_hat_lambda
                pla_result["sdid"] = pla_Y_c.dot(pla_hat_omega) + _intercept
                ## sc
                pla_result["sc"] = pla_Y_c.dot(pla_hat_omega_ADH)

                # cal tau
                ## sdid
                pre_sdid = pla_result["sdid"].head(len(pla_hat_lambda)) @ pla_hat_lambda
                post_sdid = pla_result.loc[self.post_term[0]:, "sdid"].mean()
                pre_treat = (pla_Y_pre_t.T @ pla_hat_lambda).values[0]
                sdid_counterfuctual_post_treat = pre_treat + (post_sdid - pre_sdid)
                result_tau_sdid.append(post_placebo_treat - sdid_counterfuctual_post_treat)
                ## sc
                sc_counterfuctual_post_treat = pla_result.loc[
                    self.post_term[0]:, "sc"
                ].mean()
                result_tau_sc.append(post_placebo_treat - sc_counterfuctual_post_treat)
                # did
                did_post_actural_treat = (
                    post_placebo_treat
                    - pla_result.loc[: self.pre_term[1], "pla_actual_y"].mean()
                )
                did_counterfuctual_post_treat = (
                    pla_Y_post_c.mean(axis=1).mean() - pla_Y_pre_c.mean(axis=1).mean()
                )
                result_tau_did.append(did_post_actural_treat - did_counterfuctual_post_treat)

            return (
                np.var(result_tau_sdid),
                np.var(result_tau_sc),
                np.var(result_tau_did),
            )
36.884615
88
0.516945
477
3,836
3.708595
0.192872
0.052007
0.042397
0.040701
0.229508
0.131148
0.131148
0.071227
0
0
0
0.00651
0.399374
3,836
103
89
37.242718
0.761285
0.046142
0
0.029412
0
0
0.0184
0
0
0
0
0
0.014706
1
0.014706
false
0
0.044118
0
0.088235
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d01bb83bee5f2c4612c59332de6ea7b9e34ac2f
681
py
Python
todo/views.py
arascch/Todo_list
a4c88abaa4e6c1e158135b4fce4bcfbf64cb86e2
[ "Apache-2.0" ]
1
2020-03-24T09:26:23.000Z
2020-03-24T09:26:23.000Z
todo/views.py
arascch/Todo_list
a4c88abaa4e6c1e158135b4fce4bcfbf64cb86e2
[ "Apache-2.0" ]
null
null
null
todo/views.py
arascch/Todo_list
a4c88abaa4e6c1e158135b4fce4bcfbf64cb86e2
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render from django.utils import timezone from todo.models import Todo from django.http import HttpResponseRedirect def home(request): todo_items = Todo.objects.all().order_by("-added_date") return render(request , 'todo/index.html' , {"todo_items":todo_items}) def add_todo(request): Current_date = timezone.now() content = request.POST["content"] created_obj = Todo.objects.create(added_date = Current_date , text = content ) length_of_todos = Todo.objects.all().count() return HttpResponseRedirect('/') def delete_todo(request , todo_id): Todo.objects.get(id = todo_id).delete() return HttpResponseRedirect('/')
35.842105
82
0.737151
89
681
5.47191
0.449438
0.090349
0.053388
0
0
0
0
0
0
0
0
0
0.142438
681
18
83
37.833333
0.833904
0
0
0.125
0
0
0.066079
0
0
0
0
0
0
1
0.1875
false
0
0.25
0
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d02e73cfc6d5e0a0462f594bbcafd9199cb2c88
816
py
Python
Easy/Hangman/HangMan - Stage 6.py
michael-act/HyperSkill
ce16eb3b6f755f7f8f21a57ef2679fcb8a4bd55c
[ "MIT" ]
1
2020-11-17T18:09:30.000Z
2020-11-17T18:09:30.000Z
Easy/Hangman/HangMan - Stage 6.py
michael-act/HyperSkill
ce16eb3b6f755f7f8f21a57ef2679fcb8a4bd55c
[ "MIT" ]
null
null
null
Easy/Hangman/HangMan - Stage 6.py
michael-act/HyperSkill
ce16eb3b6f755f7f8f21a57ef2679fcb8a4bd55c
[ "MIT" ]
null
null
null
import random category = ['python', 'java', 'kotlin', 'javascript'] computer = random.choice(category) hidden = list(len(computer) * "-") print("H A N G M A N") counter = 8 while counter > 0: print() print("".join(hidden)) letter = input("Input a letter: ") if (letter in hidden) or (letter in hidden and times == 7): counter -= 1 print("No improvements") elif letter in set(computer): where = 0 for i in range(computer.count(letter)): where = computer.index(letter, 0 + where) hidden[where] = letter where += where + 1 if "-" not in hidden: print() print("".join(hidden)) print("You guessed the word!") print("You survived!") break else: counter -= 1 print("No such letter in the word") print(counter) else: print("You are hanged!")
24
61
0.616422
113
816
4.451327
0.469027
0.063618
0.055666
0.079523
0
0
0
0
0
0
0
0.0128
0.234069
816
34
62
24
0.792
0
0
0.258065
0
0
0.1875
0
0
0
0
0
0
1
0
false
0
0.032258
0
0.032258
0.354839
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d03157b2910202ba3c53d84197f7000003a404d
6,536
py
Python
sklcc/taskEdit.py
pyxuweitao/MSZ_YCL
23323c4660f44af0a45d6ab81cd496b81976f5a0
[ "Apache-2.0" ]
null
null
null
sklcc/taskEdit.py
pyxuweitao/MSZ_YCL
23323c4660f44af0a45d6ab81cd496b81976f5a0
[ "Apache-2.0" ]
null
null
null
sklcc/taskEdit.py
pyxuweitao/MSZ_YCL
23323c4660f44af0a45d6ab81cd496b81976f5a0
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Task-related helper functions (task list, edit, commit, delete, lookups).
"""
__author__ = "XuWeitao"

import CommonUtilities
import rawSql

# NOTE(review): every query below builds SQL via %-interpolation. If any of
# these values can come from untrusted input, this is an injection risk —
# confirm and consider parameterized queries.


def getTasksList(UserID):
    """Return the task list visible to a user.

    :param UserID: creator/collaborator ID; "ALL" returns every task.
    :return: list of dicts with keys SerialNo, CreateTime, LastModifiedTime,
        ProductNo, ColorNo, ArriveTime, Name, GongYingShang {id, name},
        WuLiao {id, name, cata}, DaoLiaoZongShu, DanWei {id, name},
        DaoLiaoZongShu2, DanWei2 {id, name}, XieZuoRen (collaborators,
        excluding the task creator).
    """
    raw = rawSql.Raw_sql()
    raw.sql = """SELECT SerialNo, CONVERT(VARCHAR(16), CreateTime, 20) CreateTime,
    CONVERT(VARCHAR(16), LastModifiedTime, 20) LastModifiedTime, ProductNo, ColorNo,
    CONVERT(VARCHAR(10), ArriveTime, 20) ArriveTime, dbo.getUserNameByUserID(UserID),
    SupplierID, dbo.getSupplierNameByID(SupplierID), MaterialID,
    dbo.getMaterialNameByID(MaterialID),
    dbo.getMaterialTypeNameByID(dbo.getMaterialTypeIDByMaterialID(MaterialID)),
    DaoLiaoZongShu, UnitID, dbo.getUnitNameByID(UnitID),
    DaoLiaoZongShu2, UnitID2, dbo.getUnitNameByID(UnitID2) AS DanWei2,
    Inspectors, UserID
    FROM RMI_TASK WITH(NOLOCK)"""
    # Collaborators can also see the task.
    if UserID != 'ALL':
        raw.sql += " WHERE CHARINDEX('%s', Inspectors) > 0 AND State = 2" % UserID
    else:
        raw.sql += " WHERE State = 0"
    res = raw.query_all()
    jsonReturn = list()
    for row in res:
        # Collaborators are '@'-separated and include the task creator;
        # the creator (row[19]) is filtered out of the returned list.
        Inspectors = row[18].split('@')
        InspectorList = list()
        for inspectorNo in Inspectors:
            if inspectorNo == row[19]:
                continue
            raw.sql = "SELECT DBO.getUserNameByUserID('%s')" % inspectorNo
            inspectorName = raw.query_one()
            if inspectorName:
                inspectorName = inspectorName[0]
            InspectorList.append({'Name': inspectorName, 'ID': inspectorNo})
        jsonReturn.append({
            "SerialNo": row[0],
            "CreateTime": row[1],
            "LastModifiedTime": row[2],
            "ProductNo": row[3],
            "ColorNo": row[4],
            "ArriveTime": row[5],
            "Name": row[6],
            "GongYingShang": {"id": row[7], "name": row[8]},
            "WuLiao": {"id": row[9], "name": row[10], "cata": row[11]},
            "DaoLiaoZongShu": row[12],
            "DanWei": {"id": row[13], "name": row[14]},
            "DaoLiaoZongShu2": row[15],
            "DanWei2": {"id": row[16], "name": row[17]},
            "XieZuoRen": InspectorList
        })
    return jsonReturn


def editTaskInfo(taskInfo, userID):
    """Insert a new task or update an existing one, based on taskInfo['isNew'].

    :param taskInfo: task fields from the front end.
    :param userID: current user's ID.
    :return: for a new task, the generated SerialNo; otherwise None.
    """
    raw = rawSql.Raw_sql()
    # "isReturn" marks a task being sent back for rework.
    if "isReturn" in taskInfo:
        raw.sql = "UPDATE RMI_TASK WITH(ROWLOCK) SET State = 2 WHERE SerialNo = '%s'" % taskInfo['SerialNo']
        raw.update()
    else:
        isNew = True if taskInfo['isNew'] == "True" else False
        # Default missing optional fields (front-end null arrives as absent key).
        taskInfo['DaoLiaoZongShu2'] = False if 'DaoLiaoZongShu2' not in taskInfo else taskInfo['DaoLiaoZongShu2']
        taskInfo['DanWei2'] = {'id': None} if 'DanWei2' not in taskInfo else taskInfo['DanWei2']
        # The front end sends collaborators WITHOUT the current user; add them.
        if 'XieZuoRen' in taskInfo:
            taskInfo['XieZuoRen'].append({'ID': userID})
            taskInfo['Inspectors'] = "@".join([User['ID'] for User in taskInfo['XieZuoRen']])
        else:
            taskInfo['Inspectors'] = userID
        if isNew:
            raw.sql = """INSERT INTO RMI_TASK WITH(ROWLOCK)
            (CreateTime, LastModifiedTime, ProductNo, ColorNo, ArriveTime, UserID,
            FlowID, MaterialID, SupplierID, UnitID, DaoLiaoZongShu, DaoLiaoZongShu2,
            UnitID2, Inspectors)
            VALUES ( getdate(), getdate(),'%s','%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', %s, %s, '%s' );""" % (
                taskInfo['ProductNo'], taskInfo['ColorNo'],
                taskInfo['ArriveTime'][:10], userID, taskInfo['FlowID'],
                taskInfo['WuLiao']['id'], taskInfo['GongYingShang']['id'],
                taskInfo['DanWei']['id'], taskInfo['DaoLiaoZongShu'],
                "'" + unicode(taskInfo['DaoLiaoZongShu2']) + "'" if taskInfo['DaoLiaoZongShu2'] else "NULL",
                "'" + unicode(taskInfo['DanWei2']['id']) + "'" if taskInfo['DanWei2']['id'] else "NULL",
                taskInfo['Inspectors'])
            raw.update()
            # Return the new SerialNo for the quick-create flow in the UI.
            raw.sql = "SELECT TOP 1 SerialNo FROM RMI_TASK WHERE UserID = '%s' AND State = 2 ORDER BY CreateTime desc" % userID
            return raw.query_one()[0]
        else:
            raw.sql = """UPDATE RMI_TASK WITH(ROWLOCK)
            SET MaterialID = '%s',SupplierID = '%s', UnitID = '%s', DaoLiaoZongShu = '%s',
            ProductNo = '%s', ColorNo = '%s', ArriveTime = '%s', DaoLiaoZongShu2 = %s,
            UnitID2 = %s, Inspectors = '%s'
            WHERE SerialNo = '%s'""" % (
                taskInfo['WuLiao']['id'], taskInfo['GongYingShang']['id'],
                taskInfo['DanWei']['id'], taskInfo['DaoLiaoZongShu'],
                taskInfo['ProductNo'], taskInfo['ColorNo'],
                taskInfo['ArriveTime'][:10].replace('-', ''),
                "'" + unicode(taskInfo['DaoLiaoZongShu2']) + "'" if taskInfo['DaoLiaoZongShu2'] else "NULL",
                "'" + unicode(taskInfo['DanWei2']['id']) + "'" if taskInfo['DanWei2']['id'] else "NULL",
                taskInfo['Inspectors'], taskInfo['SerialNo'])
            raw.update()


def getFlowList():
    """Fetch all work flows as [{"name": FlowName, "value": FlowID}, ...]."""
    raw = rawSql.Raw_sql()
    raw.sql = "SELECT FlowID AS value, FlowName AS name FROM RMI_WORK_FLOW WITH(NOLOCK)"
    res, columns = raw.query_all(needColumnName=True)
    return CommonUtilities.translateQueryResIntoDict(columns, res)


def commitTaskBySerialNo(SerialNo):
    """Mark the task with the given serial number as committed (State = 0)."""
    raw = rawSql.Raw_sql()
    raw.sql = "UPDATE RMI_TASK SET State = 0 WHERE SerialNo = '%s'" % SerialNo
    raw.update()
    return


def deleteTaskBySerialNo(SerialNo):
    """Delete a task from RMI_TASK; related tables are cleaned up by trigger.

    :param SerialNo: task serial number.
    """
    # TODO: trigger update_other_tables_when_delete_rmi_task removes related
    # rows from the other tables (everything except F01).
    raw = rawSql.Raw_sql()
    raw.sql = "DELETE FROM RMI_TASK WHERE SerialNo='%s'" % SerialNo
    raw.update()
    # call trigger delete all task info in rmi_task_process...
    return


def getAllMaterialByName(fuzzyName):
    """Fuzzy-search materials by name.

    :param fuzzyName: keyword; empty input returns a placeholder entry.
    :return: [{'id': material ID, 'name': material name, 'cata': type name}]
    """
    raw = rawSql.Raw_sql()
    raw.sql = """SELECT MaterialID AS id, MaterialName AS name,
    dbo.getMaterialTypeNameByID(MaterialTypeID) AS cata
    FROM RMI_MATERIAL_NAME WITH(NOLOCK)"""
    if fuzzyName:
        raw.sql += """ WHERE MaterialName LIKE '%%%%%s%%%%'""" % fuzzyName
        res, cols = raw.query_all(needColumnName=True)
        return CommonUtilities.translateQueryResIntoDict(cols, res)
    else:
        # Return a placeholder when no keyword was given; returning the full
        # table makes the front end lag.
        return [{"name": u'请输入关键字', "id": "", "cata": ""}]
41.106918
140
0.671665
727
6,536
5.990371
0.272352
0.026177
0.006889
0.008266
0.254879
0.240413
0.190815
0.148335
0.098737
0.098737
0
0.014958
0.15101
6,536
158
141
41.367089
0.769868
0.190024
0
0.19802
0
0.059406
0.471741
0.057837
0
0
0
0.006329
0
1
0.059406
false
0
0.019802
0
0.148515
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d064db24d2e119266bc78323c4a529982872160
744
py
Python
Leetcoding-Actions/my-weekly-DSA-challenge/2020-w44-p0200-Number-of-Islands.py
shoaibur/SWE
1e114a2750f2df5d6c50b48c8e439224894d65da
[ "MIT" ]
1
2020-11-14T18:28:13.000Z
2020-11-14T18:28:13.000Z
Leetcoding-Actions/my-weekly-DSA-challenge/2020-w44-p0200-Number-of-Islands.py
shoaibur/SWE
1e114a2750f2df5d6c50b48c8e439224894d65da
[ "MIT" ]
null
null
null
Leetcoding-Actions/my-weekly-DSA-challenge/2020-w44-p0200-Number-of-Islands.py
shoaibur/SWE
1e114a2750f2df5d6c50b48c8e439224894d65da
[ "MIT" ]
null
null
null
class Solution: def numIslands(self, grid: List[List[str]]) -> int: ''' T: O(mn) and S: O(1) ''' if not grid: return 0 nrow, ncol = len(grid), len(grid[0]) def exploreIsland(grid, i, j): if i < 0 or i > nrow - 1 or j < 0 or j > ncol-1 or grid[i][j] == "0": return grid[i][j] = "0" for (ni, nj) in [(i-1, j), (i+1, j), (i, j-1), (i, j+1)]: exploreIsland(grid, ni, nj) count_island = 0 for i in range(nrow): for j in range(ncol): if grid[i][j] == "1": exploreIsland(grid, i, j) count_island += 1 return count_island
32.347826
81
0.415323
106
744
2.886792
0.311321
0.045752
0.098039
0.124183
0.130719
0
0
0
0
0
0
0.038005
0.43414
744
22
82
33.818182
0.688836
0.026882
0
0
0
0
0.004286
0
0
0
0
0
0
1
0.117647
false
0
0
0
0.294118
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d07e918f729733a967e2d67e465e2cf7ce7d2a4
11,417
py
Python
tensor2tensor/models/revnet.py
ysglh/tensor2tensor
f55462a9928f3f8af0b1275a4fb40d13cae6cc79
[ "Apache-2.0" ]
null
null
null
tensor2tensor/models/revnet.py
ysglh/tensor2tensor
f55462a9928f3f8af0b1275a4fb40d13cae6cc79
[ "Apache-2.0" ]
null
null
null
tensor2tensor/models/revnet.py
ysglh/tensor2tensor
f55462a9928f3f8af0b1275a4fb40d13cae6cc79
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2017 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Creates a RevNet with the bottleneck residual function. Implements the following equations described in the RevNet paper: y1 = x1 + f(x2) y2 = x2 + g(y1) However, in practice, the authors use the following equations to downsample tensors inside a RevNet block: y1 = h(x1) + f(x2) y2 = h(x2) + g(y1) In this case, h is the downsampling function used to change number of channels. These modified equations are evident in the authors' code online: https://github.com/renmengye/revnet-public For reference, the original paper can be found here: https://arxiv.org/pdf/1707.04585.pdf """ # Dependency imports from tensor2tensor.layers import common_hparams from tensor2tensor.layers import rev_block from tensor2tensor.utils import registry from tensor2tensor.utils import t2t_model import tensorflow as tf CONFIG = {'2d': {'conv': tf.layers.conv2d, 'max_pool': tf.layers.max_pooling2d, 'avg_pool': tf.layers.average_pooling2d, 'split_axis': 3, 'reduction_dimensions': [1, 2] }, '3d': {'conv': tf.layers.conv3d, 'max_pool': tf.layers.max_pooling3d, 'avg_pool': tf.layers.average_pooling2d, 'split_axis': 4, 'reduction_dimensions': [1, 2, 3] } } def f(x, depth1, depth2, dim='2d', first_batch_norm=True, layer_stride=1, training=True, padding='SAME'): """Applies bottleneck residual function for 104-layer RevNet. 
Args: x: input tensor depth1: Number of output channels for the first and second conv layers. depth2: Number of output channels for the third conv layer. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. first_batch_norm: Whether to keep the first batch norm layer or not. Typically used in the first RevNet block. layer_stride: Stride for the first conv filter. Note that this particular 104-layer RevNet architecture only varies the stride for the first conv filter. The stride for the second conv filter is always set to 1. training: True for train phase, False for eval phase. padding: Padding for each conv layer. Returns: Output tensor after applying residual function for 104-layer RevNet. """ conv = CONFIG[dim]['conv'] with tf.variable_scope('f'): if first_batch_norm: net = tf.layers.batch_normalization(x, training=training) net = tf.nn.relu(net) else: net = x net = conv(net, depth1, 1, strides=layer_stride, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth1, 3, strides=1, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth2, 1, strides=1, padding=padding, activation=None) return net def h(x, output_channels, dim='2d', layer_stride=1, scope='h'): """Downsamples 'x' using a 1x1 convolution filter and a chosen stride. Args: x: input tensor of size [N, H, W, C] output_channels: Desired number of output channels. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. layer_stride: What stride to use. Usually 1 or 2. scope: Optional variable scope for the h function. This function uses a 1x1 convolution filter and a chosen stride to downsample the input tensor x. Returns: A downsampled tensor of size [N, H/2, W/2, output_channels] if layer_stride is 2, else returns a tensor of size [N, H, W, output_channels] if layer_stride is 1. 
""" conv = CONFIG[dim]['conv'] with tf.variable_scope(scope): x = conv(x, output_channels, 1, strides=layer_stride, padding='SAME', activation=None) return x def init(images, num_channels, dim='2d', training=True, scope='init'): """Standard ResNet initial block used as first RevNet block. Args: images: [N, H, W, 3] tensor of input images to the model. num_channels: Output depth of convolutional layer in initial block. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. training: True for train phase, False for eval phase. scope: Optional scope for the init block. Returns: Two [N, H, W, C] output activations from input images. """ conv = CONFIG[dim]['conv'] pool = CONFIG[dim]['max_pool'] with tf.variable_scope(scope): net = conv(images, num_channels, 7, strides=2, padding='SAME', activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = pool(net, pool_size=3, strides=2) x1, x2 = tf.split(net, 2, axis=CONFIG[dim]['split_axis']) return x1, x2 def unit(x1, x2, block_num, depth1, depth2, num_layers, dim='2d', first_batch_norm=True, stride=1, training=True): """Implements bottleneck RevNet unit from authors' RevNet-104 architecture. Args: x1: [N, H, W, C] tensor of network activations. x2: [N, H, W, C] tensor of network activations. block_num: integer ID of block depth1: First depth in bottleneck residual unit. depth2: Second depth in bottleneck residual unit. num_layers: Number of layers in the RevNet block. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. first_batch_norm: Whether to keep the first batch norm layer or not. Typically used in the first RevNet block. stride: Stride for the residual function. training: True for train phase, False for eval phase. Returns: Two [N, H, W, C] output activation tensors. 
""" scope_name = 'unit_%d' % block_num with tf.variable_scope(scope_name): # Manual implementation of downsampling with tf.variable_scope('downsampling'): with tf.variable_scope('x1'): hx1 = h(x1, depth2, dim=dim, layer_stride=stride) fx2 = f(x2, depth1, depth2, dim=dim, layer_stride=stride, first_batch_norm=first_batch_norm, training=training) x1 = hx1 + fx2 with tf.variable_scope('x2'): hx2 = h(x2, depth2, dim=dim, layer_stride=stride) fx1 = f(x1, depth1, depth2, dim=dim, training=training) x2 = hx2 + fx1 # Full block using memory-efficient rev_block implementation. with tf.variable_scope('full_block'): residual_func = lambda x: f(x, depth1, depth2, dim=dim, training=training) x1, x2 = rev_block.rev_block(x1, x2, residual_func, residual_func, num_layers=num_layers) return x1, x2 def final_block(x1, x2, dim='2d', training=True, scope='final_block'): """Converts activations from last RevNet block to pre-logits. Args: x1: [NxHxWxC] tensor of network activations. x2: [NxHxWxC] tensor of network activations. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. training: True for train phase, False for eval phase. scope: Optional variable scope for the final block. Returns: [N, hidden_dim] pre-logits tensor from activations x1 and x2. """ # Final batch norm and relu with tf.variable_scope(scope): y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis']) y = tf.layers.batch_normalization(y, training=training) y = tf.nn.relu(y) # Global average pooling net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'], name='final_pool', keep_dims=True) return net def revnet104(inputs, hparams, reuse=None): """Uses Tensor2Tensor memory optimized RevNet block to build a RevNet. Args: inputs: [NxHxWx3] tensor of input images to the model. 
hparams: HParams object that contains the following parameters, in addition to the parameters contained in the basic_params1() object in the common_hparams module: num_channels_first - A Python list where each element represents the depth of the first and third convolutional layers in the bottleneck residual unit for a given block. num_channels_second - A Python list where each element represents the depth of the second convolutional layer in the bottleneck residual unit for a given block. num_layers_per_block - A Python list containing the number of RevNet layers for each block. first_batch_norm - A Python list containing booleans representing the presence of a batch norm layer at the beginning of a given block. strides - A Python list containing integers representing the stride of the residual function for each block. num_channels_init_block - An integer representing the number of channels for the convolutional layer in the initial block. dimension - A string (either "2d" or "3d") that decides if the RevNet is 2-dimensional or 3-dimensional. reuse: Whether to reuse the default variable scope. Returns: [batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet. 
""" training = hparams.mode == tf.estimator.ModeKeys.TRAIN with tf.variable_scope('RevNet104', reuse=reuse): x1, x2 = init(inputs, num_channels=hparams.num_channels_init_block, dim=hparams.dim, training=training) for block_num in range(1, len(hparams.num_layers_per_block)): block = {'depth1': hparams.num_channels_first[block_num], 'depth2': hparams.num_channels_second[block_num], 'num_layers': hparams.num_layers_per_block[block_num], 'first_batch_norm': hparams.first_batch_norm[block_num], 'stride': hparams.strides[block_num]} x1, x2 = unit(x1, x2, block_num, dim=hparams.dim, training=training, **block) pre_logits = final_block(x1, x2, dim=hparams.dim, training=training) return pre_logits @registry.register_model class Revnet104(t2t_model.T2TModel): def body(self, features): return revnet104(features['inputs'], self.hparams) @registry.register_hparams def revnet_base(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.add_hparam('num_channels_first', [64, 128, 256, 416]) hparams.add_hparam('num_channels_second', [256, 512, 1024, 1664]) hparams.add_hparam('num_layers_per_block', [1, 1, 10, 1]) hparams.add_hparam('first_batch_norm', [False, True, True, True]) hparams.add_hparam('strides', [1, 2, 2, 2]) hparams.add_hparam('num_channels_init_block', 32) hparams.add_hparam('dim', '2d') hparams.optimizer = 'Momentum' hparams.learning_rate = 0.01 hparams.weight_decay = 1e-4 # Can run with a batch size of 128 with Problem ImageImagenet224 hparams.tpu_batch_size_per_shard = 128 return hparams
38.441077
80
0.681177
1,619
11,417
4.700432
0.203212
0.01774
0.023916
0.024967
0.373193
0.277267
0.188699
0.171879
0.134297
0.11866
0
0.028882
0.22668
11,417
296
81
38.570946
0.83305
0.504861
0
0.2
0
0
0.077761
0.004248
0
0
0
0
0
1
0.066667
false
0
0.041667
0.008333
0.183333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d08e38fa29119640133acdff959362b1c00409d
4,166
py
Python
tests/unit/test_services.py
BlooAM/Online-shopping-app
aa68d258fe32bf5a792e534dddd9def7c25460e2
[ "MIT" ]
null
null
null
tests/unit/test_services.py
BlooAM/Online-shopping-app
aa68d258fe32bf5a792e534dddd9def7c25460e2
[ "MIT" ]
null
null
null
tests/unit/test_services.py
BlooAM/Online-shopping-app
aa68d258fe32bf5a792e534dddd9def7c25460e2
[ "MIT" ]
null
null
null
import pytest from datetime import date, timedelta from adapters import repository from domain.model import Batch, OrderLine, allocate, OutOfStock from domain import model from service_layer import handlers, unit_of_work class FakeSession: def __init__(self): self.committed = False def commit(self): self.commited = True class FakeRepository(repository.AbstractRepository): def __init__(self, products): super().__init__() self._products = set(products) def _add(self, product): self._products.add(product) def _get(self, sku): return next((p for p in self._products if p.sku == sku), None) class FakeUnitOfWork(unit_of_work.AbstractUnitOfWork): def __init__(self): self.batches = FakeRepository([]) self.committed = False def _commit(self): self.committed = True def rollback(self): pass today = date.today() tomorrow = today + timedelta(days=1) later = tomorrow + timedelta(days=10) def test_add_batch(): uow = FakeUnitOfWork() handlers.add_batch("b1", "CRUNCHY-ARMCHAIR", 100, None, uow) assert uow.batches.get("b1") is not None assert uow.committed def test_prefers_current_stock_batches_to_shipments(): in_stock_batch = Batch("in-stock-batch", "RETRO-CLOCK", 100, eta=None) shipment_batch = Batch("shipment-batch", "RETRO-CLOCK", 100, eta=tomorrow) line = OrderLine("oref", "RETRO-CLOCK", 10) allocate(line, [in_stock_batch, shipment_batch]) assert in_stock_batch.available_quantity == 90 assert shipment_batch.available_quantity == 100 def test_prefers_warehouse_batches_to_shipments(): in_stock_batch = Batch("in-stock-batch", "RETRO-CLOCK", 100, eta=None) shipment_batch = Batch("shipment-batch", "RETRO-CLOCK", 100, eta=tomorrow) repo = FakeRepository([in_stock_batch, shipment_batch]) session = FakeSession() line = OrderLine('oref', "RETRO-CLOCK", 10) handlers.allocate(line, repo, session) assert in_stock_batch.available_quantity == 90 assert shipment_batch.available_quantity == 100 def test_prefers_earlier_batches(): earliest = Batch("speedy-batch", "MINIMALIST-SPOON", 100, 
eta=today) medium = Batch("normal-batch", "MINIMALIST-SPOON", 100, eta=tomorrow) latest = Batch("slow-batch", "MINIMALIST-SPOON", 100, eta=later) line = OrderLine("order1", "MINIMALIST-SPOON", 10) allocate(line, [medium, earliest, latest]) assert earliest.available_quantity == 90 assert medium.available_quantity == 100 assert latest.available_quantity == 100 def test_returns_allocated_batch_ref(): in_stock_batch = Batch("in-stock-batch-ref", "HIGHBROW-POSTER", 100, eta=None) shipment_batch = Batch("shipment-batch-ref", "HIGHBROW-POSTER", 100, eta=tomorrow) line = OrderLine("oref", "HIGHBROW-POSTER", 10) allocation = allocate(line, [in_stock_batch, shipment_batch]) assert allocation == in_stock_batch.reference def test_raises_out_of_stock_exception_if_cannot_allocate(): batch = Batch('batch1', 'SMALL-FORK', 10, eta=today) allocate(OrderLine('order1', 'SMALL-FORK', 10), [batch]) with pytest.raises(OutOfStock, match='SMALL-FORK'): allocate(OrderLine('order2', 'SMALL-FORK', 1), [batch]) def test_commits(): line = model.OrderLine("o1", "OMINOUS-MIRROR", 10) batch = model.Batch("b1", "OMINOUS-MIRROR", 100, eta=None) repo = FakeRepository([batch]) session = FakeSession() handlers.allocate("o1", "OMINOUS-MIRROR", 10, repo, session) assert session.committed is True def test_allocate_returns_allocation(): uow = FakeUnitOfWork() handlers.add_batch("batch1", "COMPLICATED-LAMP", 100, None, uow) result = handlers.allocate("o1", "COMPLICATED-LAMP", 10, uow) assert result == "bach1" def test_error_for_invalid_sku(): line = model.OrderLine("o1", "NONEXISTENTSKU", 10) batch = model.Batch("b1", "AREALSKU", 100, eta=None) repo = FakeRepository([batch]) with pytest.raises(handlers.InvalidSku, match="Invalid name of SKU: NONEXISTENTSKU"): handlers.allocate("o1", "NONEXISTENTSKU", 10, repo, FakeSession())
32.046154
89
0.702112
518
4,166
5.457529
0.239382
0.029713
0.050937
0.025469
0.377432
0.294659
0.227803
0.192784
0.149982
0.149982
0
0.028316
0.169227
4,166
129
90
32.294574
0.7885
0
0
0.202247
0
0
0.136102
0
0
0
0
0
0.134831
1
0.191011
false
0.011236
0.067416
0.011236
0.303371
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d08ebe64750ed4ee86af0207bca624b0391ff75
1,786
py
Python
DQMOffline/L1Trigger/python/L1TEGammaOffline_cfi.py
pasmuss/cmssw
566f40c323beef46134485a45ea53349f59ae534
[ "Apache-2.0" ]
null
null
null
DQMOffline/L1Trigger/python/L1TEGammaOffline_cfi.py
pasmuss/cmssw
566f40c323beef46134485a45ea53349f59ae534
[ "Apache-2.0" ]
null
null
null
DQMOffline/L1Trigger/python/L1TEGammaOffline_cfi.py
pasmuss/cmssw
566f40c323beef46134485a45ea53349f59ae534
[ "Apache-2.0" ]
null
null
null
import FWCore.ParameterSet.Config as cms electronEfficiencyThresholds = [36, 68, 128, 176] electronEfficiencyBins = [] electronEfficiencyBins.extend(list(xrange(0, 120, 10))) electronEfficiencyBins.extend(list(xrange(120, 180, 20))) electronEfficiencyBins.extend(list(xrange(180, 300, 40))) electronEfficiencyBins.extend(list(xrange(300, 400, 100))) # just copy for now photonEfficiencyThresholds = electronEfficiencyThresholds photonEfficiencyBins = electronEfficiencyBins l1tEGammaOfflineDQM = cms.EDAnalyzer( "L1TEGammaOffline", electronCollection=cms.InputTag("gedGsfElectrons"), photonCollection=cms.InputTag("photons"), caloJetCollection=cms.InputTag("ak4CaloJets"), caloMETCollection=cms.InputTag("caloMet"), conversionsCollection=cms.InputTag("allConversions"), PVCollection=cms.InputTag("offlinePrimaryVerticesWithBS"), beamSpotCollection=cms.InputTag("offlineBeamSpot"), TriggerEvent=cms.InputTag('hltTriggerSummaryAOD', '', 'HLT'), TriggerResults=cms.InputTag('TriggerResults', '', 'HLT'), # last filter of HLTEle27WP80Sequence TriggerFilter=cms.InputTag('hltEle27WP80TrackIsoFilter', '', 'HLT'), TriggerPath=cms.string('HLT_Ele27_WP80_v13'), stage2CaloLayer2EGammaSource=cms.InputTag("caloStage2Digis", "EGamma"), histFolder=cms.string('L1T/L1TEGamma'), electronEfficiencyThresholds=cms.vdouble(electronEfficiencyThresholds), electronEfficiencyBins=cms.vdouble(electronEfficiencyBins), photonEfficiencyThresholds=cms.vdouble(photonEfficiencyThresholds), photonEfficiencyBins=cms.vdouble(photonEfficiencyBins), ) l1tEGammaOfflineDQMEmu = l1tEGammaOfflineDQM.clone( stage2CaloLayer2EGammaSource=cms.InputTag("simCaloStage2Digis"), histFolder=cms.string('L1TEMU/L1TEGamma'), )
37.208333
75
0.783875
144
1,786
9.701389
0.513889
0.094488
0.091625
0.108805
0
0
0
0
0
0
0
0.043478
0.098544
1,786
47
76
38
0.824224
0.029675
0
0
0
0
0.154913
0.031214
0
0
0
0
0
1
0
false
0
0.030303
0
0.030303
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d092f6e945eea14883d51652329fcd4951dee46
18,548
py
Python
ion_networks/numba_functions.py
swillems/ion_networks
5304a92248ec007ac2253f246a3d44bdb58ae110
[ "MIT" ]
2
2020-10-28T16:11:56.000Z
2020-12-03T13:19:18.000Z
ion_networks/numba_functions.py
swillems/ion_networks
5304a92248ec007ac2253f246a3d44bdb58ae110
[ "MIT" ]
null
null
null
ion_networks/numba_functions.py
swillems/ion_networks
5304a92248ec007ac2253f246a3d44bdb58ae110
[ "MIT" ]
null
null
null
#!python # external import numpy as np import numba @numba.njit(nogil=True, cache=True) def longest_increasing_subsequence(sequence): # TODO:Docstring M = np.zeros(len(sequence) + 1, np.int64) P = np.zeros(len(sequence), np.int64) max_subsequence_length = 0 for current_index, current_element in enumerate(sequence): low_bound = 1 high_bound = max_subsequence_length while low_bound <= high_bound: mid = (low_bound + high_bound) // 2 if sequence[M[mid]] <= current_element: low_bound = mid + 1 else: high_bound = mid - 1 subsequence_length = low_bound P[current_index] = M[subsequence_length - 1] M[subsequence_length] = current_index if subsequence_length > max_subsequence_length: max_subsequence_length = subsequence_length longest_increasing_subsequence = np.repeat(0, max_subsequence_length) index = M[max_subsequence_length] for current_index in range(max_subsequence_length - 1, -1, -1): longest_increasing_subsequence[current_index] = index index = P[index] return longest_increasing_subsequence @numba.njit(nogil=True, cache=True) def increase_buffer(buffer, max_batch=10**7): new_buffer = np.empty(buffer.shape[0] + max_batch, np.int64) new_buffer[:len(buffer)] = buffer return new_buffer @numba.njit(nogil=True, cache=True) def quick_align( self_mzs, other_mzs, self_mz_order, other_mz_order, other_rt_order, ppm ): # TODO: Docstring max_mz_diff = 1 + ppm * 10**-6 low_limits = np.searchsorted( self_mzs[self_mz_order], other_mzs[other_mz_order] / max_mz_diff, "left" )[other_rt_order] high_limits = np.searchsorted( self_mzs[self_mz_order], other_mzs[other_mz_order] * max_mz_diff, "right" )[other_rt_order] diffs = high_limits - low_limits ends = np.cumsum(diffs) self_indices = np.empty(ends[-1], np.int64) for l, h, e, d in zip(low_limits, high_limits, ends, diffs): self_indices[e - d: e] = self_mz_order[l: h] selection = longest_increasing_subsequence(self_indices) self_indices_mask = np.empty(len(selection) + 2, np.int64) self_indices_mask[0] = 0 self_indices_mask[1: -1] = 
self_indices[selection] self_indices_mask[-1] = len(self_mzs) - 1 other_indices_mask = np.empty(len(selection) + 2, np.int64) other_indices_mask[0] = 0 other_indices = np.repeat( np.arange(len(other_rt_order)), high_limits - low_limits ) other_indices_mask[1: -1] = other_indices[selection] other_indices_mask[-1] = len(other_mzs) - 1 return self_indices_mask, other_indices_mask @numba.njit(nogil=True, cache=True) def align_coordinates( queries, lower_limits, upper_limits, self_coordinates, other_coordinates, max_errors, # kind="euclidean" ): indptr = np.zeros(len(queries), np.int64) indices = np.empty(10**7, np.int64) total = 0 for index, query in enumerate(queries): low_limit = lower_limits[query] high_limit = upper_limits[query] candidate_count = high_limit - low_limit if candidate_count == 0: continue elif (candidate_count + total) >= len(indices): indices = increase_buffer(indices) dists = other_coordinates[low_limit: high_limit] - self_coordinates[query] # TODO: what if error==0? # if kind == "euclidean": dists /= max_errors dists = dists**2 projected_dists = np.sum(dists, axis=1) projected_dists = np.sqrt(projected_dists) candidates = low_limit + np.flatnonzero(projected_dists <= 1) # elif kind == "manhattan": # projected_dists = np.all(dists < max_errors, axis=1) # candidates = low_limit + np.flatnonzero(projected_dists) candidate_count = len(candidates) indices[total: total + candidate_count] = candidates indptr[index] = candidate_count total += candidate_count return (indptr, indices[:total]) @numba.njit(nogil=True, cache=True) def make_symmetric( indptr, indices, ): # TODO: multithread? 
offsets = np.cumsum(np.bincount(indices)) indptr_ = indptr.copy() indptr_[1:1 + offsets.shape[0]] += offsets indptr_[1 + offsets.shape[0]:] += offsets[-1] indices_ = np.empty(indptr_[-1], np.int64) pointers_ = np.empty_like(indices_) offsets = indptr_[:-1] + np.diff(indptr) for index in range(indptr.shape[0] - 1): start = indptr[index] end = indptr[index + 1] current_indices = indices[start: end] pointers = np.arange(start, end) start_ = indptr_[index] end_ = start_ + current_indices.shape[0] indices_[start_: end_] = current_indices pointers_[start_: end_] = pointers current_offsets = offsets[current_indices] indices_[current_offsets] = index pointers_[current_offsets] = pointers offsets[current_indices] += 1 return indptr_, indices_, pointers_ @numba.njit(nogil=True, cache=True) def align_edges( queries, self_indptr, self_indices, self_pointers, other_indptr, other_indices, alignment, alignment_mask, ): self_pointers_ = np.empty(10**7, np.int64) other_pointers_ = np.empty(10**7, np.int64) pointer_offset = 0 for index in queries: possible_start = self_indptr[index] possible_end = self_indptr[index + 1] if possible_start == possible_end: continue current_index = alignment[index] current_start = other_indptr[current_index] current_end = other_indptr[current_index + 1] if current_start == current_end: continue possible_indices = self_indices[possible_start: possible_end] possible_mask = alignment_mask[possible_indices] if not np.any(possible_mask): continue possible_indices = alignment[possible_indices[possible_mask]] possible_pointers = self_pointers[possible_start: possible_end][ possible_mask ] current_indices = other_indices[current_start: current_end] candidates1 = np.searchsorted( current_indices, possible_indices, "left" ) candidates2 = np.searchsorted( current_indices, possible_indices, "right" ) overlap = np.flatnonzero(candidates2 != candidates1) overlap_count = len(overlap) if len(overlap) == 0: continue elif (overlap_count + pointer_offset) >= 
len(self_pointers_): self_pointers_ = increase_buffer(self_pointers_) other_pointers_ = increase_buffer(other_pointers_) self_pointers_[ pointer_offset: pointer_offset + overlap_count ] = possible_pointers[overlap] other_pointers_[ pointer_offset: pointer_offset + overlap_count ] = current_start + candidates1[overlap] pointer_offset += overlap_count return self_pointers_[:pointer_offset], other_pointers_[:pointer_offset] @numba.njit(cache=True) def find_peak_indices( input_array, output_array, max_distance, ): peaks = np.zeros(int(input_array[-1]), np.int64) current_max_mz = 0 current_max_int = 0 current_max_index = 0 for index, (intensity, mz) in enumerate(zip(output_array, input_array)): if mz > current_max_mz + max_distance: peaks[int(current_max_mz)] = current_max_index current_max_mz = mz current_max_int = intensity current_max_index = index elif intensity > current_max_int: current_max_mz = mz current_max_int = intensity current_max_index = index return peaks @numba.njit(nogil=True, cache=True) def get_unique_apex_and_count( ordered_self_indices, ordered_other_indices, return_all_counts=True ): counts = np.zeros_like(ordered_self_indices) self_max = np.max(ordered_self_indices) other_max = np.max(ordered_other_indices) unique_pair = np.zeros(counts.shape[0], np.bool_) self_frequencies = np.zeros(self_max + 1, np.int64) other_frequencies = np.zeros(other_max + 1, np.int64) self_indptr = np.empty(self_max + 2, np.int64) self_indptr[0] = 0 self_indptr[1:] = np.cumsum(np.bincount(ordered_self_indices)) self_order = np.argsort(ordered_self_indices) other_indptr = np.empty(other_max + 2, np.int64) other_indptr[0] = 0 other_indptr[1:] = np.cumsum(np.bincount(ordered_other_indices)) other_order = np.argsort(ordered_other_indices) unique_count = 0 max_count = 0 apex = 0 for i in range(counts.shape[0]): self_index = ordered_self_indices[i] other_index = ordered_other_indices[i] if ( self_frequencies[self_index] == 0 ) & ( other_frequencies[other_index] == 0 ): 
unique_count += 1 unique_pair[i] = True if unique_count > max_count: apex = i max_count = unique_count else: self_locs = self_order[ self_indptr[self_index]: self_indptr[self_index + 1] ] if np.any(unique_pair[self_locs]): unique_count -= 1 other_locs = other_order[ other_indptr[other_index]: other_indptr[other_index + 1] ] if np.any(unique_pair[other_locs]): unique_count -= 1 unique_pair[self_locs] = False unique_pair[other_locs] = False self_frequencies[self_index] += 1 other_frequencies[other_index] += 1 counts[i] = unique_count if not return_all_counts: counts = counts[apex: apex + 1] return apex, counts @numba.njit def cluster_network( indptr, indices, edge_pointers, selected_edges, ): node_count = indptr.shape[0] - 1 clusters = np.zeros(node_count, np.int64) cluster_number = 0 for index in range(node_count): if clusters[index] != 0: continue current_cluster = set() new_indices = set() new_indices.add(index) while len(new_indices) > 0: new_index = new_indices.pop() current_cluster.add(new_index) neighbors = indices[indptr[new_index]: indptr[new_index + 1]] pointers = edge_pointers[indptr[new_index]: indptr[new_index + 1]] selected = selected_edges[pointers] new_indices |= set(neighbors[selected]) - current_cluster cluster_number += 1 for i in current_cluster: clusters[i] = cluster_number return clusters @numba.njit() def __get_candidate_peptide_indices_for_edges( indptr, indices, low_peptide_indices, high_peptide_indices, database_peptides, max_batch ): # TODO: Docstring result_indptr = np.empty(indptr[-1], np.int64) result_indices = np.empty(max_batch, np.int64) current_index = 0 for start, end, low, high in zip( indptr[:-1], indptr[1:], low_peptide_indices, high_peptide_indices, ): if (low == high) or (start == end): result_indptr[start:end] = current_index continue if ( (end - start) * (high - low) + current_index ) >= result_indices.shape[0]: new_result_indices = np.empty( max_batch + result_indices.shape[0], np.int64 ) 
new_result_indices[:result_indices.shape[0]] = result_indices result_indices = new_result_indices peptide_candidates = database_peptides[low: high] peptide_candidates_set = set(peptide_candidates) neighbors = indices[start: end] for i, neighbor in enumerate(neighbors): neighbor_low = low_peptide_indices[neighbor] neighbor_high = high_peptide_indices[neighbor] if neighbor_low == neighbor_high: result_indptr[start + i] = current_index continue neighbor_peptide_candidates = database_peptides[ neighbor_low: neighbor_high ] for neighbor_peptide_candidate in neighbor_peptide_candidates: if neighbor_peptide_candidate in peptide_candidates_set: result_indices[ current_index ] = neighbor_peptide_candidate current_index += 1 result_indptr[start + i] = current_index result_indptr[1:] = result_indptr[:-1] result_indptr[0] = 0 return result_indptr, result_indices[:current_index] @numba.njit(cache=True, nogil=True) def annotate_mgf( queries, spectra_indptr, low_limits, high_limits, peptide_pointers, min_score=0 ): peptide_count = np.max(peptide_pointers) + 1 count = 0 for s in queries: count += spectra_indptr[s + 1] - spectra_indptr[s] score_results = np.empty(count, np.float64) fragment_results = np.empty(count, np.int64) index_results = np.empty(count, np.int64) count_results = np.empty(count, np.int64) candidate_counts = np.empty(count, np.int64) spectrum_sizes = np.empty(count, np.int64) current_i = 0 candidates = np.empty(peptide_count, np.int64) for spectrum_index in queries: spectrum_start = spectra_indptr[spectrum_index] spectrum_end = spectra_indptr[spectrum_index + 1] spectrum_size = spectrum_end - spectrum_start if spectrum_size == 0: continue candidates[:] = 0 for ion_index in range(spectrum_start, spectrum_end): peptide_low = low_limits[ion_index] peptide_high = high_limits[ion_index] if peptide_low == peptide_high: continue peptides = peptide_pointers[peptide_low: peptide_high] candidates[peptides] += 1 for ion_index in range(spectrum_start, spectrum_end): 
peptide_low = low_limits[ion_index] peptide_high = high_limits[ion_index] if peptide_low == peptide_high: continue ( score, max_count, max_fragment, candidate_count ) = score_regression_estimator( candidates[peptide_pointers[peptide_low: peptide_high]], peptide_low, peptide_count ) if score > min_score: score_results[current_i] = score fragment_results[current_i] = max_fragment index_results[current_i] = ion_index count_results[current_i] = max_count candidate_counts[current_i] = candidate_count spectrum_sizes[current_i] = spectrum_size current_i += 1 return ( score_results[:current_i], fragment_results[:current_i], index_results[:current_i], count_results[:current_i], candidate_counts[:current_i], spectrum_sizes[:current_i], ) @numba.njit(cache=True, nogil=True) def annotate_network( queries, indptr, indices, edge_pointers, selected_edges, low_limits, high_limits, peptide_pointers, ): peptide_count = np.max(peptide_pointers) + 1 count = len(queries) score_results = np.empty(count, np.float64) fragment_results = np.empty(count, np.int64) index_results = np.empty(count, np.int64) count_results = np.empty(count, np.int64) candidate_counts = np.empty(count, np.int64) neighbor_counts = np.empty(count, np.int64) current_i = 0 for ion_index in queries: peptide_low = low_limits[ion_index] peptide_high = high_limits[ion_index] if peptide_low == peptide_high: continue ion_start = indptr[ion_index] ion_end = indptr[ion_index + 1] good_neighbors = selected_edges[edge_pointers[ion_start: ion_end]] neighbor_count = np.sum(good_neighbors) if neighbor_count == 0: continue neighbors = indices[ion_start: ion_end][good_neighbors] candidates = np.zeros(peptide_count, np.int64) for neighbor_ion_index in neighbors: neighbor_peptide_low = low_limits[neighbor_ion_index] neighbor_peptide_high = high_limits[neighbor_ion_index] if neighbor_peptide_low == neighbor_peptide_high: continue peptides = peptide_pointers[ neighbor_peptide_low: neighbor_peptide_high ] candidates[peptides] += 1 ( 
score, max_count, max_fragment, candidate_count ) = score_regression_estimator( candidates[peptide_pointers[peptide_low: peptide_high]] + 1, peptide_low, peptide_count ) if score > 0: score_results[current_i] = score fragment_results[current_i] = max_fragment index_results[current_i] = ion_index count_results[current_i] = max_count candidate_counts[current_i] = candidate_count neighbor_counts[current_i] = neighbor_count current_i += 1 return ( score_results[:current_i], fragment_results[:current_i], index_results[:current_i], count_results[:current_i], candidate_counts[:current_i], neighbor_counts[:current_i], ) @numba.njit(cache=True, nogil=True) def score_regression_estimator(candidates, offset, peptide_count): frequencies = np.bincount(candidates) frequencies = np.cumsum(frequencies[::-1])[::-1] max_count = len(frequencies) - 1 max_fragment = offset + np.flatnonzero(candidates == max_count)[0] if frequencies[-1] != 1: score = 0 elif frequencies[1] == 1: # score = 1 - 2**(-np.log2(peptide_count) * (max_count - 1)) score = 1 - peptide_count**(1 - max_count) else: x0 = 2 + np.flatnonzero(frequencies[2:] == 1)[0] y0 = np.log2(frequencies[1]) slope = y0 / (x0 - 1) score = 1 - 2**(-slope * (max_count - x0)) return score, max_count, max_fragment, len(candidates)
34.864662
82
0.630418
2,214
18,548
4.959801
0.083108
0.020399
0.021856
0.015299
0.341226
0.308624
0.239414
0.189327
0.166743
0.1529
0
0.017666
0.279761
18,548
531
83
34.93032
0.804327
0.01887
0
0.317719
0
0
0.00099
0
0
0
0
0.001883
0
1
0.026477
false
0
0.004073
0
0.057026
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d099c325b8e8eb13555bc61afea2a208b9050c9
241
py
Python
Programming Fundamentals/Dictionaries/bakery.py
antonarnaudov/SoftUniProjects
01cbdce2b350b57240045d1bc3e21d34f9d0351d
[ "MIT" ]
null
null
null
Programming Fundamentals/Dictionaries/bakery.py
antonarnaudov/SoftUniProjects
01cbdce2b350b57240045d1bc3e21d34f9d0351d
[ "MIT" ]
null
null
null
Programming Fundamentals/Dictionaries/bakery.py
antonarnaudov/SoftUniProjects
01cbdce2b350b57240045d1bc3e21d34f9d0351d
[ "MIT" ]
null
null
null
def result(elements): bakery = {} for i in range(0, len(elements), 2): key = elements[i] value = elements[i + 1] bakery[key] = int(value) return bakery tokens = input().split(' ') print(result(tokens))
18.538462
40
0.564315
31
241
4.387097
0.645161
0.132353
0
0
0
0
0
0
0
0
0
0.017341
0.282158
241
13
41
18.538462
0.768786
0
0
0
0
0
0.004132
0
0
0
0
0
0
1
0.111111
false
0
0
0
0.222222
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d0ab807d87d356a4a4fb529654e22486400f676
1,525
py
Python
vtrace/const.py
rnui2k/vivisect
b7b00f2d03defef28b4b8c912e3a8016e956c5f7
[ "ECL-2.0", "Apache-2.0" ]
716
2015-01-01T14:41:11.000Z
2022-03-28T06:51:50.000Z
vtrace/const.py
rnui2k/vivisect
b7b00f2d03defef28b4b8c912e3a8016e956c5f7
[ "ECL-2.0", "Apache-2.0" ]
266
2015-01-01T15:07:27.000Z
2022-03-30T15:19:26.000Z
vtrace/const.py
rnui2k/vivisect
b7b00f2d03defef28b4b8c912e3a8016e956c5f7
[ "ECL-2.0", "Apache-2.0" ]
159
2015-01-01T16:19:44.000Z
2022-03-21T21:55:34.000Z
# Order must match format junk # NOTIFY_ALL is kinda special, if you registerNotifier # with it, you get ALL notifications. NOTIFY_ALL = 0 # Get all notifications NOTIFY_SIGNAL = 1 # Callback on signal/exception NOTIFY_BREAK = 2 # Callback on breakpoint / sigtrap NOTIFY_STEP = 3 # Callback on singlestep complete NOTIFY_SYSCALL = 4 # Callback on syscall (linux only for now) NOTIFY_CONTINUE = 5 # Callback on continue (not done for step) NOTIFY_EXIT = 6 # Callback on process exit NOTIFY_ATTACH = 7 # Callback on successful attach NOTIFY_DETACH = 8 # Callback on impending process detach # The following notifiers are *only* available on some platforms # (and may be kinda faked out ala library load events on posix) NOTIFY_LOAD_LIBRARY = 9 NOTIFY_UNLOAD_LIBRARY = 10 NOTIFY_CREATE_THREAD = 11 NOTIFY_EXIT_THREAD = 12 NOTIFY_DEBUG_PRINT = 13 # Some platforms support this (win32). NOTIFY_MAX = 20 # File Descriptor / Handle Types FD_UNKNOWN = 0 # Unknown or we don't have a type for it FD_FILE = 1 FD_SOCKET = 2 FD_PIPE = 3 FD_LOCK = 4 # Win32 Mutant/Lock/Semaphore FD_EVENT = 5 # Win32 Event/KeyedEvent FD_THREAD = 6 # Win32 Thread FD_REGKEY = 7 # Win32 Registry Key # Vtrace Symbol Types SYM_MISC = -1 SYM_GLOBAL = 0 # Global (mostly vars) SYM_LOCAL = 1 # Locals SYM_FUNCTION = 2 # Functions SYM_SECTION = 3 # Binary section SYM_META = 4 # Info that we enumerate # Vtrace Symbol Offsets VSYM_NAME = 0 VSYM_ADDR = 1 VSYM_SIZE = 2 VSYM_TYPE = 3 VSYM_FILE = 4
33.152174
66
0.733115
237
1,525
4.548523
0.535865
0.074212
0.03525
0.046382
0
0
0
0
0
0
0
0.040698
0.210492
1,525
45
67
33.888889
0.854651
0.55082
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d0d12599f8d63386d38681b6e12a10636886357
3,248
py
Python
src/ezdxf/groupby.py
jkjt/ezdxf
2acc5611b81476ea16b98063b9f55446a9182b81
[ "MIT" ]
515
2017-01-25T05:46:52.000Z
2022-03-29T09:52:27.000Z
src/ezdxf/groupby.py
jkjt/ezdxf
2acc5611b81476ea16b98063b9f55446a9182b81
[ "MIT" ]
417
2017-01-25T10:01:17.000Z
2022-03-29T09:22:04.000Z
src/ezdxf/groupby.py
jkjt/ezdxf
2acc5611b81476ea16b98063b9f55446a9182b81
[ "MIT" ]
149
2017-02-01T15:52:02.000Z
2022-03-17T10:33:38.000Z
# Purpose: Grouping entities by DXF attributes or a key function. # Copyright (c) 2017-2021, Manfred Moitzi # License: MIT License from typing import Iterable, Hashable, Dict, List, TYPE_CHECKING from ezdxf.lldxf.const import DXFValueError, DXFAttributeError if TYPE_CHECKING: from ezdxf.eztypes import DXFEntity, KeyFunc def groupby( entities: Iterable["DXFEntity"], dxfattrib: str = "", key: "KeyFunc" = None ) -> Dict[Hashable, List["DXFEntity"]]: """ Groups a sequence of DXF entities by a DXF attribute like ``'layer'``, returns a dict with `dxfattrib` values as key and a list of entities matching this `dxfattrib`. A `key` function can be used to combine some DXF attributes (e.g. layer and color) and should return a hashable data type like a tuple of strings, integers or floats, `key` function example:: def group_key(entity: DXFEntity): return entity.dxf.layer, entity.dxf.color For not suitable DXF entities return ``None`` to exclude this entity, in this case it's not required, because :func:`groupby` catches :class:`DXFAttributeError` exceptions to exclude entities, which do not provide layer and/or color attributes, automatically. Result dict for `dxfattrib` = ``'layer'`` may look like this:: { '0': [ ... list of entities ], 'ExampleLayer1': [ ... ], 'ExampleLayer2': [ ... ], ... } Result dict for `key` = `group_key`, which returns a ``(layer, color)`` tuple, may look like this:: { ('0', 1): [ ... list of entities ], ('0', 3): [ ... ], ('0', 7): [ ... ], ('ExampleLayer1', 1): [ ... ], ('ExampleLayer1', 2): [ ... ], ('ExampleLayer1', 5): [ ... ], ('ExampleLayer2', 7): [ ... ], ... } All entity containers (modelspace, paperspace layouts and blocks) and the :class:`~ezdxf.query.EntityQuery` object have a dedicated :meth:`groupby` method. 
Args: entities: sequence of DXF entities to group by a DXF attribute or a `key` function dxfattrib: grouping DXF attribute like ``'layer'`` key: key function, which accepts a :class:`DXFEntity` as argument and returns a hashable grouping key or ``None`` to ignore this entity """ if all((dxfattrib, key)): raise DXFValueError( "Specify a dxfattrib or a key function, but not both." ) if dxfattrib != "": key = lambda entity: entity.dxf.get_default(dxfattrib) if key is None: raise DXFValueError( "no valid argument found, specify a dxfattrib or a key function, " "but not both." ) result: Dict[Hashable, List["DXFEntity"]] = dict() for dxf_entity in entities: if not dxf_entity.is_alive: continue try: group_key = key(dxf_entity) except DXFAttributeError: # ignore DXF entities, which do not support all query attributes continue if group_key is not None: group = result.setdefault(group_key, []) group.append(dxf_entity) return result
35.692308
79
0.601293
383
3,248
5.065274
0.360313
0.039691
0.030928
0.028866
0.058763
0.042268
0.042268
0.042268
0.042268
0.042268
0
0.010837
0.289717
3,248
90
80
36.088889
0.830082
0.595751
0
0.133333
0
0
0.14336
0
0
0
0
0
0
1
0.033333
false
0
0.1
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d0eed15b3c0630d157c26b0aac4e458a282e19f
8,527
py
Python
main_single.py
wang-chen/AirLoop
12fb442c911002427a51f00d43f747ef593bd186
[ "BSD-3-Clause" ]
39
2021-09-28T19:48:13.000Z
2022-03-17T06:44:19.000Z
main_single.py
wang-chen/AirLoop
12fb442c911002427a51f00d43f747ef593bd186
[ "BSD-3-Clause" ]
null
null
null
main_single.py
wang-chen/AirLoop
12fb442c911002427a51f00d43f747ef593bd186
[ "BSD-3-Clause" ]
3
2021-10-04T01:26:17.000Z
2022-02-12T04:48:50.000Z
#!/usr/bin/env python3 import os import tqdm import torch import random import numpy as np import torch.nn as nn import configargparse import torch.optim as optim from tensorboard import program from torch.utils.tensorboard import SummaryWriter import yaml from models import FeatureNet from datasets import get_dataset from losses import MemReplayLoss from utils.evaluation import RecognitionEvaluator from utils.misc import save_model, load_model, GlobalStepCounter, ProgressBarDescription @torch.no_grad() def evaluate(net, loader, counter, args, writer=None): net.eval() evaluator = RecognitionEvaluator(loader=loader, args=args) for images, aux, env_seq in tqdm.tqdm(loader): images = images.to(args.device) gd = net(images) evaluator.observe(gd, aux, images, env_seq) evaluator.report() def train(model, loader, optimizer, counter, args, writer=None): model.train() if 'train' in args.task: criterion = MemReplayLoss(writer=writer, viz_start=args.viz_start, viz_freq=args.viz_freq, counter=counter, args=args) last_env = None for epoch in range(args.epoch): enumerator = tqdm.tqdm(loader) pbd = ProgressBarDescription(enumerator) for images, aux, env_seq in enumerator: images = images.to(args.device) loss = criterion(model, images, aux, env_seq[0]) # in case loss is manually set to 0 to skip batches if loss.requires_grad and not loss.isnan(): loss.backward() optimizer.step(closure=criterion.ll_loss) optimizer.zero_grad() # save model on env change for env-incremental tasks if 'seq' in args.task and last_env != env_seq[0][0]: if last_env is not None: save_model(model, '%s.%s' % (args.save, last_env)) last_env = env_seq[0][0] if (args.save_freq is not None and counter.steps % args.save_freq == 0) \ or (args.save_steps is not None and counter.steps in args.save_steps): save_model(model, '%s.step%d' % (args.save, counter.steps)) pbd.update(loss) counter.step() if 'seq' in args.task: if args.save is not None: save_model(model, '%s.%s' % (args.save, last_env)) if args.ll_method is 
not None: criterion.ll_loss.save(task=last_env) else: save_model(model, '%s.epoch%d' % (args.save, epoch)) def main(args): if args.deterministic >= 1: torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) if args.deterministic >= 2: torch.backends.cudnn.benchmark = False if args.deterministic >= 3: torch.set_deterministic(True) loader = get_dataset(args) if args.devices is None: args.devices = ['cuda:%d' % i for i in range(torch.cuda.device_count())] if torch.cuda.is_available() else ['cpu'] args.device = args.devices[0] model = FeatureNet(args.gd_dim).to(args.device) if args.load: load_model(model, args.load, device=args.device) if not args.no_parallel: model = nn.DataParallel(model, device_ids=args.devices) writer = None if args.log_dir is not None: log_dir = args.log_dir # timestamp runs into the same logdir if os.path.exists(log_dir) and os.path.isdir(log_dir): from datetime import datetime log_dir = os.path.join(log_dir, datetime.now().strftime('%b%d_%H-%M-%S')) writer = SummaryWriter(log_dir) tb = program.TensorBoard() tb.configure(argv=[None, '--logdir', log_dir, '--bind_all', '--samples_per_plugin=images=50']) print(('TensorBoard at %s \n' % tb.launch())) step_counter = GlobalStepCounter(initial_step=1) if 'train' in args.task: optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.w_decay) train(model, loader, optimizer, step_counter, args, writer) if 'eval' in args.task: evaluate(model, loader, step_counter, args, writer) def run(args=None): # Arguements parser = configargparse.ArgumentParser(description='Feature Graph Networks', default_config_files=['./config/config.yaml']) # general parser.add_argument("--config", is_config_file=True, help="Config file path") parser.add_argument("--task", type=str, choices=['train-seq', 'train-joint', 'eval'], default='train-seq', help="Task to perform") parser.add_argument("--catalog-dir", type=str, default='./.cache/catalog', help='Processed 
dataset catalog') parser.add_argument("--no-parallel", action='store_true', help="DataParallel") parser.add_argument("--devices", type=str, nargs='+', default=None, help="Available devices") parser.add_argument("--deterministic", type=int, default=3, help='Level of determinism.') parser.add_argument("--seed", type=int, default=0, help='Random seed.') parser.add_argument("--ll-config", type=str, help='Config file for lifelong losses') parser.add_argument("--print-configs", action='store_true', help='Print parsed configs to console') # dataset parser.add_argument("--dataset-root", type=str, default='/data/datasets/', help="Home for all datasets") parser.add_argument("--dataset", type=str, choices=['tartanair', 'nordland', 'robotcar'], default='tartanair', help="Dataset to use") parser.add_argument("--include", type=str, default=None, help="Regex for sequences to include") parser.add_argument("--exclude", type=str, default=None, help="Regex for sequences to exclude") parser.add_argument('--scale', type=float, default=0.5, help='Image scale') parser.add_argument("--num-workers", type=int, default=4, help="Number of workers in dataloader") # model parser.add_argument("--gd-dim", type=int, default=1024, help="Global descriptor dimension") # training parser.add_argument("--load", type=str, default=None, help="load pretrained model") parser.add_argument("--save", type=str, default=None, help="Model save path") parser.add_argument("--save-freq", type=int, help="Model saving frequency") parser.add_argument("--save-steps", type=int, nargs="+", help="Specific steps to save model") parser.add_argument("--ll-method", type=str, help="Lifelong learning method") parser.add_argument("--ll-weight-dir", type=str, default=None, help="Load directory for regularization weights") parser.add_argument("--ll-weight-load", type=str, nargs='+', help="Environment names for regularization weights") parser.add_argument("--ll-strength", type=float, nargs='+', help="Weights of lifelong losses") 
parser.add_argument("--batch-size", type=int, default=8, help="Minibatch size") parser.add_argument("--lr", type=float, default=2e-3, help="Learning rate") parser.add_argument("--w-decay", type=float, default=0, help="Weight decay of optim") parser.add_argument("--epoch", type=int, default=15, help="Number of epoches") parser.add_argument("--mem-size", type=int, default=1000, help="Memory size") parser.add_argument("--log-dir", type=str, default=None, help="Tensorboard Log dir") parser.add_argument("--viz-start", type=int, default=np.inf, help='Visualize starting from iteration') parser.add_argument("--viz-freq", type=int, default=1, help='Visualize every * iteration(s)') # evaluation parser.add_argument("--eval-split-seed", type=int, default=42, help='Seed for splitting the dataset') parser.add_argument("--eval-percentage", type=float, default=0.2, help='Percentage of sequences for eval') parser.add_argument("--eval-save", type=str, help='Raw evaluation result save path') parser.add_argument("--eval-desc-save", type=str, help='Generated global descriptor save path') parser.add_argument("--eval-gt-dir", type=str, help='Evaluation groundtruth save directory') parserd_args = parser.parse_args(args) # domain specific configs if parserd_args.ll_config is not None and parserd_args.ll_method is not None: with open(parserd_args.ll_config, 'r') as f: for k, v in yaml.safe_load(f)[parserd_args.ll_method].items(): setattr(parserd_args, k.replace('-', '_'), v) if parserd_args.print_configs: print("Training config:", parserd_args) main(parserd_args) if __name__ == "__main__": run()
45.844086
137
0.673273
1,164
8,527
4.819588
0.231959
0.059358
0.112121
0.019251
0.142959
0.098039
0.050267
0.028877
0.028877
0.01426
0
0.005641
0.189164
8,527
185
138
46.091892
0.805756
0.02756
0
0.042857
0
0
0.19587
0.003623
0
0
0
0
0
1
0.028571
false
0
0.121429
0
0.15
0.028571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d10f233df729f37438c93bc6d49f9504b03d459
1,192
py
Python
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/rss_proxy/views.py
osoco/better-ways-of-thinking-about-software
83e70d23c873509e22362a09a10d3510e10f6992
[ "MIT" ]
3
2021-12-15T04:58:18.000Z
2022-02-06T12:15:37.000Z
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/rss_proxy/views.py
osoco/better-ways-of-thinking-about-software
83e70d23c873509e22362a09a10d3510e10f6992
[ "MIT" ]
null
null
null
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/rss_proxy/views.py
osoco/better-ways-of-thinking-about-software
83e70d23c873509e22362a09a10d3510e10f6992
[ "MIT" ]
1
2019-01-02T14:38:50.000Z
2019-01-02T14:38:50.000Z
""" Views for the rss_proxy djangoapp. """ import requests from django.conf import settings from django.core.cache import cache from django.http import HttpResponse, HttpResponseNotFound from lms.djangoapps.rss_proxy.models import WhitelistedRssUrl CACHE_KEY_RSS = "rss_proxy.{url}" def proxy(request): """ Proxy requests for the given RSS url if it has been whitelisted. """ url = request.GET.get('url') if url and WhitelistedRssUrl.objects.filter(url=url).exists(): # Check cache for RSS if the given url is whitelisted cache_key = CACHE_KEY_RSS.format(url=url) status_code = 200 rss = cache.get(cache_key, '') print(cache_key) print('Cached rss: %s' % rss) if not rss: # Go get the RSS from the URL if it was not cached resp = requests.get(url) status_code = resp.status_code if status_code == 200: # Cache RSS rss = resp.content cache.set(cache_key, rss, settings.RSS_PROXY_CACHE_TIMEOUT) return HttpResponse(rss, status=status_code, content_type='application/xml') return HttpResponseNotFound()
29.8
84
0.653523
157
1,192
4.834395
0.356688
0.063241
0.043478
0
0
0
0
0
0
0
0
0.006818
0.261745
1,192
39
85
30.564103
0.855682
0.177013
0
0
0
0
0.049163
0
0
0
0
0
0
1
0.045455
false
0
0.227273
0
0.363636
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d123f052b89aece17eb457b8ad9cafa6d71e501
314
py
Python
bootcamp/accounts/urls.py
elbakouchi/bootcamp
2c7a0cd2ddb7632acb3009f94d728792ddf9644f
[ "MIT" ]
null
null
null
bootcamp/accounts/urls.py
elbakouchi/bootcamp
2c7a0cd2ddb7632acb3009f94d728792ddf9644f
[ "MIT" ]
null
null
null
bootcamp/accounts/urls.py
elbakouchi/bootcamp
2c7a0cd2ddb7632acb3009f94d728792ddf9644f
[ "MIT" ]
null
null
null
from django.conf.urls import url from .views import * app_name = "accounts" urlpatterns = [ url(r"^signup/$", CustomSignupView.as_view(), name="custom_signup"), url(r"^destroy/$", AjaxLogoutView.as_view(), name="destroy"), url(r"^(?P<username>[\w.@+-]+)/$", ProfileView.as_view(), name="profile"), ]
28.545455
78
0.652866
40
314
5
0.6
0.06
0.15
0
0
0
0
0
0
0
0
0
0.121019
314
10
79
31.4
0.724638
0
0
0
0
0
0.254777
0.082803
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d1338f96592532b4f49b0f4d8c0180dee99ffe0
1,833
py
Python
tests/integration/test_translated_content.py
asmeurer/nikola
ea1c651bfed0fd6337f1d22cf8dd99899722912c
[ "MIT" ]
1,901
2015-01-02T02:49:51.000Z
2022-03-30T23:31:35.000Z
tests/integration/test_translated_content.py
asmeurer/nikola
ea1c651bfed0fd6337f1d22cf8dd99899722912c
[ "MIT" ]
1,755
2015-01-01T08:17:16.000Z
2022-03-24T18:02:22.000Z
tests/integration/test_translated_content.py
asmeurer/nikola
ea1c651bfed0fd6337f1d22cf8dd99899722912c
[ "MIT" ]
421
2015-01-02T18:06:37.000Z
2022-03-28T23:18:54.000Z
""" Test a site with translated content. Do not test titles as we remove the translation. """ import io import os import shutil import lxml.html import pytest import nikola.plugins.command.init from nikola import __main__ from .helper import cd from .test_empty_build import ( # NOQA test_archive_exists, test_avoid_double_slash_in_rss, test_check_files, test_check_links, test_index_in_sitemap, ) def test_translated_titles(build, output_dir, other_locale): """Check that translated title is picked up.""" normal_file = os.path.join(output_dir, "pages", "1", "index.html") translated_file = os.path.join(output_dir, other_locale, "pages", "1", "index.html") # Files should be created assert os.path.isfile(normal_file) assert os.path.isfile(translated_file) # And now let's check the titles with io.open(normal_file, "r", encoding="utf8") as inf: doc = lxml.html.parse(inf) assert doc.find("//title").text == "Foo | Demo Site" with io.open(translated_file, "r", encoding="utf8") as inf: doc = lxml.html.parse(inf) assert doc.find("//title").text == "Bar | Demo Site" @pytest.fixture(scope="module") def build(target_dir, test_dir): """Build the site.""" init_command = nikola.plugins.command.init.CommandInit() init_command.create_empty_site(target_dir) init_command.create_configuration(target_dir) src = os.path.join(test_dir, "..", "data", "translated_titles") for root, dirs, files in os.walk(src): for src_name in files: rel_dir = os.path.relpath(root, src) dst_file = os.path.join(target_dir, rel_dir, src_name) src_file = os.path.join(root, src_name) shutil.copy2(src_file, dst_file) with cd(target_dir): __main__.main(["build"])
29.095238
88
0.681942
267
1,833
4.456929
0.370787
0.040336
0.042017
0.047059
0.144538
0.144538
0.105882
0.105882
0.105882
0.105882
0
0.003399
0.19749
1,833
62
89
29.564516
0.805574
0.111839
0
0.05
0
0
0.074627
0
0
0
0
0
0.1
1
0.05
false
0
0.225
0
0.275
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d13de1d5fcb7bb17eb81bbe83f7d14929b0ec78
8,826
py
Python
src/train.py
weiyi1991/UA_Concurrent
11238c778c60095abf326800d6e6a13a643bf071
[ "MIT" ]
null
null
null
src/train.py
weiyi1991/UA_Concurrent
11238c778c60095abf326800d6e6a13a643bf071
[ "MIT" ]
1
2020-09-02T12:24:59.000Z
2020-09-02T12:24:59.000Z
src/train.py
weiyi1991/UA_Concurrent
11238c778c60095abf326800d6e6a13a643bf071
[ "MIT" ]
null
null
null
import argparse import os import torch import torch.nn.functional as F from model_ST import * import data import numpy as np import matplotlib.pyplot as plt from torch.utils.data import Dataset, DataLoader import sys from predict import evaluate_MA from tensorboardX import SummaryWriter # print model parameter def print_model(model): print('=================== Print model parameters ================') print(model.state_dict().keys()) for i, j in model.named_parameters(): print(i) print(j) # Training settings parser = argparse.ArgumentParser(description='Relation network for concurrent activity detection') parser.add_argument('--BATCH_SIZE', type=int, default=256, help='Training batch size. Default=256') parser.add_argument('--save_every', type=int, default=5, help='Save model every save_every epochs. Defualt=5') parser.add_argument('--EPOCH', type=int, default=500, help='Number of epochs to train. Default=600') parser.add_argument('--LR', type=float, default=0.001, help='Learning Rate. Default=0.001') parser.add_argument('--TRAIN', action='store_true', default=True, help='Train or test? ') parser.add_argument('--DEBUG', action='store_true', default=False, help='Debug mode (load less data)? Defualt=False') parser.add_argument('--clip_grad', type=float, default=5.0, help='Gradient clipping parameter. Default=5,0') parser.add_argument('--dataPath', type=str, default='/home/yi/PycharmProjects/relation_network/data/UCLA/new273', help='path to the data folder') parser.add_argument('--checkpoint', type=str, help='Checkpoint folder name under ./model/') parser.add_argument('--verbose', type=int, default=1, help='Print verbose information? Default=True') # model parameters parser.add_argument('--n_input', type=int, default=37, help='Input feature vector size. Default=37') parser.add_argument('--n_hidden', type=int, default=128, help='Hidden units for LSTM baseline. Default=128') parser.add_argument('--n_layers', type=int, default=2, help='LSTM layer number. 
Default=2') parser.add_argument('--n_class', type=int, default=12, help='Class label number. Default=12') parser.add_argument('--use_lstm', action='store_true', default=True, help='Use LSTM for relation network classifier. Default=True') parser.add_argument('--df', type=int, default=64, help='Relation feature dimension. Default=64') parser.add_argument('--dk', type=int, default=8, help='Key feature dim. Default=8') parser.add_argument('--nr', type=int, default=4, help='Multihead number. Default=4') opt = parser.parse_args() checkpoint_dir = './model/{}/'.format(opt.checkpoint) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) orig_stdout = sys.stdout f = open(checkpoint_dir + '/parameter.txt', 'w') sys.stdout = f print(opt) f.close() sys.stdout = orig_stdout # data preparation train_dataset = data.ConActDataset(opt.dataPath) test_dataset = data.ConActDataset(opt.dataPath, train=not opt.TRAIN) writer = SummaryWriter() # only take few sequences for debuging debug_seq = 3 if opt.DEBUG: train_data = [] for i in range(debug_seq): input, labels = train_dataset[i] train_data.append((input, labels)) print("%s loaded." % train_dataset.seq_list[i]) else: print('Loading training data ----------------------') train_data = [] train_labels = [] for i, (input, labels) in enumerate(train_dataset): train_data.append((input, labels)) train_labels.append(labels) print("%s loaded." % train_dataset.seq_list[i]) print('Loading testing data ----------------------') test_data = [] for i, (input, labels) in enumerate(test_dataset): test_data.append((input, labels)) print("%s loaded." 
% test_dataset.seq_list[i]) # for model_lstm if opt.use_lstm: rnn = RNN(opt.n_input, opt.n_hidden, opt.n_layers, opt.n_class, opt.BATCH_SIZE, opt.df, opt.dk, opt.nr).cuda() # use lstm as classifier else: rnn = RNN(opt.n_input, opt.n_hidden, opt.n_layers, opt.n_class, opt.use_lstm).cuda() # use fc as classifier print(rnn.state_dict().keys()) optimizer = torch.optim.Adam(rnn.parameters(), lr=opt.LR) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.5) # set up scheduler # Keep track of losses for plotting best_loss = 10000 all_losses = [] current_loss = 3 FAA = [] # false area ration on test set INTAP = [] # overall interval AP on test set save_epoch = [] # list to save the model saving epoch # train model total_step = len(train_data) for epoch in range(opt.EPOCH): all_losses.append(current_loss) current_loss = 0 for i, (input, labels) in enumerate(train_data): optimizer.zero_grad() feats = torch.from_numpy(input).float() nframes, _ = input.shape feats = feats.reshape(-1, nframes, 273).cuda() #feats = feats.reshape(-1, nframes, opt.n_input*6).cuda() # change label 0 to -1 labels[labels<1]=-1 labels = torch.from_numpy(labels) labels = labels.float().cuda() # Forward pass outputs = rnn(feats) outputs = torch.squeeze(outputs) loss = F.mse_loss(outputs, labels) # print model parameter if loss is NaN if opt.verbose > 0: if torch.isnan(loss): print_model(rnn) print('Epoch {}, step {}'.format(epoch+1, i+1)) raw_input("Press Enter to continue ...") # Backward and optimize loss.backward() # This line is used to prevent the vanishing / exploding gradient problem torch.nn.utils.clip_grad_norm_(rnn.parameters(), opt.clip_grad) optimizer.step() print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' .format(epoch + 1, opt.EPOCH, i + 1, total_step, loss.item())) current_loss = current_loss + loss.item() writer.add_scalar('loss/loss', current_loss, epoch) scheduler.step(current_loss) # update lr if needed # save model parameters and loss figure if 
((epoch+1) % opt.save_every) == 0: # compute false area on test set if not opt.DEBUG: false_area, overall_IAPlist = evaluate_MA(rnn, test_data) FAA.append(torch.sum(false_area).item()) INTAP.append(overall_IAPlist[-2]) # get the interval AP at threshold 0.8 save_epoch.append(epoch+1) if FAA[-1] == min(FAA): # if has the minimum test error, save model checkpoint_dir = './model/{}/'.format(opt.checkpoint) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) if epoch > 100: model_str = checkpoint_dir + 'net-best.pth' torch.save(rnn, model_str) checkpoint_dir = './model/{}/'.format(opt.checkpoint) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) if opt.verbose == 2: print('Making dir: {}'.format(checkpoint_dir)) model_str = checkpoint_dir + 'net-{}'.format(str(epoch+1)) if opt.verbose > 0: print('Model saved to: {}.pth'.format(model_str)) if epoch >= 100: torch.save(rnn, model_str+'.pth') # save interval AP np.savetxt(model_str + 'AP.csv', np.asarray(overall_IAPlist), fmt='%0.5f') # save miss detection np.savetxt(model_str + 'MD.txt', np.asarray(FAA), fmt='%0.5f') # draw miss detection v.s. epoch figure fig, ax1 = plt.subplots() color = 'tab:red' ax1.plot(range(epoch+1), all_losses, color=color) ax1.set_xlabel('Epochs') ax1.set_ylabel('Loss', color=color) ax2 = ax1.twinx() color = 'tab:blue' ax2.set_ylabel('Miss detection area ratio', color=color) ax2.plot(save_epoch, FAA, 'bd') fig.savefig(model_str+'.png') plt.close() # draw intervalAP v.s. 
epoch figure fig1, ax3 = plt.subplots() color = 'tab:red' ax3.plot(range(epoch+1), all_losses, color=color) ax3.set_xlabel('Epochs') ax3.set_ylabel('Loss', color=color) ax4 = ax3.twinx() color = 'tab:blue' ax4.set_ylabel('Overall interval AP', color=color) ax4.plot(save_epoch, INTAP, 'bd') fig1.savefig(model_str+'_AP.png') plt.close() # print the loss on training set and evaluation metrics on test set to file orig_stdout = sys.stdout f = open(checkpoint_dir + '/loss.txt', 'w') sys.stdout = f print('Loss over epochs:') print(all_losses) if not opt.DEBUG: print('Miss detection area ratio:') print(FAA) f.close() sys.stdout = orig_stdout
41.051163
140
0.643327
1,205
8,826
4.589212
0.236515
0.029295
0.055335
0.01302
0.216817
0.157324
0.125859
0.105967
0.080289
0.066546
0
0.017329
0.215386
8,826
214
141
41.242991
0.781227
0.102198
0
0.207101
0
0
0.186748
0.012923
0
0
0
0
0
1
0.005917
false
0
0.071006
0
0.076923
0.12426
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d1ab6609be43e89cc309b21cfc303cd71c0ffae
5,617
py
Python
tests/tensor/test_tensor_data.py
aspfohl/tinytorch
99ac1847b798f755d12876667ec7c3a6c7149857
[ "MIT" ]
null
null
null
tests/tensor/test_tensor_data.py
aspfohl/tinytorch
99ac1847b798f755d12876667ec7c3a6c7149857
[ "MIT" ]
null
null
null
tests/tensor/test_tensor_data.py
aspfohl/tinytorch
99ac1847b798f755d12876667ec7c3a6c7149857
[ "MIT" ]
null
null
null
import pytest from hypothesis import given from hypothesis.strategies import data from numpy import array, array_equal from tests.strategies import indices, tensor_data from tinytorch.tensor.data import ( IndexingError, TensorData, broadcast_index, shape_broadcast, ) # Check basic properties of layout and strides. def test_layout(): "Test basis properties of layout and strides" data = [0] * 3 * 5 tensor_data = TensorData(data, (3, 5), (5, 1)) assert tensor_data.is_contiguous() assert tensor_data.shape == (3, 5) assert tensor_data.index((1, 0)) == 5 assert tensor_data.index((1, 2)) == 7 tensor_data = TensorData(data, (5, 3), (1, 5)) assert tensor_data.shape == (5, 3) assert not tensor_data.is_contiguous() data = [0] * 4 * 2 * 2 tensor_data = TensorData(data, (4, 2, 2)) assert tensor_data.strides == (4, 2, 1) @pytest.mark.xfail def test_layout_bad(): "Test basis properties of layout and strides" data = [0] * 3 * 5 TensorData(data, (3, 5), (6,)) @given(tensor_data()) def test_enumeration(tensor_data): "Test enumeration of tensor_datas." indices = list(tensor_data.indices()) # Check that enough positions are enumerated. assert len(indices) == tensor_data.size # Check that all positions are enumerated only once. assert len(set(tensor_data.indices())) == len(indices) # Check that all indices are within the shape. for ind in tensor_data.indices(): for i, p in enumerate(ind): assert p >= 0 assert p < tensor_data.shape[i] @given(tensor_data()) def test_index(tensor_data): "Test enumeration of tensor_data." # Check that all indices are within the size. 
for ind in tensor_data.indices(): pos = tensor_data.index(ind) assert pos >= 0 and pos < tensor_data.size base = [0] * tensor_data.dims with pytest.raises(IndexingError): base[0] = -1 tensor_data.index(tuple(base)) if tensor_data.dims > 1: with pytest.raises(IndexingError): base = [0] * (tensor_data.dims - 1) tensor_data.index(tuple(base)) @given(data()) def test_permute(data): td = data.draw(tensor_data()) ind = data.draw(indices(td)) td_rev = td.permute(*list(reversed(range(td.dims)))) assert td.index(ind) == td_rev.index(tuple(reversed(ind))) td2 = td_rev.permute(*list(reversed(range(td_rev.dims)))) assert td.index(ind) == td2.index(ind) # Check basic properties of broadcasting. def test_broadcast_index_smaller(): "Tests broadcast mapping between higher and lower dim tensors" out_index = array([0, 0]) def _broadcast_index(big_index): return broadcast_index( big_index=big_index, big_shape=array([2, 2, 3]), shape=array([2, 1]), out_index=out_index, ) for big_index, expected_out_index in ( ([0, 0, 0], [0, 0]), ([0, 0, 1], [0, 0]), ([0, 0, 2], [0, 0]), ([0, 1, 0], [1, 0]), ([0, 1, 1], [1, 0]), ([0, 1, 2], [1, 0]), ([1, 0, 0], [0, 0]), ([1, 0, 1], [0, 0]), ([1, 0, 2], [0, 0]), ([1, 1, 0], [1, 0]), ([1, 1, 1], [1, 0]), ([1, 1, 2], [1, 0]), ): print(big_index, expected_out_index) _broadcast_index(big_index=array(big_index)) assert array_equal(out_index, expected_out_index) def test_broadcast_index(): out_index = array([0, 0]) def _broadcast_index(big_index): return broadcast_index( big_index=big_index, big_shape=array([3, 2]), shape=array([3, 1]), out_index=out_index, ) for big_index, expected_out_index in ( ([0, 0], [0, 0]), ([0, 1], [0, 0]), ([1, 0], [1, 0]), ([1, 1], [1, 0]), ([2, 0], [2, 0]), ([2, 1], [2, 0]), ): _broadcast_index(big_index=array(big_index)) assert array_equal(out_index, array(expected_out_index)) def test_broadcast_index_constant(): out_index = array([0]) def _broadcast_index(big_index): return broadcast_index( big_index=big_index, 
big_shape=array([3, 2]), shape=array([1]), out_index=out_index, ) expected_out_index = array([0]) for big_index in ([0, 0, 0], [0, 0, 1], [0, 0, 2], [1, 0, 0], [1, 0, 1], [1, 0, 2]): _broadcast_index(big_index=array(big_index)) assert array_equal(out_index, expected_out_index) @pytest.mark.parametrize( "shape1, shape2, expected_return", ( ((1,), (5, 5), (5, 5)), ((5, 5), (1,), (5, 5)), ((1, 5, 5), (5, 5), (1, 5, 5)), ((5, 1, 5, 1), (1, 5, 1, 5), (5, 5, 5, 5)), ((2, 5), (5,), (2, 5)), ), ) def test_shape_broadcast(shape1, shape2, expected_return): c = shape_broadcast(shape1, shape2) assert c == expected_return @pytest.mark.parametrize( "shape1, shape2", ( # 2nd-indexed dimension (7 and 5) can't be broadcasted ((5, 7, 5, 1), (1, 5, 1, 5)), # 2nd-indexed dimension (2 and 5) can't be broadcasted ((5, 2), (5,)), # shape1 can't be empty (tuple(), (1,)), # shape2 can't be empty ((1,), tuple()), # multiples don't work ((4,), (2,)), ), ) def test_shape_broadcast_errors(shape1, shape2): with pytest.raises(IndexingError): c = shape_broadcast(shape1, shape2) print(c) @given(tensor_data()) def test_string(tensor_data): tensor_data.to_string()
27.534314
88
0.574862
794
5,617
3.896725
0.141058
0.10989
0.015514
0.012928
0.52521
0.394958
0.293794
0.231739
0.230123
0.224305
0
0.057734
0.269183
5,617
203
89
27.669951
0.695981
0.116966
0
0.298013
0
0
0.049564
0
0
0
0
0
0.119205
1
0.092715
false
0
0.039735
0.019868
0.152318
0.013245
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d1d92e0aac0102261fb87134d9195f41601abbb
2,813
py
Python
aps/tokenizer/word.py
ishine/aps
c814dc5a8b0bff5efa7e1ecc23c6180e76b8e26c
[ "Apache-2.0" ]
117
2021-02-02T13:38:16.000Z
2022-03-16T05:40:25.000Z
aps/tokenizer/word.py
ishine/aps
c814dc5a8b0bff5efa7e1ecc23c6180e76b8e26c
[ "Apache-2.0" ]
3
2021-11-11T07:07:31.000Z
2021-11-20T15:25:42.000Z
aps/tokenizer/word.py
ishine/aps
c814dc5a8b0bff5efa7e1ecc23c6180e76b8e26c
[ "Apache-2.0" ]
19
2021-02-04T10:04:25.000Z
2022-02-16T05:24:44.000Z
#!/usr/bin/env python # Copyright 2021 Jian Wu # License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) from typing import List, Union from aps.tokenizer.base import TokenizerAbc, ApsTokenizer class WordBasedTokenizer(TokenizerAbc): """ Word based (word, character) tokenizer Args: filter_words (list): filter those words char (bool): use character unit or word unit space (str): insert space symbol between words """ def __init__(self, filter_words: List[str] = [], char: bool = False, space: str = ""): super(WordBasedTokenizer, self).__init__() self.char = char self.space = space self.filter_words = filter_words def encode(self, utt: Union[str, List[str]]) -> List[str]: if isinstance(utt, str): raw_tokens = utt.split() else: raw_tokens = utt kept_tokens = [] for tok in raw_tokens: # remove tokens is_filter_tok = tok in self.filter_words if is_filter_tok: continue # word => char if self.char and not is_filter_tok: toks = [t for t in tok] else: toks = [tok] kept_tokens += toks if self.space: kept_tokens += [self.space] if self.space: # remove last one kept_tokens = kept_tokens[:-1] return kept_tokens def decode(self, utt: Union[str, List[str]]) -> List[str]: if isinstance(utt, str): enc_tokens = utt.split() else: enc_tokens = utt if not self.char: return enc_tokens if self.space: strs = "".join(enc_tokens).replace(self.space, " ") else: strs = " ".join(enc_tokens) return strs.split(" ") @ApsTokenizer.register("word") class WordTokenizer(WordBasedTokenizer): """ Word tokenizer Args: filter_words (list): filter those words """ def __init__(self, filter_words: List[str] = []): super(WordTokenizer, self).__init__(filter_words=filter_words, char=False, space="") @ApsTokenizer.register("char") class CharTokenizer(WordBasedTokenizer): """ Character tokenizer Args: filter_words (list): filter those words space (str): insert space symbol between words """ def __init__(self, filter_words: List[str] = [], space: str = "<space>"): super(CharTokenizer, 
self).__init__(filter_words=filter_words, char=True, space=space)
30.247312
77
0.539637
300
2,813
4.87
0.273333
0.097878
0.061602
0.049281
0.323751
0.323751
0.323751
0.277207
0.227242
0.154689
0
0.005
0.360114
2,813
92
78
30.576087
0.806667
0.186989
0
0.163636
0
0
0.008182
0
0
0
0
0
0
1
0.090909
false
0
0.036364
0
0.236364
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d1d953211acad0e8c4ba6634015c410a59e3522
1,736
py
Python
tests/test_session.py
StenSipma/astrometry-client
11d5b0cd0ae41a18b5bbd7f5570af60dbfbd9cc6
[ "MIT" ]
1
2020-08-06T17:55:52.000Z
2020-08-06T17:55:52.000Z
tests/test_session.py
StenSipma/astrometry-client
11d5b0cd0ae41a18b5bbd7f5570af60dbfbd9cc6
[ "MIT" ]
1
2021-12-18T17:03:21.000Z
2021-12-19T12:33:16.000Z
tests/test_session.py
StenSipma/astrometry-client
11d5b0cd0ae41a18b5bbd7f5570af60dbfbd9cc6
[ "MIT" ]
null
null
null
import os from unittest import mock import pytest import requests from constants import VALID_KEY from utils import FunctionCalledException, function_called_raiser from astrometry_net_client import Session from astrometry_net_client.exceptions import APIKeyError, LoginFailedException some_key = "somekey" # Start of tests def test_session_key_input_invalid(): with pytest.raises(APIKeyError): Session() def test_session_key_input_string(): s = Session(some_key) assert not s.logged_in assert s.api_key == some_key def test_session_key_input_file(): s = Session(key_location="./tests/data/testkey") assert not s.logged_in assert s.api_key == some_key @mock.patch.dict(os.environ, {"ASTROMETRY_API_KEY": some_key}) def test_session_key_input_environment(): s = Session() assert not s.logged_in assert s.api_key == some_key def test_valid_session_login(mock_server, monkeypatch): session = Session(api_key=VALID_KEY) session.login() # login for the first time assert session.logged_in assert getattr(session, "key", None) # token exists original_key = session.key # We patch the post call to send an error if it is called. monkeypatch.setattr(requests, "post", function_called_raiser) session.login() # login should not be done now, as it is already done assert session.logged_in assert session.key == original_key # Here we force the login which should raise the patched exception with pytest.raises(FunctionCalledException): session.login(force=True) def test_invalid_session_login(mock_server): session = Session(api_key="invalid_key") with pytest.raises(LoginFailedException): session.login()
27.555556
78
0.75
243
1,736
5.127572
0.345679
0.064205
0.05618
0.054575
0.221509
0.142857
0.142857
0.142857
0.142857
0.102729
0
0
0.175115
1,736
62
79
28
0.870112
0.130184
0
0.268293
0
0
0.041916
0
0
0
0
0
0.243902
1
0.146341
false
0
0.195122
0
0.341463
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d1e173ec4f6da5495185d4e64e6ce6be159c672
2,184
py
Python
all_repos_depends/lang/python.py
mxr/all-repos-depends
dcf715dbfb7182899e2412dbfaaf1ef4cc50865c
[ "MIT" ]
11
2018-04-23T06:41:55.000Z
2022-01-27T13:37:59.000Z
all_repos_depends/lang/python.py
mxr/all-repos-depends
dcf715dbfb7182899e2412dbfaaf1ef4cc50865c
[ "MIT" ]
2
2018-04-23T06:03:18.000Z
2018-04-23T06:03:51.000Z
all_repos_depends/lang/python.py
mxr/all-repos-depends
dcf715dbfb7182899e2412dbfaaf1ef4cc50865c
[ "MIT" ]
2
2021-02-01T15:02:14.000Z
2021-09-25T15:49:44.000Z
import ast import os.path from typing import Iterable from packaging.requirements import InvalidRequirement from packaging.requirements import Requirement from packaging.utils import canonicalize_name from all_repos_depends.errors import DependsError from all_repos_depends.types import Depends NAME = 'python' def to_name(s: str) -> str: return s.lower().replace('_', '-') def load_setup_py_ast() -> ast.AST: with open('setup.py', 'rb') as f: try: return ast.parse(f.read(), filename='setup.py') except SyntaxError: raise DependsError('Had setup.py but could not be parsed') def node_is_setup_call(node: ast.Call) -> bool: return ( # setup( (isinstance(node.func, ast.Name) and node.func.id == 'setup') or # setuptools.setup( ( isinstance(node.func, ast.Attribute) and isinstance(node.func.value, ast.Name) and node.func.value.id == 'setuptools' and node.func.attr == 'setup' ) ) def to_depends(relationship: str, requirement_s: str) -> Depends: try: req = Requirement(requirement_s) except InvalidRequirement: return Depends(relationship, NAME, requirement_s, ' (unable to parse)') spec_parts = [] if req.extras: spec_parts.append('[{}]'.format(','.join(sorted(req.extras)))) if req.specifier: spec_parts.append(str(req.specifier)) if req.marker: spec_parts.append(f';{req.marker}') spec = ''.join(spec_parts) return Depends(relationship, NAME, canonicalize_name(req.name), spec) def from_reqs_file(relationship: str, filename: str) -> Iterable[Depends]: with open(filename) as f: for line in f: line, _, _ = line.partition('#') line = line.strip() # local editable paths aren't all that interesting if line.startswith('-e '): _, _, path = line.partition(' ') path = os.path.join(os.path.dirname(filename), path) if os.path.exists(path): continue if line: yield to_depends(relationship, line)
29.513514
79
0.617674
264
2,184
4.996212
0.359848
0.036391
0.04094
0.047005
0.064443
0
0
0
0
0
0
0
0.264652
2,184
73
80
29.917808
0.821295
0.033425
0
0.037736
0
0
0.058377
0
0
0
0
0
0
1
0.09434
false
0
0.150943
0.037736
0.339623
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d1fd039657947bcd1efbe3cb094639c4aa0c630
2,829
py
Python
mac/macos_app_audit.py
airdata/scripts
b24d62d70bbc70f02b3758ea14e47cc2b34646a9
[ "Apache-2.0" ]
null
null
null
mac/macos_app_audit.py
airdata/scripts
b24d62d70bbc70f02b3758ea14e47cc2b34646a9
[ "Apache-2.0" ]
null
null
null
mac/macos_app_audit.py
airdata/scripts
b24d62d70bbc70f02b3758ea14e47cc2b34646a9
[ "Apache-2.0" ]
null
null
null
from os import listdir from os.path import isfile, join class Command(object): """ Run a command and capture it's output string, error string and exit status Source: http://stackoverflow.com/a/13848259/354247 """ def __init__(self, command): self.command = command def run(self, shell=True): import subprocess as sp process = sp.Popen(self.command, shell = shell, stdout = sp.PIPE, stderr = sp.PIPE) self.pid = process.pid self.output, self.error = process.communicate() self.failed = process.returncode return self @property def returncode(self): return self.failed default_applications = ['Utilities','App Store.app','Automator.app','Calculator.app','Calendar.app','Chess.app','Contacts.app','Dashboard.app','Dictionary.app','DVD Player.app','FaceTime.app','Font Book.app','iBooks.app','Image Capture.app','iTunes.app','Launchpad.app','Mail.app','Maps.app','Messages.app','Mission Control.app','Notes.app','Paste.app','Photo Booth.app','Photos.app','Preview.app','QuickTime Player.app','Reminders.app','Safari.app','Siri.app','Stickies.app','System Preferences.app','TextEdit.app','Time Machine.app','Utilities.app'] remaps = { "iTerm.app": "iTerm2", # brew cask install iterm2 gives iTerm.app "Alfred 3.app": "Alfred" # brew cask install alfred gives Alfred 3.app } mypath = "/Applications" installed_applications = [f for f in listdir(mypath) if not isfile(join(mypath, f))] cask_packages = Command('brew cask list').run().output.split() mac_app_store_apps = Command('mas list').run().output.splitlines() # collect applications that are not default ones. 
user_applications = [] for x in installed_applications: #first remap the names if(x in remaps): name = remaps[x] else: name = x #then check if they are defaults if name not in default_applications: user_applications.append(name) # determine which applications weren't installed via brew cask unmanged_applications = [] for x in user_applications: strip_dotapp = x[:-4] if (".app" in x) else x trimmed = strip_dotapp.replace(" ", "-").lower() is_casked = trimmed in cask_packages is_mas = any(strip_dotapp in s for s in mac_app_store_apps) # print('{} -> {}: {}|{}'.format(x, trimmed, is_casked, is_mas)) if(not is_casked and not is_mas): unmanged_applications.append(x) # print("-------------------") print("You have {} default applications.".format(len(default_applications))) print("Tou have {} brew cask applications.".format(len(cask_packages))) print("Tou have {} app store applications.".format(len(mac_app_store_apps))) print("You have {} user applications Applications not managed by brew cask or app store...\n------".format(len(unmanged_applications))) for x in unmanged_applications: print(x) # print(mac_app_store_apps)
41.602941
551
0.70555
400
2,829
4.89
0.3825
0.02863
0.022495
0.030675
0.047035
0
0
0
0
0
0
0.007842
0.143514
2,829
68
552
41.602941
0.799422
0.17356
0
0
0
0
0.29987
0
0
0
0
0
0
1
0.065217
false
0
0.065217
0.021739
0.195652
0.108696
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d20e8c21375abfa3aefb4fb09790b9ecbec1d58
6,911
py
Python
compress/algorithms/lzw.py
ShellCode33/CompressionAlgorithms
3b2e7b497ef0af4ba7ac8bc6f4d6e77ea4c4aedc
[ "MIT" ]
null
null
null
compress/algorithms/lzw.py
ShellCode33/CompressionAlgorithms
3b2e7b497ef0af4ba7ac8bc6f4d6e77ea4c4aedc
[ "MIT" ]
null
null
null
compress/algorithms/lzw.py
ShellCode33/CompressionAlgorithms
3b2e7b497ef0af4ba7ac8bc6f4d6e77ea4c4aedc
[ "MIT" ]
null
null
null
# coding: utf-8 class LZW(object): """ Implementation of the LZW algorithm. Attributes ---------- translation_dict : dict Association between repeated bytes sequences and integers. Examples -------- An array of bytes like ['\x41', '\x42', '\x43', '\x0A', '\x00'] can be represented by an integer like 256. It means that one integer is able to represent multiple bytes at once. Notes ----- On the internet we usually find this algorithm using integers that are coded on 12bits. But I think it's a waste of space and it can be optimized by sending along the encoded content, the size of the integers. So instead of sending 12 bits integers, we will be able to send smaller (and bigger) integers. The size of the integers will be determined based on the biggest integer in the dictionary. This integer will be on 5 bits, it means other integers can be coded on 2^5 = 32 bits max. Which means the biggest supported dictionary is 2^32 = 4294967296 long. Which is more than enough. """ def __init__(self, verbose=False): self.verbose = verbose self.translation_dict = None self.max_size_integer_size = 5 # The integers size is encoded on 5 bits by default self.integers_size_bits = 0 # Max value must be 2**max_size_integer_size (= 32 by default) def __build_bytes_dictionary(self, decompression=False): if decompression: self.translation_dict = {byte: bytes([byte]) for byte in range(256)} else: self.translation_dict = {bytes([byte]): byte for byte in range(256)} def __compress(self, bytes_list): self.__build_bytes_dictionary() biggest_integer = 0 compressed = [] pattern = bytes([]) for byte in bytes_list: byte_as_array = bytes([byte]) current = pattern + byte_as_array if current in self.translation_dict: pattern = current else: self.translation_dict[current] = len(self.translation_dict) compressed.append(self.translation_dict[pattern]) if biggest_integer < self.translation_dict[pattern]: biggest_integer = self.translation_dict[pattern] pattern = byte_as_array 
compressed.append(self.translation_dict[pattern]) if biggest_integer < self.translation_dict[pattern]: biggest_integer = self.translation_dict[pattern] if biggest_integer > 2 ** (2 ** self.max_size_integer_size): # Shouldn't happen raise ValueError("Can't encode such value... Maybe you should increase the size of max_size_integer_size.") self.integers_size_bits = biggest_integer.bit_length() if self.verbose: print("The biggest integer is {} so integers will be coded on {} bits.".format(biggest_integer, self.integers_size_bits)) return compressed def compress_file(self, input_filename, output_filename): with open(input_filename, "rb") as input_file: bytes_list = input_file.read() if not bytes_list: raise IOError("File is empty !") if self.verbose: print("Input size : {} bytes.".format(len(bytes_list))) compressed = self.__compress(bytes_list) if self.verbose: print("Assembling integers together...") # Originally, each integer was added to a big one using bits shifting, but this method was way to slow. # Strings are better for this purpose. binary_string_compressed = "1" # Padding with a 1 to keep the first zeros when converting to integer # Add binary representation of the integers bit-length binary_string_compressed += format(self.integers_size_bits, "0{}b".format(self.max_size_integer_size)) # https://waymoot.org/home/python_string/ # According to this, the fastest way to concatenate strings is to use join() on a list bin_format = "0{}b".format(self.integers_size_bits) binary_string_compressed += ''.join([format(byte, bin_format) for byte in compressed]) if self.verbose: print("Done.") big_int_compress = int(binary_string_compressed, 2) to_store_in_file = big_int_compress.to_bytes((big_int_compress.bit_length() + 7) // 8, 'big') total_file_size = len(to_store_in_file) if self.verbose: print("Output : {} bytes".format(total_file_size)) if len(bytes_list) <= total_file_size: raise Exception("Aborted. No gain, you shouldn't compress that file. 
(+{} bytes)".format( total_file_size - len(bytes_list))) compression_rate = 100 - total_file_size * 100 / len(bytes_list) # Print anyway, even when not in verbose mode print("Compression gain : {0:.2f}%".format(compression_rate)) with open(output_filename, "wb") as output_file: output_file.write(to_store_in_file) return compression_rate def __decompress(self, compressed_bytes_list): self.__build_bytes_dictionary(decompression=True) previous_code = compressed_bytes_list[0] decompressed = self.translation_dict[previous_code] first_byte = None for new_code in compressed_bytes_list[1:]: try: translation = self.translation_dict[new_code] except KeyError: translation = first_byte + self.translation_dict[previous_code] decompressed += translation first_byte = bytes([translation[0]]) self.translation_dict[len(self.translation_dict)] = self.translation_dict[previous_code] + first_byte previous_code = new_code return decompressed def decompress_file(self, input_filename, output_filename): with open(input_filename, "rb") as input_file: bytes_list = input_file.read() if not bytes_list: raise IOError("File is empty !") big_int_compressed = int.from_bytes(bytes_list, 'big') bits_string_compressed = format(big_int_compressed, "0b") self.integers_size_bits = int(bits_string_compressed[1:self.max_size_integer_size + 1], 2) # Skip first pad bit if self.verbose: print("Integers are {} bits long.".format(self.integers_size_bits)) compressed = [] for i in range(self.max_size_integer_size + 1, len(bits_string_compressed), self.integers_size_bits): compressed.append(int(bits_string_compressed[i:i + self.integers_size_bits], 2)) decompressed = self.__decompress(compressed) with open(output_filename, "wb") as output_file: output_file.write(decompressed)
38.825843
120
0.64911
880
6,911
4.871591
0.244318
0.06648
0.079776
0.041987
0.275484
0.199673
0.163751
0.141358
0.141358
0.141358
0
0.014596
0.266387
6,911
177
121
39.045198
0.830966
0.229779
0
0.268041
0
0
0.074858
0.004159
0
0
0
0
0
1
0.061856
false
0
0
0
0.103093
0.072165
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d20f94306c2d2e2215af2edce02e11edf2054d9
1,322
py
Python
app/models.py
ariqfadlan/donorojo-db-api
dd1a3241ead5738c94eb77ed0bbb23b26582618f
[ "MIT" ]
null
null
null
app/models.py
ariqfadlan/donorojo-db-api
dd1a3241ead5738c94eb77ed0bbb23b26582618f
[ "MIT" ]
null
null
null
app/models.py
ariqfadlan/donorojo-db-api
dd1a3241ead5738c94eb77ed0bbb23b26582618f
[ "MIT" ]
null
null
null
""" Contains database models """ from sqlalchemy import Column, ForeignKey, Integer, String, Float from sqlalchemy.orm import relationship from .database import Base class TouristAttraction(Base): __tablename__ = "tourist_attraction" id = Column(Integer, primary_key=True, index=True) name = Column(String(50), nullable=False) category = Column(String(255), nullable=False) address = relationship("Address", back_populates="tourist_attraction", uselist=False) location = relationship("Location", back_populates="tourist_attraction", uselist=False) class Address(Base): __tablename__ = "address" tourist_attraction_id = Column(Integer, ForeignKey("tourist_attraction.id"), primary_key=True) subvillage = Column(String(255)) village = Column(String(255)) district = Column(String(255)) regency = Column(String(255)) province = Column(String(255)) tourist_attraction = relationship("TouristAttraction", back_populates="address") class Location(Base): __tablename__ = "location" tourist_attraction_id = Column(Integer, ForeignKey("tourist_attraction.id"), primary_key=True) latitude = Column(Float, nullable=False) longitude = Column(Float, nullable=False) tourist_attraction = relationship("TouristAttraction", back_populates="location")
33.05
98
0.746596
142
1,322
6.739437
0.295775
0.159875
0.094044
0.07837
0.401254
0.367816
0.15674
0.15674
0.15674
0.15674
0
0.017621
0.141452
1,322
39
99
33.897436
0.825551
0.018154
0
0.08
0
0
0.135659
0.032558
0
0
0
0
0
1
0
false
0
0.12
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d2612bdf9b9d5fe13c734ed2826b9452f048d19
1,096
py
Python
hackerrank_contests/101Hack44/prime.py
rishabhiitbhu/hackerrank
acc300851c81a29472177f15fd8b56ebebe853ea
[ "MIT" ]
null
null
null
hackerrank_contests/101Hack44/prime.py
rishabhiitbhu/hackerrank
acc300851c81a29472177f15fd8b56ebebe853ea
[ "MIT" ]
null
null
null
hackerrank_contests/101Hack44/prime.py
rishabhiitbhu/hackerrank
acc300851c81a29472177f15fd8b56ebebe853ea
[ "MIT" ]
1
2020-01-30T06:47:09.000Z
2020-01-30T06:47:09.000Z
def rwh_primes2(n):
    """Return the list of primes p with 2 <= p < n (wheel-2,3 sieve).

    Port of the well-known fast pure-Python sieve from
    http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
    """
    # Bug fix: the wheel arithmetic below assumes n >= 6; for smaller n the
    # original returned spurious primes (e.g. [2, 3] for n = 2).
    if n < 6:
        return [p for p in (2, 3, 5) if p < n]
    correction = (n % 6 > 1)
    n = {0: n, 1: n - 1, 2: n + 4, 3: n + 3, 4: n + 2, 5: n + 1}[n % 6]
    sieve = [True] * (n // 3)
    sieve[0] = False
    for i in range(int(n ** 0.5) // 3 + 1):
        if sieve[i]:
            k = 3 * i + 1 | 1
            # Strike both residue classes (k*k stepping 2k) in one slice each.
            sieve[((k * k) // 3)::2 * k] = [False] * ((n // 6 - (k * k) // 6 - 1) // k + 1)
            sieve[(k * k + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = \
                [False] * ((n // 6 - (k * k + 4 * k - 2 * k * (i & 1)) // 6 - 1) // k + 1)
    return [2, 3] + [3 * i + 1 | 1 for i in range(1, n // 3 - correction) if sieve[i]]


# a = rwh_primes2(100)
# print(a)

""" Input n>=6, Returns a list of primes, 2 <= p < n """
def sieve_for_primes_to(n):
    """Return the list of primes p with 2 <= p < n (odd-only sieve).

    Bug fix: the original unconditionally prepended 2, so it returned [2]
    even for n <= 2 (this script itself calls it with 1); small inputs are
    now guarded and yield [].
    """
    if n <= 2:
        return []
    size = n // 2                      # sieve[i] represents the odd number 2*i + 1
    sieve = [1] * size
    limit = int(n ** 0.5)
    for i in range(1, limit):
        if sieve[i]:
            val = 2 * i + 1
            tmp = ((size - 1) - i) // val
            sieve[i + val::val] = [0] * tmp
    return [2] + [i * 2 + 1 for i, v in enumerate(sieve) if v and i > 0]


print(sieve_for_primes_to(3))
print(sieve_for_primes_to(1))
print(sieve_for_primes_to(100))
33.212121
110
0.519161
222
1,096
2.5
0.22973
0.018018
0.100901
0.115315
0.225225
0.068468
0.068468
0.068468
0
0
0
0.11339
0.243613
1,096
32
111
34.25
0.556092
0.124088
0
0.083333
0
0
0
0
0
0
0
0
0
1
0.083333
false
0
0
0
0.166667
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d2bc7d987bd63f2af30edb8519069c52527c5c7
387
py
Python
General Data Preprocessing/copyFile.py
yuxiawang1992/Python-Code
d457a1fd61742dfac08a82a26b66703e5ff6f780
[ "Apache-2.0" ]
null
null
null
General Data Preprocessing/copyFile.py
yuxiawang1992/Python-Code
d457a1fd61742dfac08a82a26b66703e5ff6f780
[ "Apache-2.0" ]
null
null
null
General Data Preprocessing/copyFile.py
yuxiawang1992/Python-Code
d457a1fd61742dfac08a82a26b66703e5ff6f780
[ "Apache-2.0" ]
null
null
null
#Python 3.4.3 #coding=gbk # copy file wangyuxia 20160920 import sys, shutil, os, string path = "E:\\test for qgis\\" target_path = "E:\\test for qgis\\HourScale\\" for i in range(2,31): for j in range(0,24): filename = 'N'+str(i).zfill(2)+str(j).zfill(2) shutil.copyfile(path+'d_02.hdr',target_path+filename+'.hdr') print("------------finished---------")
25.8
68
0.596899
61
387
3.737705
0.639344
0.04386
0.078947
0.105263
0.140351
0
0
0
0
0
0
0.065831
0.175711
387
14
69
27.642857
0.648903
0.134367
0
0
0
0
0.274096
0.087349
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d2c26cb802d2c6da46e391e982eacb22cc6b08d
3,581
py
Python
convert_to_onnx.py
bhahn2004/FaceBoxes.PyTorch
be01c2449c6efa2a976a701dd8a052aa903a32b4
[ "MIT" ]
null
null
null
convert_to_onnx.py
bhahn2004/FaceBoxes.PyTorch
be01c2449c6efa2a976a701dd8a052aa903a32b4
[ "MIT" ]
null
null
null
convert_to_onnx.py
bhahn2004/FaceBoxes.PyTorch
be01c2449c6efa2a976a701dd8a052aa903a32b4
[ "MIT" ]
null
null
null
"""Export a pretrained FaceBoxes PyTorch checkpoint to ONNX (script, runs on import)."""
import sys
from scipy.special import softmax
import torch.onnx
import onnxruntime as ort
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from pytorch2keras.converter import pytorch_to_keras

from models.faceboxes import FaceBoxes

# Export configuration.
input_dim = 1024
num_classes = 2
model_path = "weights/FaceBoxesProd.pth"

net = FaceBoxes('train', input_dim, num_classes)


def check_keys(model, pretrained_state_dict):
    """Print the overlap between checkpoint keys and model keys.

    Raises AssertionError when the checkpoint shares no keys with the model;
    otherwise returns True.
    """
    ckpt_keys = set(pretrained_state_dict.keys())
    model_keys = set(model.state_dict().keys())
    used_pretrained_keys = model_keys & ckpt_keys
    unused_pretrained_keys = ckpt_keys - model_keys
    missing_keys = model_keys - ckpt_keys
    print('Missing keys:{}'.format(len(missing_keys)))
    print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
    print('Used keys:{}'.format(len(used_pretrained_keys)))
    assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
    return True


def remove_prefix(state_dict, prefix):
    ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
    print('remove prefix \'{}\''.format(prefix))
    # Strip the prefix only when present; keys without it pass through unchanged.
    f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
    return {f(key): value for key, value in state_dict.items()}


def load_model(model, pretrained_path, load_to_cpu):
    """Load a checkpoint into `model`, tolerating DataParallel 'module.' prefixes.

    load_to_cpu selects the torch.load map_location (CPU vs current CUDA device).
    """
    print('Loading pretrained model from {}'.format(pretrained_path))
    if load_to_cpu:
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
    else:
        device = torch.cuda.current_device()
        pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
    # Checkpoints may wrap the weights under a 'state_dict' key.
    if "state_dict" in pretrained_dict.keys():
        pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
    else:
        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
    check_keys(model, pretrained_dict)
    # strict=False: tolerate the missing keys reported by check_keys above.
    model.load_state_dict(pretrained_dict, strict=False)
    return model


# Script body: load weights, move to GPU, export.
net = load_model(net, model_path, False)
net.eval()
net.to("cuda")

model_name = model_path.split("/")[-1].split(".")[0]
onnx_model_path = f"models/onnx/base-model.onnx"

# export ONNX model
dummy_input = torch.randn(1, 3, input_dim, input_dim).to("cuda")
torch.onnx.export(net, dummy_input, onnx_model_path, verbose=False,
                  input_names=['input'], output_names=['output'])

"""
# try using pytorch2keras
keras_model = pytorch_to_keras(net, dummy_input, [(3, input_dim, input_dim)])
keras_model_path = f"models/onnx/base-model"
#keras_model.save(model_path)

# 0. print PyTorch outputs
out = net(dummy_input)
dummy_input = dummy_input.cpu().detach().numpy()
out = out.cpu().detach().numpy()
loc = out[:, :, 2:]
conf = out[:, :, :2]
scores = softmax(conf, axis=-1)
print(scores)

# 1. check if ONNX outputs are the same
ort_session = ort.InferenceSession(onnx_model_path)
input_name = ort_session.get_inputs()[0].name
out = ort_session.run(None, {input_name: dummy_input})[0]
loc = out[:, :, 2:]
conf = out[:, :, :2]
scores = softmax(conf, axis=-1)
print(scores)

# 2. check if Keras outputs are the same
keras_model_path = f"models/onnx/base-model"
keras_model = tf.keras.models.load_model(keras_model_path)
out = keras_model.predict(dummy_input)
loc = out[:, :, 2:]
conf = out[:, :, :2]
scores = softmax(conf, axis=-1)
print(scores)

# 3. check if intermediate results of Keras are the same
test_fn = K.function([keras_model.input], [keras_model.get_layer('334').output[0]])
test_out = test_fn(dummy_input)
print(np.round(np.array(test_out), 4)[:30])
"""
33.46729
115
0.729405
532
3,581
4.691729
0.25188
0.036058
0.020833
0.019231
0.223958
0.19351
0.161458
0.14984
0.14984
0.14984
0
0.011974
0.137113
3,581
106
116
33.783019
0.795793
0.029601
0
0.040816
0
0
0.108238
0.022787
0
0
0
0
0.020408
1
0.061224
false
0
0.183673
0
0.306122
0.102041
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d2f4723ec751e23b2b4a9d81dfaceee08d127d9
3,292
py
Python
x2py/links/strategies/buffer_transform_strategy.py
jaykang920/x2py
b8bd473f94ff4b9576e984cc384f4159ab71278d
[ "MIT" ]
null
null
null
x2py/links/strategies/buffer_transform_strategy.py
jaykang920/x2py
b8bd473f94ff4b9576e984cc384f4159ab71278d
[ "MIT" ]
1
2019-06-05T09:35:09.000Z
2020-07-02T09:46:46.000Z
x2py/links/strategies/buffer_transform_strategy.py
jaykang920/x2py
b8bd473f94ff4b9576e984cc384f4159ab71278d
[ "MIT" ]
null
null
null
# Copyright (c) 2017, 2018 Jae-jun Kang
# See the file LICENSE for details.

from x2py.event_factory import EventFactory
from x2py.links.link_events import *
from x2py.links.strategy import ChannelStrategy
from x2py.util.trace import Trace


class BufferTransformStrategy(ChannelStrategy):
    """Channel strategy that negotiates a buffer transform via a three-way
    handshake (HandshakeReq -> HandshakeResp -> HandshakeAck) per session."""

    # Register the handshake event types once, at class-definition time.
    EventFactory.register_type(HandshakeReq)
    EventFactory.register_type(HandshakeResp)
    EventFactory.register_type(HandshakeAck)

    def __init__(self, buffer_transform=None):
        # Prototype transform; each session gets its own clone (see init_handshake).
        self.buffer_transform = buffer_transform

    def before_session_setup(self, session):
        """Attach a per-session sub-strategy to the new session."""
        session_strategy = BufferTransformSessionStrategy()
        session_strategy.session = session
        session.channel_strategy = session_strategy

    def init_handshake(self, session):
        """Start the handshake by sending HandshakeReq with our init data."""
        if self.buffer_transform is None:
            return
        session_strategy = session.channel_strategy
        # Clone so concurrent sessions never share transform state.
        buffer_transform = self.buffer_transform.clone()
        session_strategy.buffer_transform = buffer_transform
        session.send(HandshakeReq().setattrs(
            _transform = False,  # handshake events themselves are never transformed
            data = buffer_transform.init_handshake()
        ))

    def cleanup(self):
        if self.buffer_transform is None:
            return
        self.buffer_transform.cleanup()
        self.buffer_transform = None


class BufferTransformSessionStrategy(ChannelStrategy.SubStrategy):
    """Per-session handshake state machine and transform applier."""

    def __init__(self):
        self.buffer_transform = None
        # rx/tx readiness flip independently: rx after a successful
        # fini_handshake, tx after the peer's positive HandshakeAck.
        self.rx_transform_ready = False
        self.tx_transform_ready = False

    def process(self, event):
        """Consume handshake events; return True if the event was handled."""
        type_id = event.type_id()
        if type_id == LinkEventType.HANDSHAKE_REQ:
            # Peer initiated: answer with our handshake response (None on error).
            response = None
            try:
                response = self.buffer_transform.handshake(event.data)
            except Exception as ex:
                Trace.error("{} error handshaking {}", self.link.name, ex)
            self.session.send(HandshakeResp().setattrs(
                _transform = False,
                data = response
            ))
        elif type_id == LinkEventType.HANDSHAKE_RESP:
            # Finish our side; a failed fini_handshake still sends an Ack
            # (with result=False) so the peer is not left waiting.
            result = False
            try:
                result = self.buffer_transform.fini_handshake(event.data)
            except Exception as ex:
                Trace.error("{} error finishing handshake {}", self.link.name, ex)
            if result:
                self.rx_transform_ready = True
            self.session.send(HandshakeAck().setattrs(
                _transform = False,
                result = result
            ))
        elif type_id == LinkEventType.HANDSHAKE_ACK:
            # Peer confirmed (or rejected); either way the link learns the outcome.
            result = event.result
            if result:
                self.tx_transform_ready = True
            self.session.link.on_connect(result, self.session)
        else:
            return False  # not a handshake event; let others process it
        return True

    def cleanup(self):
        if self.buffer_transform is None:
            return
        self.buffer_transform.cleanup()
        self.buffer_transform = None

    def before_send(self, buffer):
        """Transform outbound buffers once tx is ready.

        Returns (transformed?, buffer)."""
        if self.tx_transform_ready:
            buffer = self.buffer_transform.transform(buffer)
            return True, buffer
        return False, buffer

    def after_receive(self, buffer):
        """Inverse-transform inbound buffers once rx is ready."""
        if self.rx_transform_ready:
            buffer = self.buffer_transform.inverse_transform(buffer)
        return buffer
33.591837
82
0.637303
340
3,292
5.958824
0.247059
0.148075
0.140671
0.04541
0.287759
0.200888
0.162389
0.146101
0.146101
0.146101
0
0.005139
0.290705
3,292
97
83
33.938144
0.862527
0.021567
0
0.3125
0
0
0.016786
0
0
0
0
0
0
1
0.1125
false
0
0.05
0
0.2875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d2ffa602fd2739373ede0b55f827179feb8572a
5,632
py
Python
ignite_trainer/_visdom.py
jinczing/AudioCLIP
b080fc946599290c91f9d3b203295e5968af1bf6
[ "MIT" ]
304
2021-06-28T09:59:13.000Z
2022-03-30T17:33:52.000Z
ignite_trainer/_visdom.py
AK391/AudioCLIP
45327aa203839bfeb58681dd36c04fd493ee72f4
[ "MIT" ]
176
2021-07-23T08:30:21.000Z
2022-03-14T12:29:06.000Z
ignite_trainer/_visdom.py
AK391/AudioCLIP
45327aa203839bfeb58681dd36c04fd493ee72f4
[ "MIT" ]
34
2021-06-29T11:50:19.000Z
2022-03-02T12:01:36.000Z
"""Visdom helpers: y-range estimation, line plotting, and server bootstrap."""
import os
import sys
import json
import time

import tqdm
import socket
import subprocess

import numpy as np
import visdom

from typing import Tuple
from typing import Optional


def calc_ytick_range(vis: visdom.Visdom,
                     window_name: str,
                     env: Optional[str] = None) -> Tuple[float, float]:
    """Estimate a y-axis range for a window from its existing traces.

    Pulls the window's data, drops outliers per-trace via the 1.5*IQR rule,
    then returns (min(q25) - 1.5*iqr, max(q75) + 1.5*iqr) over the sanitized
    traces. Falls back to (-1.0, 1.0) when the window is empty.
    """
    lower_bound, upper_bound = -1.0, 1.0

    stats = vis.get_window_data(win=window_name, env=env)
    if stats:
        stats = json.loads(stats)
        stats = [np.array(item['y']) for item in stats['content']['data']]
        # Drop None placeholders visdom stores for missing points.
        stats = [item[item != np.array([None])].astype(np.float16) for item in stats]
        if stats:
            q25s = np.array([np.quantile(item, 0.25) for item in stats if len(item) > 0])
            q75s = np.array([np.quantile(item, 0.75) for item in stats if len(item) > 0])
            if q25s.shape == q75s.shape and len(q25s) > 0:
                iqrs = q75s - q25s
                lower_bounds = q25s - 1.5 * iqrs
                upper_bounds = q75s + 1.5 * iqrs

                # Keep only in-fence points of each non-empty trace; idx tracks
                # the matching entry of lower_bounds/upper_bounds.
                stats_sanitized = list()
                idx = 0
                for item in stats:
                    if len(item) > 0:
                        item_sanitized = item[(item >= lower_bounds[idx]) & (item <= upper_bounds[idx])]
                        stats_sanitized.append(item_sanitized)
                        idx += 1
                stats_sanitized = np.array(stats_sanitized)

                q25_sanitized = np.array([np.quantile(item, 0.25) for item in stats_sanitized])
                q75_sanitized = np.array([np.quantile(item, 0.75) for item in stats_sanitized])
                # NOTE: iqr here is the SUM of per-trace IQRs, widening the
                # range when many traces are shown.
                iqr_sanitized = np.sum(q75_sanitized - q25_sanitized)
                lower_bound = np.min(q25_sanitized) - 1.5 * iqr_sanitized
                upper_bound = np.max(q75_sanitized) + 1.5 * iqr_sanitized

    return lower_bound, upper_bound


def plot_line(vis: visdom.Visdom,
              window_name: str,
              env: Optional[str] = None,
              line_label: Optional[str] = None,
              x: Optional[np.ndarray] = None,
              y: Optional[np.ndarray] = None,
              x_label: Optional[str] = None,
              y_label: Optional[str] = None,
              width: int = 576,
              height: int = 416,
              draw_marker: bool = False) -> str:
    """Append (x, y) to a named visdom line window and refresh its axis opts.

    Returns the (possibly server-assigned) window name.
    """
    empty_call = not vis.win_exists(window_name)
    # NOTE(review): returns early when the window does not exist yet AND data
    # was supplied — i.e. the window must be created by a data-less call
    # first. Looks deliberate but worth confirming against callers.
    if empty_call and (x is not None or y is not None):
        return window_name

    # Substitute placeholders so vis.line always gets arrays.
    if x is None:
        x = np.ones(1)
        empty_call = empty_call & True
    if y is None:
        y = np.full(1, np.nan)
        empty_call = empty_call & True
    if x.shape != y.shape:
        x = np.ones_like(y)

    opts = {
        'showlegend': True,
        'markers': draw_marker,
        'markersize': 5,
    }
    # Title and window size are only set on creation.
    if empty_call:
        opts['title'] = window_name
        opts['width'] = width
        opts['height'] = height

    window_name = vis.line(
        X=x,
        Y=y,
        win=window_name,
        env=env,
        update='append',
        name=line_label,
        opts=opts
    )

    # Recompute axis ranges from the full window contents after appending.
    xtickmin, xtickmax = 0.0, np.max(x) * 1.05
    ytickmin, ytickmax = calc_ytick_range(vis, window_name, env)

    opts = {
        'showlegend': True,
        'xtickmin': xtickmin,
        'xtickmax': xtickmax,
        'ytickmin': ytickmin,
        'ytickmax': ytickmax,
        'xlabel': x_label,
        'ylabel': y_label
    }
    window_name = vis.update_window_opts(win=window_name, opts=opts, env=env)

    return window_name


def create_summary_window(vis: visdom.Visdom,
                          visdom_env_name: str,
                          experiment_name: str,
                          summary: str) -> str:
    """Write (or append) an experiment summary into a text window."""
    return vis.text(
        text=summary,
        win=experiment_name,
        env=visdom_env_name,
        opts={'title': 'Summary', 'width': 576, 'height': 416},
        # Append when the window already exists so earlier summaries are kept.
        append=vis.win_exists(experiment_name, visdom_env_name)
    )


def connection_is_alive(host: str, port: int) -> bool:
    """Probe host:port with a TCP connect; True when something is listening."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        try:
            sock.connect((host, port))
            sock.shutdown(socket.SHUT_RDWR)
            return True
        except socket.error:
            return False


def get_visdom_instance(host: str = 'localhost',
                        port: int = 8097,
                        env_name: str = 'main',
                        env_path: str = 'visdom_env') -> Tuple[visdom.Visdom, Optional[int]]:
    """Connect to a visdom server, starting a local one if needed.

    Returns (client, pid-of-spawned-server-or-None). Raises RuntimeError when
    no server answers after five retries.
    """
    vis_pid = None
    if not connection_is_alive(host, port):
        # Only attempt to spawn a server for local hosts.
        if any(host.strip('/').endswith(lh) for lh in ['127.0.0.1', 'localhost']):
            os.makedirs(env_path, exist_ok=True)

            tqdm.tqdm.write('Starting visdom on port {}'.format(port), end='')
            vis_args = [
                sys.executable,
                '-m', 'visdom.server',
                '-port', str(port),
                '-env_path', os.path.join(os.getcwd(), env_path)
            ]
            vis_proc = subprocess.Popen(vis_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            time.sleep(2.0)  # give the server a moment to bind the port

            vis_pid = vis_proc.pid
            tqdm.tqdm.write('PID -> {}'.format(vis_pid))

        trials_left = 5
        while not connection_is_alive(host, port):
            time.sleep(1.0)
            tqdm.tqdm.write('Trying to connect ({} left)...'.format(trials_left))
            trials_left -= 1
            if trials_left < 1:
                raise RuntimeError('Visdom server is not running. Please run "python -m visdom.server".')

    vis = visdom.Visdom(
        server='http://{}'.format(host),
        port=port,
        env=env_name
    )

    return vis, vis_pid
29.333333
109
0.552734
706
5,632
4.256374
0.240793
0.039933
0.020965
0.032612
0.175707
0.136439
0.10183
0.09584
0.081198
0.081198
0
0.026344
0.332741
5,632
191
110
29.486911
0.773284
0
0
0.06993
0
0
0.062145
0
0
0
0
0
0
1
0.034965
false
0
0.076923
0.006993
0.160839
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d3007ae1a0b21a2c5b82a4a63774e81f6aa5a00
4,960
py
Python
anonybot.py
sp0oks/anonybot
864688f04231e3088737b12caed76f61a5128993
[ "MIT" ]
5
2019-12-17T17:53:51.000Z
2020-09-06T07:51:23.000Z
anonybot.py
CptSpookz/anonybot
864688f04231e3088737b12caed76f61a5128993
[ "MIT" ]
null
null
null
anonybot.py
CptSpookz/anonybot
864688f04231e3088737b12caed76f61a5128993
[ "MIT" ]
2
2020-01-20T01:01:20.000Z
2020-09-06T07:51:25.000Z
"""Anonybot: a Telegram bot for anonymous one-way messages, backed by SQLAlchemy."""
import os
import time

from sqlalchemy import create_engine, BigInteger, UnicodeText, Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.exc import SQLAlchemyError
from aiogram import Bot, Dispatcher, executor, types
from aiogram.utils.exceptions import ChatNotFound
from dotenv import load_dotenv

load_dotenv()

# Database configuration
DB = os.getenv('DB_ADDR')
ENGINE = create_engine(DB)
Base = declarative_base()
Session = scoped_session(sessionmaker(bind=ENGINE))


class Msg(Base):
    # A pending message: stored until the recipient runs /receive or /drop.
    __tablename__ = 'messages'
    id = Column(Integer, primary_key=True)
    user_id = Column(BigInteger)          # Telegram id of the recipient
    text = Column(UnicodeText(4096))      # 4096 = Telegram's message length cap


# Bot configuration
USAGE = """\
/status -- show how many messages are pending
/receive -- receive pending messages
/send [user_id] -- reply to message to send it to given user
/drop -- drop all pending messages
/help -- shows this message
"""
TOKEN = os.getenv('BOT_TOKEN')
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)


@dp.message_handler(commands=['send'])
async def send_msg(message: types.Message):
    """Store the replied-to message for the given user id and notify them."""
    # All commands are restricted to private chats.
    if message.chat.type == 'private':
        session = Session()
        args = message.get_args().split()
        if len(args) >= 1:
            try:
                receiver = int(args[0])
            except ValueError:
                await message.reply('You need to specify a Telegram id as the receiver.')
                return
            # The payload is the message the user replied to, not the command text.
            if message.reply_to_message is not None:
                msg = Msg(user_id=receiver, text=message.reply_to_message.text)
                try:
                    session.add(msg)
                    session.commit()
                    try:
                        await bot.send_message(receiver, 'You have a new message!')
                        await message.reply('Message was sent.')
                    except ChatNotFound:
                        # Receiver unknown to Telegram: discard the pending insert.
                        session.flush()
                        await message.reply('This user id does not exist.')
                except SQLAlchemyError as err:
                    session.rollback()
                    print(f'[{time.asctime()}]: {err}')
                    await message.reply('Something happened, message could not be sent.\nTry sending the message again.')
            else:
                await message.reply('You must reply to the message you want to send.')
        else:
            await message.reply('You must provide a receiver to the message.')


@dp.message_handler(commands=['receive'])
async def receive_msg(message: types.Message):
    """Deliver all pending messages to the caller, then delete them."""
    if message.chat.type == 'private':
        session = Session()
        msgs = session.query(Msg).filter_by(user_id=message.from_user.id).all()
        if len(msgs) > 0:
            for i, msg in enumerate(msgs, 1):
                text = f'#{i}: {msg.text}'
                await message.reply(text, parse_mode=types.message.ParseMode.MARKDOWN, reply=False)
            # Messages are deleted only after all were sent above.
            try:
                session.query(Msg).filter_by(user_id=message.from_user.id).delete()
                session.commit()
            except SQLAlchemyError as err:
                session.rollback()
                print(f'[{time.asctime()}]: {err}')
                await message.reply('Something happened, could not drop messages.')
        else:
            await message.reply('Your inbox is currently empty.')


@dp.message_handler(commands=['drop'])
async def drop_msg(message: types.Message):
    """Delete the caller's pending messages without showing them."""
    if message.chat.type == 'private':
        session = Session()
        msgs = session.query(Msg).filter_by(user_id=message.from_user.id).count()
        try:
            session.query(Msg).filter_by(user_id=message.from_user.id).delete()
            session.commit()
            await message.reply(f'Dropped {msgs} messages.')
        except SQLAlchemyError as err:
            session.rollback()
            print(f'[{time.asctime()}]: {err}')
            await message.reply(f'Something happened, could not drop messages.')


@dp.message_handler(commands=['status'])
async def status(message: types.Message):
    """Report how many messages await the caller."""
    if message.chat.type == 'private':
        session = Session()
        msgs = session.query(Msg).filter_by(user_id=message.from_user.id).count()
        text = f'You have {msgs} pending messages.'
        await message.reply(text)


# NOTE(review): this handler and the next are both named `start`; the second
# def shadows the first at module level. Registration still works because the
# decorator runs on each def, but renaming this one (e.g. help_cmd) would be
# cleaner — confirm nothing imports it by name.
@dp.message_handler(commands=['help'])
async def start(message: types.Message):
    """Show the usage text."""
    if message.chat.type == 'private':
        await message.reply(text=USAGE)


@dp.message_handler(commands=['start'])
async def start(message: types.Message):
    """Greet the user with usage info and their pending-message count."""
    if message.chat.type == 'private':
        text = f'Hello, this is Anonybot.\n'+USAGE
        session = Session()
        msgs = session.query(Msg).filter_by(user_id=message.from_user.id).count()
        text += f'\nYou have {msgs} pending messages.'
        await message.reply(text=text, reply=False)


if __name__ == '__main__':
    Base.metadata.create_all(ENGINE)
    executor.start_polling(dp)
36.20438
121
0.626008
601
4,960
5.063228
0.254576
0.031548
0.078212
0.047322
0.392047
0.392047
0.354913
0.354913
0.325994
0.325994
0
0.00218
0.260081
4,960
136
122
36.470588
0.826975
0.008065
0
0.339286
0
0
0.188326
0
0
0
0
0
0
1
0
false
0
0.080357
0
0.133929
0.026786
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d303166d818d8f8f693a98022e31dfc5961d444
2,912
py
Python
tests/test_doc_cvnn_example.py
saugatkandel/cvnn
f6d7b5c17fd064a7eaa60e7af922914a974eb69a
[ "MIT" ]
38
2020-09-16T14:47:36.000Z
2022-03-30T13:35:05.000Z
tests/test_doc_cvnn_example.py
saugatkandel/cvnn
f6d7b5c17fd064a7eaa60e7af922914a974eb69a
[ "MIT" ]
25
2020-10-03T19:30:16.000Z
2022-03-29T15:24:44.000Z
tests/test_doc_cvnn_example.py
saugatkandel/cvnn
f6d7b5c17fd064a7eaa60e7af922914a974eb69a
[ "MIT" ]
9
2021-01-18T10:48:57.000Z
2022-02-11T10:34:52.000Z
"""Smoke tests for the cvnn documentation examples (complex-valued Keras layers)."""
import numpy as np
import cvnn.layers as complex_layers
import tensorflow as tf
from pdb import set_trace


def get_dataset():
    """Load CIFAR-10 and cast the images to complex64, scaled to [0, 1]."""
    (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
    train_images = train_images.astype(dtype=np.complex64) / 255.0
    test_images = test_images.astype(dtype=np.complex64) / 255.0
    return (train_images, train_labels), (test_images, test_labels)


def test_cifar():
    """Train a small complex CNN on CIFAR-10 for one epoch (Sequential API)."""
    (train_images, train_labels), (test_images, test_labels) = get_dataset()

    # Create your model
    model = tf.keras.models.Sequential()
    model.add(complex_layers.ComplexInput(input_shape=(32, 32, 3)))  # Always use ComplexInput at the start
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(complex_layers.ComplexAvgPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_relu'))
    # Final layer maps to real logits so the standard crossentropy loss applies.
    model.add(complex_layers.ComplexDense(10, activation='convert_to_real_with_abs'))
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    # model.summary()
    history = model.fit(train_images, train_labels, epochs=1,
                        validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)


def test_regression():
    """Check that a linear-output complex MLP returns complex64 values."""
    input_shape = (4, 28, 28, 3)
    x = tf.cast(tf.random.normal(input_shape), tf.complex64)
    model = tf.keras.models.Sequential()
    model.add(complex_layers.ComplexInput(input_shape=input_shape[1:]))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(units=64, activation='cart_relu'))
    model.add(complex_layers.ComplexDense(units=10, activation='linear'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    y = model(x)
    assert y.dtype == np.complex64


def test_functional_api():
    """Build a small U-Net-style graph with the functional API (build only)."""
    inputs = complex_layers.complex_input(shape=(128, 128, 3))
    c0 = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(inputs)
    c1 = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(c0)
    c2 = complex_layers.ComplexMaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(c1)
    t01 = complex_layers.ComplexConv2DTranspose(5, kernel_size=2, strides=(2, 2), activation='cart_relu')(c2)
    # Skip connection: concatenate the upsampled path with the encoder feature map.
    concat01 = tf.keras.layers.concatenate([t01, c1], axis=-1)
    c3 = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(concat01)
    out = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(c3)
    model = tf.keras.Model(inputs, out)


if __name__ == '__main__':
    test_functional_api()
    test_regression()
    test_cifar()
45.5
109
0.730426
390
2,912
5.220513
0.307692
0.121316
0.081041
0.113458
0.461198
0.4278
0.422888
0.391454
0.329568
0.247544
0
0.040508
0.135302
2,912
63
110
46.222222
0.76807
0.024038
0
0.081633
0
0
0.060606
0.016913
0
0
0
0
0.020408
1
0.081633
false
0
0.081633
0
0.183673
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d31c3b53c5a416e56a025e297cf9e335432c27b
2,580
py
Python
gkutils/commonutils/getCSVColumnSubset.py
genghisken/gkutils
0c8aa06d813de72b1cd9cba11219a78952799420
[ "MIT" ]
null
null
null
gkutils/commonutils/getCSVColumnSubset.py
genghisken/gkutils
0c8aa06d813de72b1cd9cba11219a78952799420
[ "MIT" ]
1
2021-11-19T19:28:52.000Z
2021-11-19T19:29:57.000Z
gkutils/commonutils/getCSVColumnSubset.py
genghisken/gkutils
0c8aa06d813de72b1cd9cba11219a78952799420
[ "MIT" ]
null
null
null
"""Write a subset of keys from one CSV to another. Don't use lots of memory. Usage: %s <filename> <outputfile> [--columns=<columns>] [--htm] [--racol=<racol>] [--deccol=<deccol>] [--filtercol=<filtercol>] %s (-h | --help) %s --version Options: -h --help Show this screen. --version Show version. --columns=<columns> Comma separated (no spaces) columns. --htm Generate HTM IDs and add to the column subset. --racol=<racol> RA column, ignored if htm not specified [default: ra] --deccol=<deccol> Declination column, ignored if htm not specified [default: dec] --filtercol=<filtercol> Only write the row when this column is not blank. """ import sys __doc__ = __doc__ % (sys.argv[0], sys.argv[0], sys.argv[0]) from docopt import docopt from gkutils.commonutils import Struct, readGenericDataFile, cleanOptions import csv from gkhtm._gkhtm import htmName def getColumnSubset(options): # DictReader doesn't burden the memory - so let's use it to select our column subset. data = csv.DictReader(open(options.filename), delimiter=',') columns = options.columns.split(',') if options.htm: columns.append('htm10') columns.append('htm13') columns.append('htm16') with open(options.outputfile, 'w') as f: w = csv.DictWriter(f, columns, delimiter = ',') w.writeheader() for row in data: # TO FIX - code is very inefficient. HTMs generated regardless of filtercol. Silly! 
trimmedRow = {key: row[key] for key in options.columns.split(',')} if options.htm: htm16Name = htmName(16, float(row[options.racol]), float(row[options.deccol])) trimmedRow['htm10'] = htm16Name[0:12] trimmedRow['htm13'] = htm16Name[12:15] trimmedRow['htm16'] = htm16Name[15:18] try: if options.filtercol: if trimmedRow[options.filtercol] and trimmedRow[options.filtercol] != 'null': w.writerow(trimmedRow) else: w.writerow(trimmedRow) except KeyError as e: w.writerow(trimmedRow) return def main(argv = None): opts = docopt(__doc__, version='0.1') opts = cleanOptions(opts) # Use utils.Struct to convert the dict into an object for compatibility with old optparse code. options = Struct(**opts) getColumnSubset(options) if __name__ == '__main__': main()
35.342466
122
0.605039
303
2,580
5.082508
0.432343
0.013636
0.015584
0.023377
0.103896
0.103896
0.048052
0
0
0
0
0.020408
0.278295
2,580
72
123
35.833333
0.80666
0.391473
0
0.128205
0
0
0.032072
0
0
0
0
0
0
1
0.051282
false
0
0.128205
0
0.205128
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d35852cc4326c58c6eb53f1d5a84c6b35a5e6fb
1,006
py
Python
src/python/WMComponent/DBS3Buffer/MySQL/DBSBufferFiles/GetParentStatus.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
21
2015-11-19T16:18:45.000Z
2021-12-02T18:20:39.000Z
src/python/WMComponent/DBS3Buffer/MySQL/DBSBufferFiles/GetParentStatus.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
5,671
2015-01-06T14:38:52.000Z
2022-03-31T22:11:14.000Z
src/python/WMComponent/DBS3Buffer/MySQL/DBSBufferFiles/GetParentStatus.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
67
2015-01-21T15:55:38.000Z
2022-02-03T19:53:13.000Z
#!/usr/bin/env python
"""
_GetParentStatus_

MySQL implementation of DBSBufferFile.GetParentStatus
"""

from WMCore.Database.DBFormatter import DBFormatter


class GetParentStatus(DBFormatter):
    """DAO returning the status of every parent of the file with a given LFN."""

    sql = """SELECT status FROM dbsbuffer_file
               INNER JOIN dbsbuffer_file_parent ON
                 dbsbuffer_file.id = dbsbuffer_file_parent.parent
             WHERE dbsbuffer_file_parent.child =
               (SELECT id FROM dbsbuffer_file WHERE lfn = :lfn)"""

    def format(self, results):
        """
        _format_

        Format the query results into a list of LFNs.
        """
        # Base formatter yields rows as sequences; status is the sole column.
        rows = DBFormatter.format(self, results)
        return [row[0] for row in rows]

    def execute(self, lfn, conn = None, transaction = False):
        """Run the parent-status query for the given LFN and format the rows."""
        raw = self.dbi.processData(self.sql, {"lfn": lfn},
                                   conn = conn, transaction = transaction)
        return self.format(raw)
27.189189
74
0.614314
105
1,006
5.761905
0.485714
0.128926
0.094215
0
0
0
0
0
0
0
0
0.001422
0.301193
1,006
36
75
27.944444
0.859175
0.148111
0
0
0
0
0.326406
0.09291
0
0
0
0
0
1
0.117647
false
0
0.058824
0
0.411765
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d3874299d6c36b60cba6fdb324222e4353364ea
481
py
Python
tests/test_actor.py
sdss/HAL
c7a2111f8737a498a124f5571d6f0e6b46e5c371
[ "BSD-3-Clause" ]
null
null
null
tests/test_actor.py
sdss/HAL
c7a2111f8737a498a124f5571d6f0e6b46e5c371
[ "BSD-3-Clause" ]
2
2022-01-14T04:50:58.000Z
2022-02-28T22:31:06.000Z
tests/test_actor.py
sdss/HAL
c7a2111f8737a498a124f5571d6f0e6b46e5c371
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # # @Author: José Sánchez-Gallego (gallegoj@uw.edu) # @Date: 2021-03-24 # @Filename: test_hal.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import pytest from hal import __version__ pytestmark = [pytest.mark.asyncio] async def test_version(actor): await actor.invoke_mock_command("version") assert len(actor.mock_replies) == 2 assert actor.mock_replies[-1]["version"] == __version__
20.913043
74
0.706861
68
481
4.794118
0.735294
0.02454
0.06135
0
0
0
0
0
0
0
0
0.031325
0.137214
481
22
75
21.863636
0.754217
0.424116
0
0
0
0
0.051852
0
0
0
0
0
0.285714
1
0
false
0
0.285714
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d3a4036188d6088bc1ce4cfe8dfff01c0a9fdb1
490
py
Python
day_07/puzzles.py
electronsandstuff/Advent-of-Code-2021
9c23872640e8d092088dcb6d5cb845cd11d98994
[ "BSD-3-Clause" ]
null
null
null
day_07/puzzles.py
electronsandstuff/Advent-of-Code-2021
9c23872640e8d092088dcb6d5cb845cd11d98994
[ "BSD-3-Clause" ]
null
null
null
day_07/puzzles.py
electronsandstuff/Advent-of-Code-2021
9c23872640e8d092088dcb6d5cb845cd11d98994
[ "BSD-3-Clause" ]
null
null
null
import numpy as np def crab_fuel(n): return (n**2 + n) // 2 if __name__ == '__main__': with open('input.txt') as f: pin = np.array([int(x) for x in f.read().split(',')]) distances = np.abs(pin[None, :] - np.arange(pin.max() + 1)[:, None]) total_fuel = np.sum(distances, axis=1) print(f'Solution 1: {total_fuel.min()}') distances_v2 = crab_fuel(distances) total_fuel_v2 = np.sum(distances_v2, axis=1) print(f'Solution 2: {total_fuel_v2.min()}')
25.789474
72
0.608163
80
490
3.5
0.4875
0.128571
0.1
0.078571
0.135714
0
0
0
0
0
0
0.028133
0.202041
490
18
73
27.222222
0.68798
0
0
0
0
0
0.165306
0.042857
0
0
0
0
0
1
0.083333
false
0
0.083333
0.083333
0.25
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d3b2ee3ee8d1f5868d497f89b1766382405982d
16,114
py
Python
sampling.py
bigdata-inha/FedDC
c90c48fc7e35b6cb80890194c8cdfb0d412a0819
[ "MIT" ]
null
null
null
sampling.py
bigdata-inha/FedDC
c90c48fc7e35b6cb80890194c8cdfb0d412a0819
[ "MIT" ]
null
null
null
sampling.py
bigdata-inha/FedDC
c90c48fc7e35b6cb80890194c8cdfb0d412a0819
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # Python version: 3.6 import numpy as np from torchvision import datasets, transforms import logging import random import torch # Settings for a multiplicative linear congruential generator (aka Lehmer # generator) suggested in 'Random Number Generators: Good # Ones are Hard to Find' by Park and Miller. MLCG_MODULUS = 2**(31) - 1 MLCG_MULTIPLIER = 16807 # Default quantiles for federated evaluations. DEFAULT_QUANTILES = (0.0, 0.25, 0.5, 0.75, 1.0) def mnist_iid(dataset, num_users): """ Sample I.I.D. client data from MNIST dataset :param dataset: :param num_users: :return: dict of image index """ num_items = int(len(dataset) / num_users) dict_users, all_idxs = {}, [i for i in range(len(dataset))] for i in range(num_users): dict_users[i] = set(np.random.choice(all_idxs, num_items, replace=False)) all_idxs = list(set(all_idxs) - dict_users[i]) return dict_users def mnist_noniid(dataset, num_users): """ Sample non-I.I.D client data from MNIST dataset :param dataset: :param num_users: :return: """ # 60,000 training imgs --> 200 imgs/shard X 300 shards num_shards, num_imgs = 200, 300 idx_shard = [i for i in range(num_shards)] dict_users = {i: np.array([]) for i in range(num_users)} idxs = np.arange(num_shards * num_imgs) labels = dataset.train_labels.numpy() # sort labels idxs_labels = np.vstack((idxs, labels)) idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()] idxs = idxs_labels[0, :] # divide and assign 2 shards/client for i in range(num_users): rand_set = set(np.random.choice(idx_shard, 2, replace=False)) idx_shard = list(set(idx_shard) - rand_set) for rand in rand_set: dict_users[i] = np.concatenate( (dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0) return dict_users def mnist_noniid_unequal(dataset, num_users): """ Sample non-I.I.D client data from MNIST dataset s.t clients have unequal amount of data :param dataset: :param num_users: :returns a dict of clients with each clients assigned certain 
number of training imgs """ # 60,000 training imgs --> 50 imgs/shard X 1200 shards num_shards, num_imgs = 1200, 50 idx_shard = [i for i in range(num_shards)] dict_users = {i: np.array([]) for i in range(num_users)} idxs = np.arange(num_shards * num_imgs) labels = dataset.train_labels.numpy() # sort labels idxs_labels = np.vstack((idxs, labels)) idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()] idxs = idxs_labels[0, :] # Minimum and maximum shards assigned per client: min_shard = 1 max_shard = 30 # Divide the shards into random chunks for every client # s.t the sum of these chunks = num_shards random_shard_size = np.random.randint(min_shard, max_shard + 1, size=num_users) random_shard_size = np.around(random_shard_size / sum(random_shard_size) * num_shards) random_shard_size = random_shard_size.astype(int) # Assign the shards randomly to each client if sum(random_shard_size) > num_shards: for i in range(num_users): # First assign each client 1 shard to ensure every client has # atleast one shard of data rand_set = set(np.random.choice(idx_shard, 1, replace=False)) idx_shard = list(set(idx_shard) - rand_set) for rand in rand_set: dict_users[i] = np.concatenate( (dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0) random_shard_size = random_shard_size - 1 # Next, randomly assign the remaining shards for i in range(num_users): if len(idx_shard) == 0: continue shard_size = random_shard_size[i] if shard_size > len(idx_shard): shard_size = len(idx_shard) rand_set = set(np.random.choice(idx_shard, shard_size, replace=False)) idx_shard = list(set(idx_shard) - rand_set) for rand in rand_set: dict_users[i] = np.concatenate( (dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0) else: for i in range(num_users): shard_size = random_shard_size[i] rand_set = set(np.random.choice(idx_shard, shard_size, replace=False)) idx_shard = list(set(idx_shard) - rand_set) for rand in rand_set: dict_users[i] = np.concatenate( (dict_users[i], idxs[rand * 
num_imgs:(rand + 1) * num_imgs]), axis=0) if len(idx_shard) > 0: # Add the leftover shards to the client with minimum images: shard_size = len(idx_shard) # Add the remaining shard to the client with lowest data k = min(dict_users, key=lambda x: len(dict_users.get(x))) rand_set = set(np.random.choice(idx_shard, shard_size, replace=False)) idx_shard = list(set(idx_shard) - rand_set) for rand in rand_set: dict_users[k] = np.concatenate( (dict_users[k], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0) return dict_users def cifar_iid(dataset, num_users, args): """ Sample I.I.D. client data from CIFAR10 dataset :param dataset: :param num_users: :return: dict of image index """ torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) random.seed(args.seed) np.random.seed(args.seed) num_items = int(len(dataset) / num_users) # dict_users란? 0~100의 유저들에게 50000개 데이터를 100개씩 할당. 유저마다 indx를 가지고 있는 list dict_users, all_idxs = {}, [i for i in range(len(dataset))] for i in range(num_users): dict_users[i] = set(np.random.choice(all_idxs, num_items, replace=False)) all_idxs = list(set(all_idxs) - dict_users[i]) return dict_users def imagenet_noniid(dataset, num_users, args, class_num=2): """ Sample non-I.I.D client data from CIFAR10 dataset :param dataset: :param num_users: :return: """ #num_shards -> 총클래스 개수/ num_imgs ->한명당 가지는 데이터개수.but imagenet은 클래스마다 다름.세어줘야함 / # idxs 총데이터수 torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) random.seed(args.seed) np.random.seed(args.seed) # 아직 한 유저당 클래스 한개만 들어가는 경우 발생. #idx_shards ->유저당 가지는 랜덤시드 n개(n개는 클래스 개수임.) -> 클래스2 x 유저수100 = 200 #num_imgs -> 전체데이터셋중 유저 한명이 가지는 한 클래스 데이터 수. 
5만/100 =500, 2개클래스 500개 num_shards, num_imgs = num_users*class_num, int(len(dataset)/num_users/class_num) idx_shard = [i for i in range(num_shards)] dict_users = {i: np.array([]) for i in range(num_users)} idxs = np.arange(num_shards * num_imgs) # labels = dataset.train_labels.numpy() labels = np.array(dataset.targets) # sort labels idxs = np.argsort(labels) class_count = [0 for i in range(num_shards)] for i in labels: class_count[i] += 1 accumulate_class_count = [0 for i in range(num_shards)] for c in range(num_shards): if c==0: accumulate_class_count[c] = class_count[0] else: accumulate_class_count[c] = accumulate_class_count[c-1] + class_count[c] idx_shuffle = np.random.permutation(idx_shard) client_class_set = [] for i in range(num_users): user_class_set = idx_shuffle[i*class_num:(i+1)*class_num] client_class_set.append(user_class_set) for class_seed in user_class_set: dict_users[i] = np.concatenate( (dict_users[i], idxs[accumulate_class_count[class_seed] -class_count[class_seed] :accumulate_class_count[class_seed]]), axis=0) return dict_users,client_class_set def cifar10_iid(train_dataset, num_users, args): torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) random.seed(args.seed) np.random.seed(args.seed) n_dataset = len(train_dataset) idxs = np.random.permutation(n_dataset) batch_idxs = np.array_split(idxs, num_users) net_dataidx_map = {i: batch_idxs[i] for i in range(num_users)} return net_dataidx_map def record_net_data_stats(y_train, net_dataidx_map): net_cls_counts = {} for net_i, dataidx in net_dataidx_map.items(): unq, unq_cnt = np.unique(y_train[dataidx], return_counts=True) tmp = {unq[i]: unq_cnt[i] for i in range(len(unq))} net_cls_counts[net_i] = tmp logging.debug('Data statistics: %s' % str(net_cls_counts)) return net_cls_counts def partition_data(train_dataset, partition, num_uers, alpha, args): torch.backends.cudnn.benchmark = False 
torch.backends.cudnn.deterministic = True torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) random.seed(args.seed) np.random.seed(args.seed) train_labels = np.array(train_dataset.targets) num_train = len(train_dataset) if partition == "homo": idxs = np.random.permutation(num_train) batch_idxs = np.array_split(idxs, num_uers) net_dataidx_map = {i: batch_idxs[i] for i in range(num_uers)} elif partition == "dirichlet": min_size = 0 K = args.num_classes N = len(train_labels) # train data 수 ex)cifar- 50000 net_dataidx_map = {} while min_size < 10: idx_batch = [[] for _ in range(num_uers)] # for each class in the dataset for k in range(K): idx_k = np.where(train_labels == k)[0] np.random.shuffle(idx_k) proportions = np.random.dirichlet(np.repeat(alpha, num_uers)) ## Balance proportions = np.array([p * (len(idx_j) < N / num_uers) for p, idx_j in zip(proportions, idx_batch)]) proportions = proportions / proportions.sum() proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1] idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))] min_size = min([len(idx_j) for idx_j in idx_batch]) for j in range(num_uers): np.random.shuffle(idx_batch[j]) net_dataidx_map[j] = idx_batch[j] elif partition > "noniid-#label0" and partition <= "noniid-#label9": num = eval(partition[13:]) K = 10 if num == 10: net_dataidx_map ={i:np.ndarray(0,dtype=np.int64) for i in range(num_uers)} for i in range(10): idx_k = np.where(train_labels==i)[0] np.random.shuffle(idx_k) split = np.array_split(idx_k,num_uers) for j in range(num_uers): net_dataidx_map[j]=np.append(net_dataidx_map[j],split[j]) else: times=[0 for i in range(10)] contain=[] for i in range(num_uers): current=[i%K] times[i%K]+=1 j=1 while (j<num): ind=random.randint(0,K-1) if (ind not in current): j=j+1 current.append(ind) times[ind]+=1 contain.append(current) net_dataidx_map ={i:np.ndarray(0,dtype=np.int64) for i in range(num_uers)} for i in range(K): idx_k = 
np.where(train_labels==i)[0] np.random.shuffle(idx_k) split = np.array_split(idx_k,times[i]) ids=0 for j in range(num_uers): if i in contain[j]: net_dataidx_map[j]=np.append(net_dataidx_map[j],split[ids]) ids+=1 traindata_cls_counts = record_net_data_stats(train_labels, net_dataidx_map) #print(traindata_cls_counts) # return (X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts) # 이전 버전return y_train, net_dataidx_map, traindata_cls_counts return net_dataidx_map def cifar_noniid(dataset, num_users, args, class_num=2): """ Sample non-I.I.D client data from CIFAR10 dataset :param dataset: :param num_users: :return: """ torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) random.seed(args.seed) np.random.seed(args.seed) # 아직 한 유저당 클래스 한개만 들어가는 경우 발생. #idx_shards ->유저당 갖는 랜덤시드 n개(n개는 클래스 개수임.) -> 클래스2 x 유저수100 = 200 #num_imgs -> 전체데이터셋중 유저 한명이 가지는 한 클래스 데이터 수. 5만/100 =500, 2개클래스 500개 num_shards, num_imgs = num_users*class_num, int(len(dataset)/num_users/class_num) idx_shard = [i for i in range(num_shards)] dict_users = {i: np.array([]) for i in range(num_users)} idxs = np.arange(num_shards * num_imgs) # labels = dataset.train_labels.numpy() labels = np.array(dataset.targets) #sort_index = np.argsort(labels) # sort labels idxs_labels = np.vstack((idxs, labels)) idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()] idxs = idxs_labels[0, :] user_classs_dict = [] # divide and assign for i in range(num_users): # 200중에 2개 랜덤 선택. 
rand_set = set(np.random.choice(idx_shard, class_num, replace=False)) if class_num > 1 and i != num_users-1: while dataset.targets[idxs[list(rand_set)[1] * num_imgs]] == dataset.targets[idxs[list(rand_set)[0] *num_imgs]]: rand_set = set(np.random.choice(idx_shard, class_num, replace=False)) #print(dataset.targets[idxs[list(rand_set)[1] * num_imgs]]) #print(dataset.targets[idxs[list(rand_set)[0] * num_imgs]]) #print('\t') user_classs_dict.append(rand_set) idx_shard = list(set(idx_shard) - rand_set) for rand in rand_set: dict_users[i] = np.concatenate( (dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0) # for data_idx, j in enumerate(dict_users[i]): # print(i, data_idx, dataset.targets[int(j)]) return dict_users, user_classs_dict class client_choice(object): def __init__(self, args, num_users): self.args =args self.num_users = num_users self.mlcg_start = np.random.RandomState(args.seed).randint(1, MLCG_MODULUS - 1) def client_sampling(self, num_users, m, random_seed, round_num): # Settings for a multiplicative linear congruential generator (aka Lehmer # generator) suggested in 'Random Number Generators: Good # Ones are Hard to Find' by Park and Miller. pseudo_random_int = pow(MLCG_MULTIPLIER, round_num, MLCG_MODULUS) * self.mlcg_start % MLCG_MODULUS random_state = np.random.RandomState(pseudo_random_int) return random_state.choice(num_users, m, replace=False) if __name__ == '__main__': dataset_train = datasets.MNIST('./data/mnist/', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])) num = 100 d = mnist_noniid(dataset_train, num)
37.561772
151
0.608353
2,259
16,114
4.126605
0.136786
0.034327
0.019309
0.03422
0.620575
0.579382
0.532826
0.519095
0.505578
0.486376
0
0.018858
0.2859
16,114
428
152
37.649533
0.791258
0.177361
0
0.492593
0
0
0.006215
0
0
0
0
0
0
1
0.040741
false
0
0.018519
0
0.1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d3ca477c6b29581c9b909f6a0a67fb1fa79ccca
2,502
py
Python
codeforcesRating/codeforcesRating.py
gaurav512/Python-Scripts
46483ab09cccef380c8425d6924507e029745479
[ "MIT" ]
3
2020-05-23T14:31:35.000Z
2020-11-12T12:56:08.000Z
codeforcesRating/codeforcesRating.py
gaurav512/Python-Scripts
46483ab09cccef380c8425d6924507e029745479
[ "MIT" ]
null
null
null
codeforcesRating/codeforcesRating.py
gaurav512/Python-Scripts
46483ab09cccef380c8425d6924507e029745479
[ "MIT" ]
null
null
null
#! /usr/bin/python3 # Author: gaurav512 ''' Script written to scrape basic information about a Codeforces profile given the user id Usage: Enter the userid as command line argument OR as the input after running the following in terminal- python3 codeforces.py [userid] ''' import requests, bs4, sys def getDetails(userid): url = 'http://www.codeforces.com/profile/'+userid headers = {'User-Agent' : 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:73.0) Gecko/20100101 Firefox/73.0'} res = requests.get(url, headers=headers) try: res.raise_for_status() except: print('Cannot access codeforces') return soup = bs4.BeautifulSoup(res.text, 'html.parser') # Getting the rating of the user title = soup.select('.user-rank > span:nth-child(1)') if not title: print(f'User \'{userid}\' not found') return None title = title[0].text print('Title:\t\t',title) # Getting the name and place of the user (if updated on profile) elem = soup.select('.main-info > div:nth-child(3) > div:nth-child(1)') if elem: content = elem[0].text.split(',') name = content[0] print('Name:\t\t',name) if len(content) > 1: place = ','.join(content[1:]).lstrip() print('Place: \t', place) # Getting organization of the user (if updated on profile) elem2 = soup.select('.main-info > div:nth-child(3) > div:nth-child(2)') if elem2: organization = elem2[0].text pos = organization.find(' ') print('Organization:\t', organization[pos+1:]) # If the user is unrated then return back if title.strip() == 'Unrated': return None # Following code snippet takes care of the inconsistent css selectors on the Codeforces site due to display of badges in some profiles rating_selector = '.info > ul:nth-child(2) > li:nth-child(1) > span:nth-child(2)' if soup.select('div.badge:nth-child(1) > img:nth-child(1)'): rating_selector = rating_selector[:21]+'3'+rating_selector[22:] # Fetch the rating of the user rating = soup.select(rating_selector)[0].text print('Rating:\t\t', rating) # Fetch the highest title achieved by the user highestTitle = 
soup.select('span.smaller > span:nth-child(1)')[0].text print('Highest Title:\t', highestTitle[:-2].title()) # Fetch the highest rating achieved by the user highestRating = soup.select('span.smaller > span:nth-child(2)')[0].text print('Highest Rating:\t', highestRating) def main(): if len(sys.argv) > 1: userid = sys.argv[1] else: userid = input() getDetails(userid) if __name__ == '__main__': main()
30.144578
136
0.691847
383
2,502
4.477807
0.37859
0.055977
0.031487
0.016327
0.138776
0.117784
0.117784
0.047813
0.047813
0.047813
0
0.029426
0.157874
2,502
82
137
30.512195
0.784528
0.282974
0
0.040816
0
0.081633
0.320765
0.01238
0
0
0
0
0
1
0.040816
false
0
0.020408
0
0.122449
0.183673
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d3f7a7d27e1b7136efc12dc236457c627b3164e
1,025
py
Python
ch09-linear_model/src/score_card.py
ahitboyZBW/book-ml-sem
73208e7e492c9cbe82c4aaa6459a41e3ac1317be
[ "MIT" ]
137
2020-10-26T11:11:46.000Z
2022-03-29T01:21:22.000Z
ch09-linear_model/src/score_card.py
zengzhongjie/book-ml-sem
5d452a427db5ee65538d968ba5b938af013bb87c
[ "MIT" ]
4
2021-01-18T08:57:04.000Z
2021-07-29T02:39:00.000Z
ch09-linear_model/src/score_card.py
zengzhongjie/book-ml-sem
5d452a427db5ee65538d968ba5b938af013bb87c
[ "MIT" ]
46
2020-10-26T11:11:57.000Z
2022-03-08T00:15:32.000Z
def cal_A_B(pdo=20, base_score=500, odds=1 / 50): B = pdo / np.log(2) A = base_score + B * np.log(odds) return A, B ''' parameter --------- df:变量的woe,要求与模型训练logit时的列顺序一样 logit:sklearn中的逻辑回归模型,带截距 return ------ 新增每行数据的评分列:Score example: df= cal_score(df,logit) ''' def cal_score_byadd(df, logit, A=387.123, B=28.854): def _cal_woe_score(x, beta, n, B, beta0, A): ''' 只计算总分''' score = 0.0 for cc in x.index.tolist(): score += x[cc] * beta[cc] score = A - B * (beta0 + score) return score beta = dict(zip(df.columns.tolist(), logit.coef_[0])) n = df.shape[1] beta0 = logit.intercept_[0] df['Score'] = df.apply(lambda x: _cal_woe_score(x, beta, n, B, beta0, A), axis=1) return df def cal_score_byodds(df, logit, A=387.123, B=28.854): beta0 = logit.intercept_[0] prob_01 = logit.predict_proba(df) df['Score'] = A - B * np.log(prob_01[:, 1] / prob_01[:, 0]) return df
21.808511
77
0.559024
159
1,025
3.465409
0.352201
0.043557
0.021779
0.039927
0.15971
0.15971
0.15971
0.15971
0.087114
0
0
0.068456
0.273171
1,025
47
78
21.808511
0.671141
0.004878
0
0.181818
0
0
0.011682
0
0
0
0
0
0
1
0.181818
false
0
0
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d41431a104dca3b80f9642ad172c2f1314cf033
3,790
py
Python
Tools/ecl_ekf/batch_process_logdata_ekf.py
lgarciaos/Firmware
26dba1407bd1fbc65c23870a22fed904afba6347
[ "BSD-3-Clause" ]
4,224
2015-01-02T11:51:02.000Z
2020-10-27T23:42:28.000Z
Tools/ecl_ekf/batch_process_logdata_ekf.py
choudhary0parivesh/Firmware
02f4ad61ec8eb4f7906dd06b4eb1fd6abb994244
[ "BSD-3-Clause" ]
11,736
2015-01-01T11:59:16.000Z
2020-10-28T17:13:38.000Z
Tools/ecl_ekf/batch_process_logdata_ekf.py
choudhary0parivesh/Firmware
02f4ad61ec8eb4f7906dd06b4eb1fd6abb994244
[ "BSD-3-Clause" ]
11,850
2015-01-02T14:54:47.000Z
2020-10-28T16:42:47.000Z
#! /usr/bin/env python3 """ Runs process_logdata_ekf.py on the .ulg files in the supplied directory. ulog files are skipped from the analysis, if a corresponding .pdf file already exists (unless the overwrite flag was set). """ # -*- coding: utf-8 -*- import argparse import os, glob from process_logdata_ekf import process_logdata_ekf def get_arguments(): parser = argparse.ArgumentParser(description='Analyse the estimator_status and ekf2_innovation message data for the' ' .ulg files in the specified directory') parser.add_argument("directory_path") parser.add_argument('-o', '--overwrite', action='store_true', help='Whether to overwrite an already analysed file. If a file with .pdf extension exists for a .ulg' 'file, the log file will be skipped from analysis unless this flag has been set.') parser.add_argument('--no-plots', action='store_true', help='Whether to only analyse and not plot the summaries for developers.') parser.add_argument('--check-level-thresholds', type=str, default=None, help='The csv file of fail and warning test thresholds for analysis.') parser.add_argument('--check-table', type=str, default=None, help='The csv file with descriptions of the checks.') parser.add_argument('--no-sensor-safety-margin', action='store_true', help='Whether to not cut-off 5s after take-off and 5s before landing ' '(for certain sensors that might be influence by proximity to ground).') return parser.parse_args() def main() -> None: args = get_arguments() if args.check_level_thresholds is not None: check_level_dict_filename = args.check_level_thresholds else: file_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) check_level_dict_filename = os.path.join(file_dir, "check_level_dict.csv") if args.check_table is not None: check_table_filename = args.check_table else: file_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) check_table_filename = os.path.join(file_dir, "check_table.csv") ulog_directory = args.directory_path 
# get all the ulog files found in the specified directory and in subdirectories ulog_files = glob.glob(os.path.join(ulog_directory, '**/*.ulg'), recursive=True) print("found {:d} .ulg files in {:s}".format(len(ulog_files), ulog_directory)) # remove the files already analysed unless the overwrite flag was specified. A ulog file is consired to be analysed if # a corresponding .pdf file exists.' if not args.overwrite: print("skipping already analysed ulg files.") ulog_files = [ulog_file for ulog_file in ulog_files if not os.path.exists('{}.pdf'.format(ulog_file))] n_files = len(ulog_files) print("analysing the {:d} .ulg files".format(n_files)) i = 1 n_skipped = 0 # analyse all ulog files for ulog_file in ulog_files: print('analysing file {:d}/{:d}: {:s}'.format(i, n_files, ulog_file)) try: _ = process_logdata_ekf( ulog_file, check_level_dict_filename, check_table_filename, plot=not args.no_plots, sensor_safety_margins=not args.no_sensor_safety_margin) except Exception as e: print(str(e)) print('an exception occurred, skipping file {:s}'.format(ulog_file)) n_skipped = n_skipped + 1 i = i + 1 print('{:d}/{:d} files analysed, {:d} skipped.'.format(n_files-n_skipped, n_files, n_skipped)) if __name__ == '__main__': main()
43.563218
125
0.656201
521
3,790
4.579655
0.295585
0.025147
0.042749
0.023889
0.228416
0.160101
0.106454
0.081308
0.054484
0.054484
0
0.00313
0.241425
3,790
87
126
43.563218
0.826783
0.130343
0
0.070175
0
0
0.296894
0.014921
0
0
0
0
0
1
0.035088
false
0
0.052632
0
0.105263
0.122807
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d429d9ff49854612f73350299d50ebaeb16c00a
1,468
py
Python
goodok_mlu/trackers/neptune.py
roma-goodok/ml_utils
c1d6630021a519102b5c4e029cecccdd8a0da946
[ "MIT" ]
null
null
null
goodok_mlu/trackers/neptune.py
roma-goodok/ml_utils
c1d6630021a519102b5c4e029cecccdd8a0da946
[ "MIT" ]
null
null
null
goodok_mlu/trackers/neptune.py
roma-goodok/ml_utils
c1d6630021a519102b5c4e029cecccdd8a0da946
[ "MIT" ]
1
2021-03-29T13:15:02.000Z
2021-03-29T13:15:02.000Z
import inspect import warnings from pathlib import Path def send_model_code(model, model_config, logdir, NEPTUNE_ON=False, exp=None): model_init = None model_forward = None model_config_s = None try: model_init = inspect.getsource(model.__init__) except Exception as e: warnings.warn(f"Can't save model_init: {e}", UserWarning) try: model_forward = inspect.getsource(model.forward) except Exception as e: warnings.warn(f"Can't save model_forward: {e}", UserWarning) try: model_config_s = str(model_config) except Exception as e: warnings.warn(f"Can't save model_config: {e}", UserWarning) def save_and_send(src, fnbase): if src is not None: fn = Path(logdir) / fnbase with open(fn, 'w') as f: f.write(src) if NEPTUNE_ON and exp is not None: exp.send_artifact(fn) save_and_send(model_init, 'model_init.py') save_and_send(model_forward, 'model_forward.py') save_and_send(model_config_s, 'model_config.txt') def log_and_send_string(value, name='example.txt', logdir=None, NEPTUNE_ON=False, exp=None): def save_and_send(src, fnbase): if src is not None: fn = Path(logdir) / fnbase with open(fn, 'w') as f: f.write(src) if NEPTUNE_ON and exp is not None: exp.send_artifact(fn) save_and_send(value, name)
30.583333
92
0.632834
212
1,468
4.160377
0.245283
0.087302
0.07483
0.061224
0.537415
0.44898
0.44898
0.44898
0.44898
0.44898
0
0
0.271798
1,468
47
93
31.234043
0.82507
0
0
0.526316
0
0
0.096049
0
0
0
0
0
0
1
0.105263
false
0
0.078947
0
0.184211
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d438aadf58244488ff98e5078d8104573590578
3,099
py
Python
pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py
jbronikowski/genielibs
200a34e5fe4838a27b5a80d5973651b2e34ccafb
[ "Apache-2.0" ]
94
2018-04-30T20:29:15.000Z
2022-03-29T13:40:31.000Z
pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py
jbronikowski/genielibs
200a34e5fe4838a27b5a80d5973651b2e34ccafb
[ "Apache-2.0" ]
67
2018-12-06T21:08:09.000Z
2022-03-29T18:00:46.000Z
pkgs/sdk-pkg/src/genie/libs/sdk/libs/abstracted_libs/iosxr/subsection.py
jbronikowski/genielibs
200a34e5fe4838a27b5a80d5973651b2e34ccafb
[ "Apache-2.0" ]
49
2018-06-29T18:59:03.000Z
2022-03-10T02:07:59.000Z
# Python import logging from os import path # Abstract from genie.abstract import Lookup # Parser from genie.libs import parser from genie.metaparser.util.exceptions import SchemaEmptyParserError # unicon from unicon.eal.dialogs import Statement, Dialog log = logging.getLogger(__name__) def save_device_information(device, **kwargs): """Install the commit packages. This is for IOSXR devices. Args: Mandatory: device (`obj`) : Device object. Returns: True: Result is PASSED False: Result is PASSX Raises: None Example: >>> save_device_information(device=Device()) """ # Checking the config-register has 0x2 # if not configure 0x2 # RP/0/RSP1/CPU0:PE1#admin config-register 0x2 if device.is_ha: conn = device.active else: conn = device # Install commit ( when thre are package to bring up features) # from admin prompt conn.admin_execute('install commit') def get_default_dir(device): """ Get the default directory of this device Args: Mandatory: device (`obj`) : Device object. Returns: default_dir (`str`): Default directory of the system Raises: Exception Example: >>> get_default_dir(device=device) """ try: lookup = Lookup.from_device(device) parsed_dict = lookup.parser.show_platform.Dir(device=device).parse() if ":" in parsed_dict['dir']['dir_name']: default_dir = parsed_dict['dir']['dir_name'] else: default_dir = '' except SchemaEmptyParserError as e: raise Exception("No output when executing 'dir' command") from e except Exception as e: raise Exception("Unable to execute 'dir' command") from e # Return default_dir to caller log.info("Default directory on '{d}' is '{dir}'".format(d=device.name, dir=default_dir)) return default_dir def configure_replace(device, file_location, timeout=60, file_name=None): """Configure replace on device Args: device (`obj`): Device object file_location (`str`): File location timeout (`int`): Timeout value in seconds file_name (`str`): File name Returns: None Raises: pyATS Results """ if file_name: file_location = '{}{}'.format( file_location, 
file_name) try: # check if file exist device.execute.error_pattern.append('.*Path does not exist.*') device.execute("dir {}".format(file_location)) except Exception: raise Exception("File {} does not exist".format(file_location)) dialog = Dialog([ Statement(pattern=r'\[no\]', action='sendline(y)', loop_continue=True, continue_timer=False)]) device.configure("load {}\ncommit replace".format(file_location), timeout=timeout, reply=dialog)
26.042017
77
0.601162
350
3,099
5.202857
0.382857
0.043932
0.039539
0.034596
0.066996
0.04503
0.04503
0
0
0
0
0.00554
0.301065
3,099
118
78
26.262712
0.83518
0.317844
0
0.088889
0
0
0.125527
0
0
0
0
0
0
1
0.066667
false
0
0.133333
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d4487b1ae1496a3f2089388dee11fd461798de0
2,933
py
Python
whisper_scalability/plot.py
Evalir/research
0128cdc7c3cecaad4cc057886fd84e79b78f6b9c
[ "MIT" ]
42
2019-08-03T18:04:47.000Z
2022-02-28T14:24:56.000Z
whisper_scalability/plot.py
Evalir/research
0128cdc7c3cecaad4cc057886fd84e79b78f6b9c
[ "MIT" ]
88
2019-10-03T23:11:12.000Z
2022-03-30T05:28:44.000Z
whisper_scalability/plot.py
Evalir/research
0128cdc7c3cecaad4cc057886fd84e79b78f6b9c
[ "MIT" ]
3
2019-09-03T17:19:39.000Z
2021-12-27T16:53:44.000Z
import matplotlib.pyplot as plt
import numpy as np
from labellines import labelLines

# NOTE: an earlier attempt smoothed these curves with
# scipy.interpolate.make_interp_spline, but it failed with
# "ValueError: The number of derivatives at boundaries does not match:
# expected 1, got 0+0", so the raw three-point series are plotted instead.

n_users = [100, 10000, 1000000]

# Bandwidth (mb/day) per scaling case at 100 / 10k / 1M users, paired with
# the line style used for it ('dashed' for the naive cases 1-4).
# NOTE(review): "4.*10008" below is reproduced from the original and looks
# like a typo (perhaps 4.8*1000) -- confirm against the underlying model.
bandwidth_cases = [
    ([1, 1, 1], 'dashed'),
    ([97.7, 9.5 * 1000, 935.7 * 1000], 'dashed'),
    ([49.3, 4. * 10008, 476.8 * 1000], 'dashed'),
    ([1, 1.5, 98.1], 'dashed'),
    ([10.7, 978, 95.5 * 1000], None),
    ([21.5, 1.9 * 1000, 190.9 * 1000], None),
    ([3.9, 284.8, 27.8 * 1000], None),
    ([1, 1.5, 98.1], None),
]

plt.xlim(100, 10 ** 6)
plt.ylim(1, 10 ** 6)

for case_number, (bandwidth, style) in enumerate(bandwidth_cases, start=1):
    if style is not None:
        plt.plot(n_users, bandwidth, label='case {}'.format(case_number),
                 linewidth=4, linestyle=style)
    else:
        plt.plot(n_users, bandwidth, label='case {}'.format(case_number),
                 linewidth=4)

# labelLines(plt.gca().get_lines(), zorder=0) was tried for inline labels;
# a regular legend is used instead.
legend_labels = [
    "Case 1. Only receiving messages meant for you [naive case]",
    "Case 2. Receiving messages for everyone [naive case]",
    "Case 3. All private messages go over one discovery topic [naive case]",
    "Case 4. All private messages partitioned into shards [naive case]",
    "Case 5. Case 4 + All messages passed through bloom filter",
    "Case 6. Case 5 + Benign duplicate receives",
    "Case 7. Case 6 + Mailserver case under good conditions with small bloom fp and mostly offline",
    "Case 8. Waku - No metadata protection with bloom filter and one node connected; static shard",
]

plt.xlabel('number of users (log)')
plt.ylabel('mb/day (log)')
plt.legend(legend_labels, loc='upper left')
plt.xscale('log')
plt.yscale('log')

# Shade horizontal quality bands: blue (good) -> red (unusable).
plt.axhspan(0, 10, facecolor='0.2', alpha=0.2, color='blue')
plt.axhspan(10, 30, facecolor='0.2', alpha=0.2, color='green')
plt.axhspan(30, 100, facecolor='0.2', alpha=0.2, color='orange')
plt.axhspan(100, 10 ** 6, facecolor='0.2', alpha=0.2, color='red')

plt.show()
41.309859
103
0.703034
535
2,933
3.768224
0.31028
0.047619
0.035714
0.058036
0.228671
0.212302
0.203373
0.120536
0.109127
0
0
0.114196
0.128196
2,933
70
104
41.9
0.674228
0.292874
0
0
0
0
0.330897
0
0
0
0
0
0
1
0
false
0.025
0.075
0
0.075
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d44910e8c82debe9ba07f0a00ed736a65d972a9
2,000
py
Python
polydomino/search.py
PsiACE/polydomino
ade7cdb303cb4073d8c075659a5494392d31f8b4
[ "MIT" ]
null
null
null
polydomino/search.py
PsiACE/polydomino
ade7cdb303cb4073d8c075659a5494392d31f8b4
[ "MIT" ]
null
null
null
polydomino/search.py
PsiACE/polydomino
ade7cdb303cb4073d8c075659a5494392d31f8b4
[ "MIT" ]
null
null
null
# import the necessary packages import argparse import cv2 import numpy as np from polydomino.colordescriptor import ColorDescriptor from polydomino.searcher import Searcher # construct the argument parser and parse the arguments ap = argparse.ArgumentParser() ap.add_argument( "-i", "--index", required=True, help="Path to where the computed index will be stored", ) ap.add_argument("-q", "--query", required=True, help="Path to the query image") ap.add_argument( "-fm", "--features", required=True, help="Method to get features of pics", ) ap.add_argument( "-sm", "--searcher", required=True, help="Method to search pics", ) # ap.add_argument("-r", "--result-path", required=True, help="Path to the result path") args = vars(ap.parse_args()) # initialize the image descriptor cd = ColorDescriptor((8, 12, 3)) # load the query image and describe it query = cv2.imread(args["query"]) if args["features"] == "color-moments": features = cd.color_moments(query) elif args["features"] == "hsv-describe": features = cd.hsv_describe(query) elif args["features"] == "gray-matrix": features = cd.gray_matrix(query) elif args["features"] == "humoments": features = cd.humoments(query) elif args["features"] == "ahash": features = cd.ahash(query) elif args["features"] == "phash": features = cd.phash(query) elif args["features"] == "dhash": features = cd.dhash(query) elif args["features"] == "mse": features = cd.mse(query) elif args["features"] == "hog": features = cd.hog(query) else: print("Sorry, we don't support this method.") exit(1) # perform the search method = args["searcher"] searcher = Searcher(args["index"]) results = searcher.search(features, method) print(results) # display the query cv2.namedWindow("Query", 0) cv2.resizeWindow("Query", 640, 480) cv2.imshow("Query", query) # loop over the results for (score, resultID) in results: result = cv2.imread(resultID) cv2.imshow("Result", result) cv2.waitKey(0)
30.30303
87
0.6935
271
2,000
5.084871
0.361624
0.078374
0.075472
0.121916
0.087083
0.036284
0
0
0
0
0
0.012382
0.152
2,000
65
88
30.769231
0.800118
0.1485
0
0.055556
0
0
0.223141
0
0
0
0
0
0
1
0
false
0
0.092593
0
0.092593
0.037037
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d451d7664d2140e40043248faa30a6b327e59ee
2,880
py
Python
optimism/test/testMinimizeScalar.py
btalamini/optimism
023e1b2a0b137900a7517e4c7ac5056255cf7bbe
[ "MIT" ]
null
null
null
optimism/test/testMinimizeScalar.py
btalamini/optimism
023e1b2a0b137900a7517e4c7ac5056255cf7bbe
[ "MIT" ]
1
2022-03-12T00:01:12.000Z
2022-03-12T00:01:12.000Z
optimism/test/testMinimizeScalar.py
btalamini/optimism
023e1b2a0b137900a7517e4c7ac5056255cf7bbe
[ "MIT" ]
3
2021-12-23T19:53:31.000Z
2022-03-27T23:12:03.000Z
from optimism.JaxConfig import *
from optimism import MinimizeScalar
from optimism.test import TestFixture
from optimism.material import J2Plastic


def f(x):
    """Double-well quartic test objective (stationary points at 0 and +/-10)."""
    return 0.25*x**4 - 50.0*x**2 + 2.0


# First derivative of the objective, used to check solver residuals.
df = jacfwd(f)


class TestMinimizeScalarFixture(TestFixture.TestFixture):
    """Tests for MinimizeScalar.minimize_scalar.

    Methods prefixed ``notest_`` are intentionally disabled.
    """

    def setUp(self):
        # Pre-jit the solver; the objective (arg 0) and settings (arg 4)
        # are static arguments.
        self.minimize_scalar_jitted = jit(MinimizeScalar.minimize_scalar,
                                          static_argnums=(0, 4))

    def test_solves_quadratic_problem_in_one_iteration(self):
        quadratic = lambda x: x*x
        guess = 3.5
        opts = MinimizeScalar.get_settings(tol=1e-8, max_iters=1)
        # max_iters=1: the quadratic must be solved in a single iteration.
        root = MinimizeScalar.minimize_scalar(quadratic, guess,
                                              diffArgs=tuple(),
                                              nondiffArgs=tuple(),
                                              settings=opts)
        self.assertNear(root, 0.0, 12)

    def test_does_not_converge_to_saddle_point(self):
        # Start just left of the saddle at x=0; the solver should descend
        # to the minimum at x=-10 rather than stall at the saddle.
        guess = -0.001
        opts = MinimizeScalar.get_settings(tol=1e-10, max_iters=30)
        root = MinimizeScalar.minimize_scalar(f, guess,
                                              diffArgs=tuple(),
                                              nondiffArgs=tuple(),
                                              settings=opts)
        residual = np.abs(df(root))
        self.assertLess(residual, opts.tol)
        self.assertNear(root, -10.0, 9)

    def notest_jit(self):
        guess = -0.001
        opts = MinimizeScalar.get_settings(tol=1e-10, max_iters=30)
        root = self.minimize_scalar_jitted(f, guess,
                                           diffArgs=tuple(),
                                           nondiffArgs=tuple(),
                                           settings=opts)
        print("x={:1.13e}".format(root))
        self.assertNear(root, -1.0, 9)

    def notest_grad(self):
        def g(x, c):
            return 0.25*x**4 - 0.5*(c*x)**2 + 2.0
        c = -2.0
        guess = -3.0
        opts = MinimizeScalar.get_settings(tol=1e-10, max_iters=30)
        root = MinimizeScalar.minimize_scalar(g, guess,
                                              diffArgs=(c,),
                                              nondiffArgs=tuple(),
                                              settings=opts)
        print("x={:1.13e}".format(root))
        self.assertNear(root, c, 10)

    def notest_stiff_problem(self):
        # 1D elastoplastic-style energy: elastic part plus power-law
        # hardening Wp, minimized over the plastic strain ep.
        E = 69.0
        Y0 = 350.0
        n = 3.0
        eps0 = 1.0
        e = 1.01*Y0/E

        def Wp(ep):
            w = np.where(ep > 0.0,
                         Y0*ep + Y0*eps0*n/(n + 1.0)*(ep/eps0)**(1 + 1/n),
                         Y0*ep)
            return w

        W = lambda ep: 0.5*E*(e - ep)**2 + Wp(ep)
        opts = MinimizeScalar.get_settings(tol=1e-8*Y0, max_iters=30)
        ep = MinimizeScalar.minimize_scalar(W, 1e-15,
                                            diffArgs=tuple(),
                                            nondiffArgs=tuple(),
                                            settings=opts)
        print("ep = ", ep)
        yield_func = grad(W)
        print("r=", -yield_func(ep))


if __name__ == '__main__':
    TestFixture.unittest.main()
34.698795
95
0.515278
351
2,880
4.096866
0.264957
0.06815
0.097357
0.114743
0.450626
0.435327
0.435327
0.346314
0.335188
0.335188
0
0.065466
0.363542
2,880
82
96
35.121951
0.71904
0
0
0.265625
0
0
0.012153
0
0
0
0
0
0.078125
1
0.140625
false
0
0.0625
0.03125
0.234375
0.0625
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d46c2badf319d174f35513f77f2237bac4308e9
2,709
py
Python
anima/ui/review_dialog.py
MehmetErer/anima
f92ae599b5a4c181fc8e131a9ccdde537e635303
[ "MIT" ]
101
2015-02-08T22:20:11.000Z
2022-03-21T18:56:42.000Z
anima/ui/review_dialog.py
MehmetErer/anima
f92ae599b5a4c181fc8e131a9ccdde537e635303
[ "MIT" ]
23
2016-11-30T08:33:21.000Z
2021-01-26T12:11:12.000Z
anima/ui/review_dialog.py
MehmetErer/anima
f92ae599b5a4c181fc8e131a9ccdde537e635303
[ "MIT" ]
27
2015-01-03T06:49:45.000Z
2021-12-28T03:30:54.000Z
# -*- coding: utf-8 -*-
"""Review dialog for anima.

Usage example::

    import datetime
    from anima import defaults
    defaults.timing_resolution = datetime.timedelta(minutes=10)

    from anima.ui import SET_PYSIDE2
    SET_PYSIDE2()

    from anima.ui.widgets.review import APPROVE, REQUEST_REVISION
    from anima.ui import review_dialog
    review_dialog.UI(review_type=REQUEST_REVISION)
"""
from anima.ui.lib import QtCore, QtWidgets
from anima.ui.base import ui_caller, AnimaDialogBase


def UI(app_in=None, executor=None, **kwargs):
    """Create and show a ReviewDialog through the shared ui_caller helper.

    :param app_in: A Qt Application instance, which you can pass to let the UI
      be attached to the given applications event process.

    :param executor: Instead of calling app.exec_ the UI will call this given
      function. It also passes the created app instance to this executor.
    """
    return ui_caller(app_in, executor, ReviewDialog, **kwargs)


class ReviewDialog(QtWidgets.QDialog, AnimaDialogBase):
    """Dialog wrapping a ReviewWidget plus an Ok/Cancel button box."""

    def __init__(self, task=None, reviewer=None, review_type=None, parent=None):
        super(ReviewDialog, self).__init__(parent=parent)

        # task / reviewer / review_type are forwarded unchanged to the
        # embedded ReviewWidget in _setup_ui().
        self.task = task
        self.reviewer = reviewer
        self.review_type = review_type

        self.main_layout = None
        self.button_box = None
        self._setup_ui()

    def _setup_ui(self):
        """set up the ui elements"""
        self.setWindowTitle("Review Dialog")
        self.resize(550, 350)
        self.main_layout = QtWidgets.QVBoxLayout(self)

        # Review widget
        # (imported here, not at module level, keeping the widget import lazy)
        from anima.ui.widgets.review import ReviewWidget
        self.review_widget = ReviewWidget(
            parent=self,
            task=self.task,
            reviewer=self.reviewer,
            review_type=self.review_type,
        )
        self.main_layout.addWidget(self.review_widget)

        # Button Box (Cancel | Ok)
        self.button_box = QtWidgets.QDialogButtonBox(self)
        self.button_box.setOrientation(QtCore.Qt.Horizontal)
        self.button_box.setStandardButtons(
            QtWidgets.QDialogButtonBox.Cancel
            |
            QtWidgets.QDialogButtonBox.Ok
        )
        self.main_layout.addWidget(self.button_box)

        # setup signals: Ok -> accept(), Cancel -> reject()
        from functools import partial
        self.button_box.accepted.connect(partial(self.accept))
        self.button_box.rejected.connect(partial(self.reject))

    def accept(self):
        """Finalize the review, report success, then close the dialog."""
        # finalize the review
        review = self.review_widget.finalize_review()

        if review:
            QtWidgets.QMessageBox.information(
                self,
                "Success",
                "Review is created!"
            )

        # do the default behaviour
        super(ReviewDialog, self).accept()
29.769231
80
0.655592
316
2,709
5.474684
0.363924
0.041619
0.052601
0.019653
0.115607
0.034682
0
0
0
0
0
0.005467
0.257291
2,709
90
81
30.1
0.854374
0.282023
0
0
0
0
0.020116
0
0
0
0
0
0
1
0.090909
false
0
0.090909
0
0.227273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d47cbe33f2156eddf7fcd553e506425ed8d1607
12,737
py
Python
squares/dsl/interpreter.py
Vivokas20/SKEL
d8766ceaa8aa766ea3580bbb61b747572ebfe77c
[ "Apache-2.0" ]
1
2022-01-20T14:57:30.000Z
2022-01-20T14:57:30.000Z
squares/dsl/interpreter.py
Vivokas20/SKEL
d8766ceaa8aa766ea3580bbb61b747572ebfe77c
[ "Apache-2.0" ]
null
null
null
squares/dsl/interpreter.py
Vivokas20/SKEL
d8766ceaa8aa766ea3580bbb61b747572ebfe77c
[ "Apache-2.0" ]
null
null
null
import math
import re
from itertools import permutations
from logging import getLogger
from typing import Tuple, Union

from rpy2 import robjects
from rpy2.rinterface_lib.embedded import RRuntimeError
from z3 import BitVecVal

from .. import util, results
from ..decider import RowNumberInfo
from ..program import LineInterpreter
from ..tyrell.interpreter import InterpreterError

logger = getLogger('squares.interpreter')


def get_type(df, index):
    """Return the R class of column `index` of the R data frame named `df`."""
    _script = f'sapply({df}, class)[{index}]'
    ret_val = robjects.r(_script)
    return ret_val[0]


class RedudantError(InterpreterError):
    # NOTE(review): name is misspelled ("Redudant") but kept for
    # compatibility; only raised by the commented-out redundancy check in
    # eval_decorator below.
    def __init__(self, *args):
        pass


def add_quotes(string: str) -> str:
    """Quote each element of a comma-separated list for use in dplyr's by=c(...).

    Spaces and existing quotes are stripped first. "a=b,c" becomes
    "'a' = 'b','c'"; the empty string maps to the empty string.
    """
    new_string = ""
    if string != '':
        string = string.replace(" ", "").replace("\"", "").replace("'","").split(",")
        for s in string:
            if "=" in s:
                new = s.split("=")
                new_string += "'" + new[0] + "'" + " = " + "'" + new[1] + "'" + ","
            else:
                new_string += "'" + s + "'" + ","
        new_string = new_string[:-1]  # drop the trailing comma
    return new_string


def eval_decorator(func):
    """Wrap an eval_* method that returns an R script for a fresh table name.

    The wrapper allocates the fresh name, executes the script, and -- when
    caching is enabled and this is not the final interpretation -- memoizes
    the resulting name under `key`. During final interpretation the script
    is also appended to self.program.
    """
    def wrapper(self, args, key):
        if key and not self.final_interpretation and util.get_config().cache_ops:
            if not key in self.cache:
                name = util.get_fresh_name()
                self.try_execute(func(self, name, args))
                # if robjects.r(f'all_equal({name}, {args[0]}, convert=T, ignore_row_order=T)')[0] is True:
                #     results.redundant_lines += 1
                #     raise RedudantError()
                self.cache[key] = name
            return self.cache[key]
        name = util.get_fresh_name()
        script = func(self, name, args)
        if self.final_interpretation:
            self.program += script
        self.try_execute(script)
        return name
    return wrapper


class SquaresInterpreter(LineInterpreter):
    """Interprets SQUARES DSL programs by generating and running dplyr R code."""

    def __init__(self, problem, final_interpretation=False):
        self.problem = problem
        self.program = ''          # accumulated R program (final interpretation only)
        self.final_interpretation = final_interpretation
        self.cache = {}            # op-key -> R table name memo (see eval_decorator)

    def try_execute(self, script):
        """Run an R script, converting any failure into InterpreterError."""
        try:
            # print("SCRIPT_EXEC")
            # print(script, end='')
            robjects.r(script)
        except (Exception, RRuntimeError) as e:
            # logger.error("Error while evaluating program")
            # logger.error("%s", str(e))
            raise InterpreterError(e)

    # Each eval_* method below returns the R script that binds the fresh
    # table `name`; eval_decorator handles execution and caching.

    @eval_decorator
    def eval_filter(self, name, args):
        return f'{name} <- {args[0]} %>% filter({args[1]})\n'

    @eval_decorator
    def eval_filters(self, name, args):
        # args[3] is the boolean connective ('&' / '|') between the two conditions.
        return f'{name} <- {args[0]} %>% filter({args[1]} {args[3]} {args[2]})\n'

    @eval_decorator
    def eval_summarise(self, name, args):
        if args[2]:
            args2 = args[2].replace("'", "")
        else:
            args2 = args[2]
        # 'table$col' selects the summarise_<table>(<col>) form.
        re_object = re.fullmatch(r'([A-Za-z_]+)\$([A-Za-z_]+)', args[1])
        if re_object:
            return f'{name} <- {args[0]} %>% group_by({args2}) %>% summarise_{re_object.groups()[0]}({re_object.groups()[1]}) %>% ungroup()\n'
        else:
            return f'{name} <- {args[0]} %>% group_by({args2}) %>% summarise({args[1]}) %>% ungroup()\n'

    @eval_decorator
    def eval_mutate(self, name, args):
        re_object = re.fullmatch(r'([A-Za-z_]+)\$([A-Za-z_]+)', args[1])
        if re_object:
            return f'{name} <- {args[0]} %>% mutate_{re_object.groups()[0]}({re_object.groups()[1]})\n'
        else:
            return f'{name} <- {args[0]} %>% mutate({args[1]})\n'

    @eval_decorator
    def eval_inner_join(self, name, args):
        if args[2] and "'" not in args[2]:
            args2 = add_quotes(args[2])
        else:
            args2 = args[2]
        _script = f"{name} <- inner_join({args[0]}, {args[1]}, by=c({args2}), suffix = c('', '.other'), na_matches='{util.get_config().na_matches}')"
        # For 'a'='b' join pairs over distinct columns, also materialize the
        # right-hand column under the left-hand name.
        for pair in args2.split(','):
            if '=' in pair:
                A, B = pair.split('=')
                A = A.strip()[1:-1]
                B = B.strip()[1:-1]
                if A.strip() != B.strip():
                    _script += f' %>% mutate({B} = {A})'
        return _script + '\n'

    @eval_decorator
    def eval_natural_join(self, name, args):
        # inner_join when the tables share columns; otherwise a cross join.
        if robjects.r(f'length(intersect(colnames({args[0]}), colnames({args[1]})))')[0] > 0:
            return f'{name} <- inner_join({args[0]}, {args[1]}, na_matches="{util.get_config().na_matches}")\n'
        else:
            return f'{name} <- full_join({args[0]}, {args[1]}, by=character(), na_matches="{util.get_config().na_matches}")\n'

    @eval_decorator
    def eval_natural_join3(self, name, args):
        # Three-way natural join; each step degrades to a cross join when no
        # columns are shared with the tables joined so far.
        _script = f'{name} <- '
        if robjects.r(f'length(intersect(colnames({args[0]}), colnames({args[1]})))')[0] > 0:
            _script += f'inner_join({args[0]}, {args[1]}, na_matches="{util.get_config().na_matches}") '
        else:
            _script += f'full_join({args[0]}, {args[1]}, by=character(), na_matches="{util.get_config().na_matches}") '
        if robjects.r(f'length(intersect(union(colnames({args[0]}), colnames({args[1]})), colnames({args[2]})))')[0] > 0:
            _script += f'%>% inner_join({args[2]}, na_matches="{util.get_config().na_matches}")\n'
        else:
            _script += f'%>% full_join({args[2]}, by=character(), na_matches="{util.get_config().na_matches}")\n'
        return _script

    @eval_decorator
    def eval_natural_join4(self, name, args):
        # Four-way variant of eval_natural_join3.
        _script = f'{name} <- '
        if robjects.r(f'length(intersect(colnames({args[0]}), colnames({args[1]})))')[0] > 0:
            _script += f'inner_join({args[0]}, {args[1]}, na_matches="{util.get_config().na_matches}") '
        else:
            _script += f'full_join({args[0]}, {args[1]}, by=character(), na_matches="{util.get_config().na_matches}") '
        if robjects.r(f'length(intersect(union(colnames({args[0]}), colnames({args[1]})), colnames({args[2]})))')[0] > 0:
            _script += f'%>% inner_join({args[2]}, na_matches="{util.get_config().na_matches}") '
        else:
            _script += f'%>% full_join({args[2]}, by=character(), na_matches="{util.get_config().na_matches}") '
        if robjects.r(f'length(intersect(union(union(colnames({args[0]}), colnames({args[1]})), colnames({args[2]})), colnames({args[3]})))')[0] > 0:
            _script += f'%>% inner_join({args[3]}, na_matches="{util.get_config().na_matches}")\n'
        else:
            _script += f'%>% full_join({args[3]}, by=character(), na_matches="{util.get_config().na_matches}")\n'
        return _script

    @eval_decorator
    def eval_anti_join(self, name, args):
        if args[2] and "'" not in args[2]:
            args2 = add_quotes(args[2])
        else:
            args2 = args[2]
        return f'{name} <- anti_join({args[0]}, {args[1]}, by=c({args2}), na_matches="{util.get_config().na_matches}")\n'

    @eval_decorator
    def eval_left_join(self, name, args):
        return f'{name} <- left_join({args[0]}, {args[1]}, na_matches="{util.get_config().na_matches}")\n'

    @eval_decorator
    def eval_union(self, name, args):
        return f'{name} <- bind_rows({args[0]}, {args[1]})\n'

    @eval_decorator
    def eval_intersect(self, name, args):
        # Intersect only over the columns named in args[2].
        return f'{name} <- intersect(select({args[0]},{args[2]}), select({args[1]}, {args[2]}))\n'

    @eval_decorator
    def eval_semi_join(self, name, args):
        return f'{name} <- semi_join({args[0]}, {args[1]}, na_matches="{util.get_config().na_matches}")\n'

    @eval_decorator
    def eval_cross_join(self, name, args):
        _script = f'{name} <- full_join({args[0]}, {args[1]}, by=character(), suffix = c("", ".other"), na_matches="{util.get_config().na_matches}")'
        if args[2] != '':
            _script += f' %>% filter({args[2]})'
        return _script + '\n'

    @eval_decorator
    def eval_unite(self, name, args):
        return f'{name} <- unite({args[0]}, {args[1]}, {args[1]}, {args[2]}, sep=":", remove=F)\n'

    def apply_row(self, val):
        # Number of rows of the named R table.
        df = robjects.r(val)
        return df.nrow

    def apply_col(self, val):
        # Number of columns of the named R table.
        df = robjects.r(val)
        return df.ncol

    def apply_columns(self, val):
        # Unfinished: membership vector of problem columns in the table.
        a = list(robjects.r(f'colnames({val})'))
        bools = list(map(lambda c: c in a, self.problem.all_columns))
        raise NotImplementedError()

    def equals(self, actual: str, expect: str, *args) -> Tuple[bool, float, Union[RowNumberInfo, None]]:
        """Compare the R tables named `actual` and `expect`.

        Returns (matched, score, row_info): score is the fraction of expected
        cell values present in the actual output; row_info (when condition
        subsumption is on) records whether the actual output had too many or
        too few rows, to guide condition refinement.
        """
        if robjects.r(f'nrow({actual})')[0] == 0:
            results.empty_output += 1
        # with rpy2.robjects.conversion.localconverter(robjects.default_converter + pandas2ri.converter):
        #     print(robjects.conversion.rpy2py(robjects.r(actual)))
        score = robjects.r(f'ue <- {expect} %>% unlist %>% unique;length(intersect({actual} %>% unlist %>% unique, ue)) / length(ue)')[0]
        if math.isnan(score):
            score = 0
        if not util.get_config().subsume_conditions and score < 1:
            return False, score, None
        # Optional sketch constraints on the final SELECT.
        sketch_cols = None
        sketch_distinct = None
        sketch_order = None
        if self.problem.sketch and self.problem.sketch.select:
            if "cols" in self.problem.sketch.select:
                sketch_cols = tuple(self.problem.sketch.select["cols"])
            if "distinct" in self.problem.sketch.select:
                sketch_distinct = self.problem.sketch.select["distinct"]
            if "arrange" in self.problem.sketch.select:
                sketch_order = self.problem.sketch.select["arrange"]
        # The columns are already described in the output so we don't need to use them
        a_cols = list(robjects.r(f'colnames({actual})'))
        e_cols = list(robjects.r(f'colnames({expect})'))
        expected_n = int(robjects.r(f'nrow({expect})')[0])
        result = None
        # Try every way of mapping actual columns onto the expected ones
        # (or only the sketched mapping when the sketch pins the columns).
        if sketch_cols:
            selected_columns = [sketch_cols]
        else:
            selected_columns = permutations(a_cols, len(e_cols))
        for combination in selected_columns:
            for d in sketch_distinct if sketch_distinct is not None else ['', ' %>% distinct()']:
                _script = f'out <- {actual} %>% select({", ".join(map(lambda pair: f"{pair[0]} = {pair[1]}" if pair[0] != pair[1] else pair[0], zip(e_cols, combination)))}){d}'
                try:
                    robjects.r(_script)
                    if self.test_equality('out', expect, False):
                        if self.final_interpretation:
                            if sketch_order != []:  # None implies that there is no sketch so it must be [] to ensure there is no order by
                                if sketch_order:
                                    perms = sketch_order
                                else:
                                    perms = util.get_permutations(e_cols, len(e_cols))
                                # Find an arrange() that also matches row order.
                                for perm in perms:
                                    name = util.get_fresh_name()
                                    new_script = f'{name} <- out %>% arrange({perm})'
                                    robjects.r(new_script)
                                    if self.test_equality(name, expect, True):
                                        _script += f' %>% arrange({perm})'
                                        break
                            self.program += _script + '\n'
                        return True, score, None
                except:
                    continue
                finally:
                    # Track whether 'out' over/undershoots the expected row
                    # count; conflicting observations collapse to UNKNOWN.
                    if util.get_config().subsume_conditions and result != RowNumberInfo.UNKNOWN:
                        actual_n = int(robjects.r(f'nrow(out)')[0])
                        if actual_n > expected_n:
                            if result is None or result == RowNumberInfo.LESS_ROWS:
                                result = RowNumberInfo.LESS_ROWS
                            else:
                                result = RowNumberInfo.UNKNOWN
                        if actual_n < expected_n:
                            if result is None or result == RowNumberInfo.MORE_ROWS:
                                result = RowNumberInfo.MORE_ROWS
                            else:
                                result = RowNumberInfo.UNKNOWN
        return False, score, result

    def test_equality(self, actual: str, expect: str, keep_order: bool = False) -> bool:
        """True when dplyr's all_equal deems the two named tables equal.

        NOTE(review): ignore_row_order=T is passed when keep_order is True,
        which looks inverted relative to the parameter name -- confirm intent
        before relying on this flag.
        """
        if not keep_order:
            _script = f'all_equal({actual}, {expect}, convert=T)'
        else:
            _script = f'all_equal({actual}, {expect}, convert=T, ignore_row_order=T)'
        try:
            return robjects.r(_script)[0] is True
        except:
            return False
42.885522
176
0.544241
1,557
12,737
4.290944
0.143866
0.045802
0.038916
0.040712
0.502769
0.453076
0.39066
0.341416
0.304146
0.294866
0
0.015714
0.295517
12,737
296
177
43.030405
0.728853
0.045615
0
0.298755
0
0.053942
0.280303
0.132246
0.008299
0
0
0
0
1
0.112033
false
0.004149
0.049793
0.029046
0.294606
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d4857e094a5401228d6f2b6484e13982abb69b9
7,869
py
Python
src/data_preparation/process_airbnb_data.py
ejgenc/Data-Analysis_Istanbul-Health-Tourism
34b9838690ca640c6a7a60f63eb2f51983ec46ef
[ "MIT" ]
1
2020-11-18T15:27:53.000Z
2020-11-18T15:27:53.000Z
src/data_preparation/process_airbnb_data.py
ejgenc/Data-Analysis_Istanbul-Health-Tourism
34b9838690ca640c6a7a60f63eb2f51983ec46ef
[ "MIT" ]
null
null
null
src/data_preparation/process_airbnb_data.py
ejgenc/Data-Analysis_Istanbul-Health-Tourism
34b9838690ca640c6a7a60f63eb2f51983ec46ef
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
------ What is this file? ------

This script targets the istanbul_airbnb_raw.csv file. It cleans the .csv file
in order to prepare it for further analysis
"""
#%% --- Import Required Packages ---
import os
import pathlib
from pathlib import Path # To wrap around filepaths
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.stats import iqr
from src.helper_functions.data_preparation_helper_functions import sample_and_read_from_df
from src.helper_functions.data_preparation_helper_functions import report_null_values

#%% --- Set proper directory to assure integration with doit ---
# Run relative to this script's own directory regardless of invocation cwd.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)

#%% --- Import Data ---
import_fp = Path("../../data/raw/istanbul_airbnb_raw.csv")
airbnb = pd.read_csv(import_fp, encoding='utf-8-sig')

#%% --- Get a general sense of the datasets ---
# Shape of the data
print(airbnb.shape) # 16251 rows, 16 cols
# First few lines
print(airbnb.head())
# Not much info, let's print the columns
airbnb_columns = airbnb.columns

#%% --- Clean the dataset: Relevant - Irrelevant Columns ---
airbnb_unwanted_columns = ["neighbourhood_group", "last_review",
                           "number_of_reviews", "minimum_nights",
                           "reviews_per_month",
                           "calculated_host_listings_count",
                           "availability_365"]
# Drop unwanted columns
airbnb.drop(columns = airbnb_unwanted_columns, axis = 1, inplace = True)
# Check shape now
print(airbnb.shape) # 16251 rows, 9 cols

#%% --- Clean the dataset: Further Troubleshooting ---
# I want to be able to randomly take n samples from each dataset and then
# print them in a clean format to see the potential problems.
# If I had something to test for, I'd strive for somewhat of a representative
# sample size while sampling. However, I think the best thing to do here is to
# print what I can read, because I don't have any computational measure to
# test for something:
sample_and_read_from_df(airbnb, 20)

# SPOTTED PROBLEMS:
#   dataframe airbnb column "neighbourhood" is not properly formatted:
#     - formatting fixes needed
#     - should actually be called "district_tr"
#     - there should be an accompanying "district_eng" column.

#%% --- Fix column naming ---
# I can use either the dataframe.columns attribute to assign new columns
# or I can pass a dictionary with old names/new names into dataframe.rename()
airbnb_columns_in_english = ["listing_id", "name", "host_id",
                             "host_name", "district_eng", "latitude",
                             "longitude", "room_type", "price"]
airbnb.columns = airbnb_columns_in_english

#%% --- One-off fix for districts named "Eyup" ---
eyup_mask = airbnb.loc[:,"district_eng"] == "Eyup"
airbnb.loc[eyup_mask, "district_eng"] = "Eyupsultan"

#%% --- Add a new "district_tr" column ---
airbnb.loc[:,"district_tr"] = airbnb.loc[:,"district_eng"].str.lower().str.capitalize()

# I will be using the df.map() method, so I'll need two lists: one for
# existing values -> tr values and one for existing values -> eng values
unique_districts_tr_corrected = ["Kadıköy", "Fatih", "Tuzla", "Gaziosmanpaşa",
                                 "Üsküdar", "Adalar", "Sarıyer", "Arnavutköy",
                                 "Silivri", "Çatalca", "Küçükçekmece",
                                 "Beyoğlu", "Şile", "Kartal", "Şişli",
                                 "Beşiktaş", "Kağıthane", "Esenyurt",
                                 "Bahçelievler", "Avcılar", "Başakşehir",
                                 "Sultangazi", "Maltepe", "Sancaktepe",
                                 "Beykoz", "Büyükçekmece", "Bakırköy",
                                 "Pendik", "Bağcılar", "Esenler",
                                 "Beylikdüzü", "Ümraniye", "Eyüpsultan",
                                 "Çekmeköy", "Ataşehir", "Sultanbeyli",
                                 "Zeytinburnu", "Güngören", "Bayrampaşa"]

unique_districts_eng_corrected = ["Kadikoy", "Fatih", "Tuzla", "Gaziosmanpasa",
                                  "Uskudar", "Adalar", "Sariyer", "Arnavutkoy",
                                  "Silivri", "Catalca", "Kucukcekmece",
                                  "Beyoglu", "Sile", "Kartal", "Sisli",
                                  "Besiktas", "Kagithane", "Esenyurt",
                                  "Bahcelievler", "Avcilar", "Basaksehir",
                                  "Sultangazi", "Maltepe", "Sancaktepe",
                                  "Beykoz", "Buyukcekmece", "Bakirkoy",
                                  "Pendik", "Bagcilar", "Esenler",
                                  "Beylikduzu", "Umraniye", "Eyupsultan",
                                  "Cekmekoy", "Atasehir", "Sultanbeyli",
                                  "Zeytinburnu", "Gungoren", "Bayrampasa"]

airbnb_unique_districts_dict_tr = dict(zip(unique_districts_eng_corrected, unique_districts_tr_corrected))

airbnb.loc[:,"district_tr"] = airbnb.loc[:,"district_tr"].map(airbnb_unique_districts_dict_tr)

#%% --- EDA: Explore Missing Values ---
# Let's check null values first
null_report = report_null_values(airbnb)
# We have so few missing values, dropping them won't affect our quality at all.
# Let's do exactly that.
airbnb.dropna(axis = 0, inplace = True)

#%% --- EDA: Explore Datatype agreement ---
# Now, let's check data type agreement for each column.
data_types = airbnb.dtypes
# The data types with "object" warrant further investigation.
# They could just be strings, but mixed data types also show as "object".
# Let's select "object" data types and query once again.
airbnb_dtype_object_only = airbnb.select_dtypes(include = ["object"])
print(airbnb_dtype_object_only.columns)
# As all the column names seem to accommodate only strings, we can be
# pretty sure that showing up as object is correct behavior.

#%% --- EDA - Explore Outliers in price ---
fig = plt.figure(figsize = (19.20, 10.80))
ax = fig.add_subplot(1,1,1)
ax.hist(x = airbnb.loc[:,"price"], bins = 20)
# Our histogram is very wonky. It's obvious that there are some issues. Let's see:
# It doesn't make sense for an airbnb room to cost 0 liras. That's for sure.
print(airbnb.loc[:,"price"].sort_values().head(20))
# What about maxes?
print(airbnb.loc[:,"price"].sort_values(ascending = False).head(30))
# There are some very high maxes, that's for sure. Let's try to make heads and
# tails of what these houses are:
possible_outliers = airbnb.sort_values(by = "price", axis = 0, ascending = False).head(30)
# A qualitative analysis of such houses shows that there really appears to be
# a problem with pricing. Let's calculate the IQR to drop the outliers:

# Calculate the iqr
price_iqr = iqr(airbnb.loc[:,"price"], axis = 0)
# Calculate q3 and q1
q1 = airbnb["price"].quantile(0.25)
q3 = airbnb["price"].quantile(0.75)
# Create min and max mask (standard 1.5*IQR fences)
min_mask = airbnb.loc[:,"price"] >= q1 - (1.5 * price_iqr)
max_mask = airbnb.loc[:,"price"] <= q3 + (1.5 * price_iqr)
# Combine masks
combined_mask = min_mask & max_mask
# Create subset
airbnb_within_iqr = airbnb.loc[combined_mask]

fig = plt.figure(figsize = (19.20, 10.80))
ax = fig.add_subplot(1,1,1)
ax.hist(x = airbnb_within_iqr.loc[:,"price"], bins = 20)
# Alright, limiting our data to an IQR appears to omit a whole lot of data.
# I am sure that some of the outliers we have are errors of entry.
# However, the only ones that we can conclusively prove are the entries that
# are rated at 0. We'll drop these.

# Create a mask for zeros
zero_mask = (airbnb.loc[:,"price"] > 0)
# Filter using the mask
airbnb = airbnb.loc[zero_mask,:]

#%% --- Export Data ---
export_fp = Path("../../data/processed/istanbul_airbnb_processed.csv")
airbnb.to_csv(export_fp, encoding='utf-8-sig', index = False)
38.199029
106
0.641632
1,019
7,869
4.83317
0.409225
0.027411
0.019898
0.011574
0.109239
0.073503
0.061726
0.047107
0.047107
0.023553
0
0.01269
0.238912
7,869
206
107
38.199029
0.809651
0.38747
0
0.137931
0
0
0.228842
0.024842
0
0
0
0
0
1
0
false
0
0.126437
0
0.126437
0.068966
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d4ac45e3a86ef95dc9b84f578aa4f83f679c9b6
3,695
py
Python
py/shure.py
dman776/micboard
166987dfad529dc35654f402fdbbde7f16b60f77
[ "MIT" ]
44
2019-08-30T02:51:59.000Z
2022-03-15T13:47:18.000Z
py/shure.py
dman776/micboard
166987dfad529dc35654f402fdbbde7f16b60f77
[ "MIT" ]
21
2019-09-01T16:17:22.000Z
2022-02-01T15:47:55.000Z
py/shure.py
dman776/micboard
166987dfad529dc35654f402fdbbde7f16b60f77
[ "MIT" ]
16
2019-09-01T01:40:09.000Z
2022-03-15T17:12:28.000Z
import time
import select
import queue
import atexit
import sys
import logging

from networkdevice import ShureNetworkDevice
from channel import chart_update_list, data_update_list
# from mic import WirelessMic
# from iem import IEM

# Registry of every Shure receiver/transmitter we talk to.
NetworkDevices = []
# Raw messages read off the sockets, consumed by ProcessRXMessageQueue().
DeviceMessageQueue = queue.Queue()


def get_network_device_by_ip(ip):
    """Return the tracked device with the given IP, or None."""
    return next((x for x in NetworkDevices if x.ip == ip), None)


def get_network_device_by_slot(slot):
    """Return the channel assigned to `slot`, or None.

    NOTE(review): despite the name, this returns a *channel*, not a device.
    """
    for networkdevice in NetworkDevices:
        for channel in networkdevice.channels:
            if channel.slot == slot:
                return channel


def check_add_network_device(ip, type):
    """Return the device registered for `ip`, creating and registering one if needed."""
    net = get_network_device_by_ip(ip)
    if net:
        return net
    net = ShureNetworkDevice(ip, type)
    NetworkDevices.append(net)
    return net


def watchdog_monitor():
    """Tear down stale sockets and reconnect dropped devices, per-state timeouts."""
    # Connected but silent for more than 5s: assume the link died.
    for rx in (rx for rx in NetworkDevices if rx.rx_com_status == 'CONNECTED'):
        if (int(time.perf_counter()) - rx.socket_watchdog) > 5:
            logging.debug('disconnected from: %s', rx.ip)
            rx.socket_disconnect()

    # A connection attempt stuck for more than 2s: abort it.
    for rx in (rx for rx in NetworkDevices if rx.rx_com_status == 'CONNECTING'):
        if (int(time.perf_counter()) - rx.socket_watchdog) > 2:
            rx.socket_disconnect()

    # Disconnected for more than 20s: try connecting again.
    for rx in (rx for rx in NetworkDevices if rx.rx_com_status == 'DISCONNECTED'):
        if (int(time.perf_counter()) - rx.socket_watchdog) > 20:
            rx.socket_connect()


def WirelessQueryQueue():
    """Every 10 seconds, queue the periodic query strings for all connected devices."""
    while True:
        for rx in (rx for rx in NetworkDevices if rx.rx_com_status == 'CONNECTED'):
            strings = rx.get_query_strings()
            for string in strings:
                rx.writeQueue.put(string)
        time.sleep(10)


def ProcessRXMessageQueue():
    """Drain DeviceMessageQueue forever, parsing each raw message on its device."""
    while True:
        rx, msg = DeviceMessageQueue.get()
        rx.parse_raw_rx(msg)


def SocketService():
    """Main socket loop: connect every device, then select()-drive reads/writes."""
    for rx in NetworkDevices:
        rx.socket_connect()

    while True:
        watchdog_monitor()
        readrx = [rx for rx in NetworkDevices
                  if rx.rx_com_status in ['CONNECTING', 'CONNECTED']]
        writerx = [rx for rx in readrx if not rx.writeQueue.empty()]

        read_socks, write_socks, error_socks = select.select(readrx, writerx, readrx, .2)

        for rx in read_socks:
            try:
                data = rx.f.recv(1024).decode('UTF-8')
            except (OSError, UnicodeDecodeError):
                # was a bare `except:` -- narrowed so KeyboardInterrupt/SystemExit
                # are no longer swallowed; any socket/decode failure disconnects
                rx.socket_disconnect()
                break
            # print("read: {} data: {}".format(rx.ip, data))
            d = '>'
            if rx.type == 'uhfr':
                d = '*'
            # Re-attach the delimiter that split() strips, so each queued
            # message is a complete frame.
            data = [e + d for e in data.split(d) if e]

            for line in data:
                # rx.parse_raw_rx(line)
                DeviceMessageQueue.put((rx, line))
            rx.socket_watchdog = int(time.perf_counter())
            rx.set_rx_com_status('CONNECTED')

        for rx in write_socks:
            string = rx.writeQueue.get()
            logging.debug("write: %s data: %s", rx.ip, string)
            try:
                if rx.type in ['qlxd', 'ulxd', 'axtd', 'p10t']:
                    rx.f.sendall(bytearray(string, 'UTF-8'))
                elif rx.type == 'uhfr':
                    rx.f.sendto(bytearray(string, 'UTF-8'), (rx.ip, 2202))
            except OSError:
                # was a bare `except:`; only socket errors are expected here
                logging.warning("TX ERROR IP: %s String: %s", rx.ip, string)

        for rx in error_socks:
            rx.set_rx_com_status('DISCONNECTED')


# @atexit.register
def on_exit():
    """Disable metering on every connected device, then exit the process."""
    connected = [rx for rx in NetworkDevices if rx.rx_com_status == 'CONNECTED']
    for rx in connected:
        rx.disable_metering()
    time.sleep(50)
    print("IT DONE!")
    sys.exit(0)

# atexit.register(on_exit)
# signal.signal(signal.SIGTERM, on_exit)
# signal.signal(signal.SIGINT, on_exit)
29.56
97
0.603518
479
3,695
4.511482
0.254697
0.03702
0.051828
0.029153
0.285053
0.233688
0.213327
0.197594
0.147617
0.147617
0
0.008732
0.287145
3,695
124
98
29.798387
0.811693
0.063329
0
0.183908
0
0
0.058806
0
0
0
0
0
0
1
0.091954
false
0
0.091954
0.011494
0.229885
0.011494
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d50b18aa63e6f3b4b6406ced31f91d878b8ae26
773
py
Python
e_vae_proj/qualitative/mnist/btcvae/gen_train.py
kuangdai/disentangling-vae
9a5f9da44a82a2c643b7289c4945320621b86247
[ "MIT" ]
1
2021-06-30T08:58:49.000Z
2021-06-30T08:58:49.000Z
e_vae_proj/qualitative/mnist/btcvae/gen_train.py
kuangdai/disentangling-vae
9a5f9da44a82a2c643b7289c4945320621b86247
[ "MIT" ]
null
null
null
e_vae_proj/qualitative/mnist/btcvae/gen_train.py
kuangdai/disentangling-vae
9a5f9da44a82a2c643b7289c4945320621b86247
[ "MIT" ]
null
null
null
import numpy as np
from pathlib import Path
import sys

if __name__ == '__main__':
    # absolute path of the directory containing this script
    my_path = Path(__file__).parent.resolve().expanduser()

    # Experiment hyper-parameters baked into the generated command.
    seed = 0
    nlat = 10      # latent dimensionality
    alpha = 1.0    # btcvae mutual-information weight (--btcvae-A)
    beta = 6.0     # btcvae total-correlation weight (--btcvae-B)
    gamma = 1.0    # btcvae dimension-wise KL weight (--btcvae-G)
    epochs = 100

    # cmd template
    cmd = f'python main.py btcvae_mnist_{epochs}ep/z{nlat}_a{alpha}_b{beta}_g{gamma}_s{seed} -s {seed} ' \
          f'--checkpoint-every 25 -d mnist -e {epochs} -b 64 --lr 0.0005 ' \
          f'-z {nlat} -l btcvae --btcvae-A {alpha} --btcvae-B {beta} --btcvae-G {gamma} ' \
          f'--no-test\n'

    # Write the one-line training script next to this file.
    # (Removed unused locals: `main_path` and `unnormalized_beta = beta * nlat`,
    # which were computed but never used.)
    with open(my_path / f'train_beta{beta}.sh', 'w') as f:
        f.write(cmd)
28.62963
107
0.564036
116
773
3.551724
0.517241
0.043689
0.048544
0
0
0
0
0
0
0
0
0.038532
0.294955
773
26
108
29.730769
0.717431
0.033635
0
0
0
0.157895
0.371866
0.090529
0
0
0
0
0
1
0
false
0
0.157895
0
0.157895
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d5197f8d1796538860fe2f3fb98a1af46c8ef38
3,331
py
Python
tests/test_load.py
tom3131/simfin
8ef5a2b0dd67ddcd3f8b92b5cd45c1a483eeada1
[ "MIT" ]
231
2019-09-25T13:30:00.000Z
2022-03-26T08:00:47.000Z
tests/test_load.py
tom3131/simfin
8ef5a2b0dd67ddcd3f8b92b5cd45c1a483eeada1
[ "MIT" ]
11
2019-10-01T14:50:15.000Z
2022-02-23T10:35:47.000Z
tests/test_load.py
tom3131/simfin
8ef5a2b0dd67ddcd3f8b92b5cd45c1a483eeada1
[ "MIT" ]
36
2019-09-30T16:14:48.000Z
2022-03-19T19:59:30.000Z
##########################################################################
#
# Unit tests (pytest) for load.py
#
##########################################################################
# SimFin - Simple financial data for Python.
# www.simfin.com - www.github.com/simfin/simfin
# See README.md for instructions and LICENSE.txt for license details.
##########################################################################

import simfin as sf
from simfin.datasets import iter_all_datasets

##########################################################################
# Test configuration.

# Directory where the downloaded data-files are cached.
sf.set_data_dir(data_dir='~/simfin_data/')

# Load API key or use default 'free' if key-file doesn't exist.
sf.load_api_key(path='~/simfin_api_key.txt', default_key='free')

# Number of days before refreshing data from the SimFin server.
refresh_days = 30

##########################################################################
# Helper functions.

def _create_kwargs(variant, market):
    """
    Build the keyword-argument dict shared by the sf.load_*() functions:
    the given variant and market plus the module-wide refresh_days.
    """
    return {
        'variant': variant,
        'market': market,
        'refresh_days': refresh_days,
    }

##########################################################################
# Test functions.
def test_load():
    """Test simfin.bulk.load()"""
    # Exercise the generic loader across every (dataset, variant, market) combo.
    for dataset, variant, market in iter_all_datasets():
        sf.load(dataset=dataset, variant=variant, market=market,
                refresh_days=refresh_days)


def test_load_income():
    """Test simfin.bulk.load_income()"""
    for _, variant, market in iter_all_datasets(datasets='income'):
        kwargs = _create_kwargs(variant=variant, market=market)
        sf.load_income(**kwargs)
        sf.load_income_banks(**kwargs)
        sf.load_income_insurance(**kwargs)


def test_load_balance():
    """Test simfin.bulk.load_balance()"""
    for _, variant, market in iter_all_datasets(datasets='balance'):
        kwargs = _create_kwargs(variant=variant, market=market)
        sf.load_balance(**kwargs)
        sf.load_balance_banks(**kwargs)
        sf.load_balance_insurance(**kwargs)


def test_load_cashflow():
    """Test simfin.bulk.load_cashflow()"""
    for _, variant, market in iter_all_datasets(datasets='cashflow'):
        kwargs = _create_kwargs(variant=variant, market=market)
        sf.load_cashflow(**kwargs)
        sf.load_cashflow_banks(**kwargs)
        sf.load_cashflow_insurance(**kwargs)


def test_load_shareprices():
    """Test simfin.bulk.load_shareprices()"""
    for _, variant, market in iter_all_datasets(datasets='shareprices'):
        sf.load_shareprices(**_create_kwargs(variant=variant, market=market))


def test_load_companies():
    """Test simfin.bulk.load_companies()"""
    for _, variant, market in iter_all_datasets(datasets='companies'):
        sf.load_companies(**_create_kwargs(variant=variant, market=market))


def test_load_industries():
    """Test simfin.bulk.load_industries()"""
    sf.load_industries(refresh_days=refresh_days)

##########################################################################
31.424528
78
0.576403
355
3,331
5.183099
0.225352
0.048913
0.057065
0.098913
0.38587
0.340217
0.340217
0.340217
0.266304
0
0
0.000723
0.169919
3,331
105
79
31.72381
0.664738
0.216752
0
0.106383
0
0
0.051358
0
0
0
0
0
0
1
0.170213
false
0
0.042553
0
0.234043
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d556827bb836c6e6f6530ec156f0777935a5dea
1,514
py
Python
async_nbgrader/apps/exportapp.py
IllumiDesk/async_nbgrader
427e1b634277c043a1ed9f00bf7e417e0f611aca
[ "Apache-2.0" ]
2
2021-06-23T17:58:22.000Z
2021-09-27T10:00:01.000Z
async_nbgrader/apps/exportapp.py
IllumiDesk/async-nbgrader
427e1b634277c043a1ed9f00bf7e417e0f611aca
[ "Apache-2.0" ]
6
2021-06-17T21:40:24.000Z
2021-11-11T17:48:15.000Z
async_nbgrader/apps/exportapp.py
IllumiDesk/async-nbgrader
427e1b634277c043a1ed9f00bf7e417e0f611aca
[ "Apache-2.0" ]
2
2021-06-10T18:16:22.000Z
2021-06-17T02:52:45.000Z
# coding: utf-8

from nbgrader.api import Gradebook
from nbgrader.apps import ExportApp as BaseExportApp
from traitlets import Instance
from traitlets import Type
from traitlets import default

from ..plugins import CanvasCsvExportPlugin
from ..plugins import CustomExportPlugin

# Command-line aliases mapped onto traitlets configuration entries.
aliases = {
    "log-level": "Application.log_level",
    "db": "CourseDirectory.db_url",
    "to": "CanvasCsvExportPlugin.to",
    "canvas_import": "CanvasCsvExportPlugin.canvas_import",
    "exporter": "ExportApp.plugin_class",
    "assignment": "CanvasCsvExportPlugin.assignment",
    "student": "CanvasCsvExportPlugin.student",
    "course": "CourseDirectory.course_id",
}

flags = {}


class ExportApp(BaseExportApp):
    """Custom nbgrader export app to export grades from a Canvas LMS course.
    """

    name = "async_nbgrader-export"
    aliases = aliases

    # Exporter plugin class; defaults to the Canvas CSV exporter.
    plugin_class = Type(
        CanvasCsvExportPlugin,
        klass=CustomExportPlugin,
        help="The plugin class for exporting the grades.",
    ).tag(config=True)

    # Instantiated exporter; populated by init_plugin() during start().
    plugin_inst = Instance(CustomExportPlugin).tag(config=False)

    @default("classes")
    def _classes_default(self):
        """Register this app and the plugin base as configurable classes."""
        classes = super(ExportApp, self)._classes_default()
        classes.extend([ExportApp, CustomExportPlugin])
        return classes

    def start(self):
        """Run the base export flow, then hand the open gradebook to the plugin."""
        super(ExportApp, self).start()
        self.init_plugin()
        with Gradebook(self.coursedir.db_url,
                       self.coursedir.course_id) as gb:
            self.plugin_inst.export(gb)
27.527273
78
0.703435
160
1,514
6.55
0.40625
0.037214
0.054389
0
0
0
0
0
0
0
0
0.00082
0.194848
1,514
54
79
28.037037
0.858901
0.055482
0
0
0
0
0.238163
0.163251
0
0
0
0
0
1
0.052632
false
0
0.210526
0
0.421053
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d5735cba5c6faf4bc0915b6d346541d85cbb4ac
15,960
py
Python
torsion/model/symmetry_function.py
hnlab/TorsionNet
e81ab624f1340765345b34240a049a8cc5f4d581
[ "MIT" ]
15
2021-01-15T01:54:26.000Z
2022-03-31T16:00:52.000Z
torsion/model/symmetry_function.py
hnlab/TorsionNet
e81ab624f1340765345b34240a049a8cc5f4d581
[ "MIT" ]
2
2021-07-21T22:42:09.000Z
2021-11-22T06:39:20.000Z
torsion/model/symmetry_function.py
hnlab/TorsionNet
e81ab624f1340765345b34240a049a8cc5f4d581
[ "MIT" ]
6
2021-01-16T04:07:17.000Z
2022-02-23T02:11:49.000Z
import math

import numpy as np
from openeye import oechem

from torsion.inchi_keys import get_torsion_oeatom_list, get_torsion_oebond


def GetPairwiseDistanceMatrix(icoords, jcoords):
    '''
    Compute pairwise displacement and distance matrices between two
    coordinate sets.

    input: two sets of coordinates, icoords, jcoords;
           each of which are a list of OEDoubleArray(3)
           containing x, y, and z component
    output: xij  - the x component of the distance matrix
            yij  - the y component of the distance matrix
            zij  - the z component of the distance matrix
            rij  - the distance matrix
            rij2 - square of the distance matrix
    On failure, returns [None, None, None, None, None].
    '''
    nullRet = [None, None, None, None, None]
    ni = len(icoords)
    nj = len(jcoords)
    try:
        # Expand icoords into (ni, nj) grids, one array per component.
        iArrayX = np.repeat(np.array([c[0] for c in icoords]), nj).reshape(ni, nj)
        iArrayY = np.repeat(np.array([c[1] for c in icoords]), nj).reshape(ni, nj)
        iArrayZ = np.repeat(np.array([c[2] for c in icoords]), nj).reshape(ni, nj)

        # Same for jcoords, transposed so both grids align as (ni, nj).
        jArrayX = np.transpose(np.repeat(np.array([c[0] for c in jcoords]), ni).reshape(nj, ni))
        jArrayY = np.transpose(np.repeat(np.array([c[1] for c in jcoords]), ni).reshape(nj, ni))
        jArrayZ = np.transpose(np.repeat(np.array([c[2] for c in jcoords]), ni).reshape(nj, ni))

        ijArrayX = jArrayX - iArrayX
        ijArrayY = jArrayY - iArrayY
        ijArrayZ = jArrayZ - iArrayZ

        rijArraySq = (ijArrayX * ijArrayX) + (ijArrayY * ijArrayY) + (ijArrayZ * ijArrayZ)
        rijArray = np.sqrt(rijArraySq)
        return ijArrayX, ijArrayY, ijArrayZ, rijArray, rijArraySq
    except Exception:
        # was a bare `except:` -- keep the null-return contract, but no longer
        # swallow KeyboardInterrupt/SystemExit
        return nullRet


def GetThetaIJKMatrix(iCoords, jCoords, kCoords):
    '''
    Using the given input, calculates a matrix of angles ijk
    iCoords -> OEDoubleArray containing x, y, and z component of the
               reference coordinate
    jCoordsList -> list of N OEDoubleArrays, each OEDoubleArray is of size 3
    kCoordsList -> list of M OEDoubleArrays, each OEDoubleArray is of size 3
    return a N-by-M matrix of angle theta_ijk
    '''
    jiArrayX, jiArrayY, jiArrayZ, rjiArray, rjiArraySq = \
        GetPairwiseDistanceMatrix(jCoords, iCoords)
    jkArrayX, jkArrayY, jkArrayZ, rjkArray, rjkArraySq = \
        GetPairwiseDistanceMatrix(jCoords, kCoords)

    # When a set is paired with itself the diagonal distances are zero;
    # add the identity so the normalization below cannot divide by zero.
    if jCoords == kCoords:
        rjkArray = np.eye(len(jCoords)) + np.sqrt(rjkArraySq)
    else:
        rjkArray = np.sqrt(rjkArraySq)
    if jCoords == iCoords:
        rjiArray = np.eye(len(jCoords)) + np.sqrt(rjiArraySq)
    else:
        rjiArray = np.sqrt(rjiArraySq)

    # Normalize both displacement vectors.
    jiArrayX = jiArrayX / rjiArray
    jiArrayY = jiArrayY / rjiArray
    jiArrayZ = jiArrayZ / rjiArray
    jkArrayX = jkArrayX / rjkArray
    jkArrayY = jkArrayY / rjkArray
    jkArrayZ = jkArrayZ / rjkArray

    dotProduct = (jiArrayX * jkArrayX) + (jiArrayY * jkArrayY) + (jiArrayZ * jkArrayZ)
    # Clamp to (-1, 1) so arccos never sees an out-of-domain value produced
    # by floating-point round-off.
    dotProduct = np.select([dotProduct <= -1.0, dotProduct >= 1.0,
                            np.abs(dotProduct) < 1.0],
                           [-0.999, 0.999, dotProduct])
    theta_ijk = np.arccos(dotProduct)
    return theta_ijk


def GetThetaIJKLMatrix(mol, iAtoms, jAtom, kAtom, lAtoms, transform=True):
    '''
    Using the given input, calculates a matrix of torsion angles around jk
    jAtom, kAtom -> OEAtombase, middle two atoms of the torsion
    iAtoms -> list of N OEAtombase
    lAtoms -> list of M OEAtombase
    return a N-by-M matrix of angle theta_ijkl
    '''
    torsions = []
    for iAtom in iAtoms:
        for lAtom in lAtoms:
            tor_angle = oechem.OEGetTorsion(mol, iAtom, jAtom, kAtom, lAtom)
            if not transform:
                torsions.append(tor_angle)
            else:
                # shift/scale the angle into a compact positive range
                torsions.append((math.pi + tor_angle) / 4.0)
    theta_ijkl = np.array(torsions)
    theta_ijkl = theta_ijkl.reshape(len(iAtoms), len(lAtoms))
    return theta_ijkl


class SymmetryFunction:
    """Behler-style radial/angular/torsion symmetry-function featurizer for
    torsion fragments (functional forms from the Behler DFT-NN review,
    page 30, equations 25 and 26)."""

    def __init__(self):
        self.rcMax = 8.0  # distance cutoff for symmetry functions
        self.ita = 0.0001
        self.rcMin = 1.0
        self.rcIncr = 0.5
        self.rsVec = [0.0]
        self.theta_s_Vec = [0.0]
        self.rsVec_tor = [2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 6.0]
        self.theta_s_Vec_tor = [0.0]
        self.rcRadVec = [1.5, 2.0, 2.5, 3.0, 4.0, 6.0, 10.0]
        self.rcAngVec = [4.5]
        self.rcTorVec = [2.5, 3.5, 5.0, 10.0]
        self.rs = 0.0  # parameter determining shape of the function
        self.itaVec = [0.0001]  # parameter determining shape of the function
        self.lambda1 = 0.5  # parameter for angular symmetry function
        self.chi = 0.5  # parameter for angular symmetry function
        # Element channels; "pc"/"nc" are positive/negative formal-charge
        # pseudo-elements.
        self.elemList = [oechem.OEElemNo_H, oechem.OEElemNo_C,
                         oechem.OEElemNo_N, oechem.OEElemNo_O,
                         oechem.OEElemNo_F, oechem.OEElemNo_S,
                         oechem.OEElemNo_Cl, "pc", "nc"]

    def GetEnvAtomCoords(self, elem, refAtom, envMol, envAtoms):
        """Return coordinates of atoms in `envAtoms` (excluding `refAtom`)
        matching channel `elem`: an atomic number, or 'pc'/'nc' for
        positively/negatively charged atoms."""
        elemEnvList = []
        for envAtom in envAtoms:
            if envAtom == refAtom:
                continue
            if elem == 'pc' and envAtom.GetFormalCharge() >= 1:
                elemEnvList.append(envAtom)
            elif elem == 'nc' and envAtom.GetFormalCharge() <= -1:
                elemEnvList.append(envAtom)
            elif envAtom.GetAtomicNum() == elem:
                elemEnvList.append(envAtom)
        coordsList = []
        for elemEnvAtom in elemEnvList:
            coords = oechem.OEDoubleArray(3)
            if envMol.GetCoords(elemEnvAtom, coords):
                coordsList.append(coords)
        return coordsList

    def GetTorsionEnvAtoms(self, elem, bgnAtom, endAtom, envMol):
        """Return (atoms, coords) of channel-`elem` atoms in the subtree on
        the `bgnAtom` side of the bgn-end bond."""
        elemEnvList = []
        for envAtom in oechem.OEGetSubtree(bgnAtom, endAtom):
            if elem == 'pc' and envAtom.GetFormalCharge() >= 1:
                elemEnvList.append(envAtom)
            elif elem == 'nc' and envAtom.GetFormalCharge() <= -1:
                elemEnvList.append(envAtom)
            elif envAtom.GetAtomicNum() == elem:
                elemEnvList.append(envAtom)
        coordsList = []
        for elemEnvAtom in elemEnvList:
            coords = oechem.OEDoubleArray(3)
            if envMol.GetCoords(elemEnvAtom, coords):
                coordsList.append(coords)
        return elemEnvList, coordsList

    def CalculateTorsionSymmetryFunction(self, envMol, num_iter):
        '''
        Takes refAtom coordinates from refMol as reference and calculates
        the angular symmetry function using envMol atoms
        Functional form is described in the DFT-NN review article by Behler,
        page 30, equations 25 and 26.
        num_iter selects the bond direction (1: bgn->end, else end->bgn).
        Returns [] when the torsion bond or its coordinates are unavailable.
        '''
        tsf = []
        elemList = self.elemList
        nullRet = []
        bond = get_torsion_oebond(envMol)
        if bond is None:
            return nullRet
        jAtom = bond.GetBgn()
        jcoords = oechem.OEDoubleArray(3)
        if not envMol.GetCoords(bond.GetBgn(), jcoords):
            return nullRet
        kAtom = bond.GetEnd()
        kcoords = oechem.OEDoubleArray(3)
        if not envMol.GetCoords(bond.GetEnd(), kcoords):
            return nullRet
        # tsf.append(bond.GetBgn().GetAtomicNum() * bond.GetEnd().GetAtomicNum());
        for inum, iElem in enumerate(elemList):
            if num_iter == 1:
                iAtoms, icoords = self.GetTorsionEnvAtoms(iElem, bond.GetBgn(),
                                                          bond.GetEnd(), envMol)
            else:
                iAtoms, icoords = self.GetTorsionEnvAtoms(iElem, bond.GetEnd(),
                                                          bond.GetBgn(), envMol)
            if len(icoords) == 0:
                # Pad with zeros so the feature vector keeps a fixed layout.
                for ita in self.itaVec:
                    for rc in self.rcTorVec:
                        for num1, _ in enumerate(elemList):
                            if num1 < inum:
                                continue
                            tsf.append(0.0)
                continue
            _, _, _, rij, _ = GetPairwiseDistanceMatrix(icoords, [jcoords])
            for lnum, lElem in enumerate(elemList):
                if lnum < inum:
                    continue
                if num_iter == 1:
                    lAtoms, lcoords = self.GetTorsionEnvAtoms(lElem, bond.GetEnd(),
                                                              bond.GetBgn(), envMol)
                else:
                    lAtoms, lcoords = self.GetTorsionEnvAtoms(lElem, bond.GetBgn(),
                                                              bond.GetEnd(), envMol)
                if len(lcoords) == 0:
                    for ita in self.itaVec:
                        for rc in self.rcTorVec:
                            tsf.append(0.0)
                    continue
                _, _, _, rkl, _ = GetPairwiseDistanceMatrix([kcoords], lcoords)
                _, _, _, ril, _ = GetPairwiseDistanceMatrix(icoords, lcoords)
                theta_ijkl = GetThetaIJKLMatrix(envMol, iAtoms, jAtom, kAtom, lAtoms)
                # angular symmetry function
                for ita in self.itaVec:
                    for rc in self.rcTorVec:
                        rijMat = np.repeat(rij, rkl.size).reshape(rij.size, rkl.size)
                        rklMat = np.repeat(rkl, rij.size).reshape(rkl.size, rij.size)
                        rklMat = np.transpose(rklMat)
                        # Smooth cosine cutoff functions (Behler eq. 25).
                        fcRij = np.select([rijMat <= rc, rijMat > rc],
                                          [0.5 * (np.cos(np.pi * rijMat / rc) + 1.0), 0.0])
                        fcRkl = np.select([rklMat <= rc, rklMat > rc],
                                          [0.5 * (np.cos(np.pi * rklMat / rc) + 1.0), 0.0])
                        fcRil = np.select([ril <= rc, ril > rc],
                                          [0.5 * (np.cos(np.pi * ril / rc) + 1.0), 0.0])
                        exponent = ita * (np.square(rijMat) + np.square(rklMat) + np.square(ril))
                        term1 = np.power((1 + self.lambda1 * np.cos(theta_ijkl)), self.chi)
                        term2 = np.exp(-exponent)
                        term3 = (fcRij * fcRkl) * fcRil
                        sumIL = np.sum(term1 * term2 * term3)
                        coeff = np.power(2, 1 - self.chi) * sumIL
                        tsf.append(coeff * jAtom.GetAtomicNum() * kAtom.GetAtomicNum())
        # Extra scalar descriptors of the torsion itself.
        a, b, c, d = get_torsion_oeatom_list(envMol)
        tsf.append(oechem.OEGetDistance2(envMol, a, d))
        tsf.append(oechem.OEGetDistance2(envMol, b, c))
        tsf.append(oechem.OEGetTorsion(envMol, a, b, c, d))
        tsf.append(a.GetAtomicNum() * d.GetAtomicNum())
        tsf.append(b.GetAtomicNum() * c.GetAtomicNum())
        return tsf

    def GetTorsionCenterAsOEMol(self, mol):
        """Return a single-carbon OEMol placed at the midpoint of the torsion's
        central bond, or None on failure."""
        refCoords = oechem.OEDoubleArray(3)
        try:
            torsion_atoms = get_torsion_oeatom_list(mol)
            bgnCoords = mol.GetCoords(torsion_atoms[1])
            endCoords = mol.GetCoords(torsion_atoms[2])
            refCoords[0] = (bgnCoords[0] + endCoords[0]) / 2.0
            refCoords[1] = (bgnCoords[1] + endCoords[1]) / 2.0
            refCoords[2] = (bgnCoords[2] + endCoords[2]) / 2.0
        except Exception as e:
            print(e)
            return None
        refMol = oechem.OEMol()
        refAtom = refMol.NewAtom(oechem.OEElemNo_C)
        refMol.SetCoords(refAtom, refCoords)
        refMol.Sweep()
        return refMol

    def CalculateSymmetryFunction(self, envMol):
        '''
        Takes refAtom coordinates from refMol as reference and calculates
        the angular symmetry function using envMol atoms
        Functional form is described in the DFT-NN review article by Behler,
        page 30, equations 25 and 26.
        Returns (radial_sf, angular_sf); ([], []) on failure.
        '''
        refMol = self.GetTorsionCenterAsOEMol(envMol)
        _, b, c, _ = get_torsion_oeatom_list(envMol)
        refAtom = refMol.GetAtom(oechem.OEHasAtomIdx(0))
        rsf = []
        asf = []
        elemList = self.elemList
        nullRet = [[], []]
        icoords = oechem.OEDoubleArray(3)
        if not refMol.GetCoords(refAtom, icoords):
            return nullRet
        for jnum, jElem in enumerate(elemList):
            jcoords = self.GetEnvAtomCoords(jElem, refAtom, envMol, envMol.GetAtoms())
            if len(jcoords) == 0:
                # Pad with zeros so the feature vector keeps a fixed layout.
                for ita in self.itaVec:
                    for rc in self.rcRadVec:
                        rsf.append(0.0)  # radial
                    for rc in self.rcAngVec:
                        for num1, _ in enumerate(elemList):
                            if num1 < jnum:
                                continue
                            asf.append(0.0)  # angular
                continue
            #ijX, ijY, ijZ, rij, rij2 = GetPairwiseDistanceMatrix([icoords], jcoords)
            _, _, _, rij, _ = GetPairwiseDistanceMatrix([icoords], jcoords)
            for ita in self.itaVec:
                expArg = ita * ((rij - self.rs) * (rij - self.rs))
                expTerm = np.exp(-expArg)
                # radial symmetry function
                for rc in self.rcRadVec:
                    fc = np.select([rij <= rc, rij > rc],
                                   [0.5 * (np.cos(np.pi * rij / rc) + 1.0), 0.0])
                    prod = expTerm * fc
                    coeff = np.sum(prod)
                    rsf.append(coeff * b.GetAtomicNum() * c.GetAtomicNum())
            for knum, kElem in enumerate(elemList):
                if knum < jnum:
                    continue
                kcoords = self.GetEnvAtomCoords(kElem, refAtom, envMol, envMol.GetAtoms())
                if len(kcoords) == 0:
                    for ita in self.itaVec:
                        for rc in self.rcAngVec:
                            asf.append(0.0)  # angular
                    continue
                _, _, _, rik, _ = GetPairwiseDistanceMatrix([icoords], kcoords)
                _, _, _, rjk, _ = GetPairwiseDistanceMatrix(jcoords, kcoords)
                theta_ijk = GetThetaIJKMatrix([icoords], jcoords, kcoords)
                # angular symmetry function
                for ita in self.itaVec:
                    for rc in self.rcAngVec:
                        rijMat = np.repeat(rij, rik.size).reshape(rij.size, rik.size)
                        rikMat = np.repeat(rik, rij.size).reshape(rik.size, rij.size)
                        rikMat = np.transpose(rikMat)
                        fcRij = np.select([rijMat <= rc, rijMat > rc],
                                          [0.5 * (np.cos(np.pi * rijMat / rc) + 1.0), 0.0])
                        fcRik = np.select([rikMat <= rc, rikMat > rc],
                                          [0.5 * (np.cos(np.pi * rikMat / rc) + 1.0), 0.0])
                        fcRjk = np.select([rjk <= rc, rjk > rc],
                                          [0.5 * (np.cos(np.pi * rjk / rc) + 1.0), 0.0])
                        exponent = ita * (np.square(rijMat) + np.square(rikMat) + np.square(rjk))
                        term1 = np.power((1 + self.lambda1 * np.cos(theta_ijk)), self.chi)
                        term2 = np.exp(-exponent)
                        term3 = (fcRij * fcRjk) * fcRik
                        sumJK = np.sum(term1 * term2 * term3)
                        coeff = np.power(2, 1 - self.chi) * sumJK
                        asf.append(coeff * b.GetAtomicNum() * c.GetAtomicNum())
        return rsf, asf


def get_sf_elements(mol):
    """Return the concatenated radial + angular + torsion symmetry-function
    feature vector for `mol`; torsion features are summed over both
    directions of the central bond."""
    sfObj = SymmetryFunction()
    oechem.OEAssignFormalCharges(mol)
    oechem.OEAssignHybridization(mol)

    rsf, asf = sfObj.CalculateSymmetryFunction(mol)
    tsf1 = sfObj.CalculateTorsionSymmetryFunction(mol, 1)
    tsf2 = sfObj.CalculateTorsionSymmetryFunction(mol, 2)

    tsf = []
    for elem1, elem2 in zip(tsf1, tsf2):
        tsf.append(elem1 + elem2)

    sf_elements = rsf
    sf_elements.extend(asf)
    sf_elements.extend(tsf)
    return sf_elements
40.507614
111
0.553446
1,751
15,960
4.993147
0.16562
0.00549
0.002745
0.010065
0.390598
0.322887
0.262267
0.21846
0.182546
0.168135
0
0.022152
0.343797
15,960
393
112
40.610687
0.812661
0.113596
0
0.268966
0
0
0.00086
0
0
0
0
0
0
1
0.034483
false
0
0.013793
0
0.103448
0.003448
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d5757c4a8bf60547e9dd883852158e386888c4b
6,785
py
Python
recommendation/recommendation.py
Jackson-Y/Machine-Learning
ea0a8c65ce93501d51fad2d73300dc0a37e2c1d8
[ "MIT" ]
4
2017-08-17T02:11:45.000Z
2017-09-25T00:46:13.000Z
recommendation/recommendation.py
Jackson-Y/Machine-Learning
ea0a8c65ce93501d51fad2d73300dc0a37e2c1d8
[ "MIT" ]
null
null
null
recommendation/recommendation.py
Jackson-Y/Machine-Learning
ea0a8c65ce93501d51fad2d73300dc0a37e2c1d8
[ "MIT" ]
null
null
null
""" 候选生成(Candidate generation) & 排序(LTR, Learning to Ranking)""" # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import argparse from operator import itemgetter from math import sqrt import pandas as pd import pymysql from sklearn.model_selection import train_test_split # from sklearn.metrics.pairwise import pairwise_distances # from sklearn.metrics import mean_squared_error class UserBasedCF(object): """ 基于用户的协同过滤 """ def __init__(self, n_similarity_users=20, n_recommendation_articles=10): self.n_similarity_users = n_similarity_users self.n_recomendation_articles = n_recommendation_articles self.train_data = {} self.test_data = {} self.user_similarity_matrix = {} self.article_count = 0 print("Number of similarity users = {}".format(self.n_similarity_users)) print("Number of recommended articles = {}".format(self.n_recomendation_articles)) def store_data_mysql2csv(self): """Store data from mysql to csv.""" sql = 'select uid,lid,ImportantDegree,LocalModifyTime from 20171020_rating' conn = pymysql.connect(host='192.168.106.231', \ user='root', password='cnkidras', \ db='recomm', charset='utf8', use_unicode=True) df = pd.read_sql(sql, con=conn) print(df.head()) df.to_csv("data.csv", index=False) conn.close() def load_data(self): """Load data from csv.""" if os.path.isfile('data.csv'): if os.path.getsize('data.csv') > 0: return self.store_data_mysql2csv() header = ['uid', 'lid', 'ImportantDegree', 'LocalModifyTime'] df = pd.read_csv('data.csv', sep=',', names=header, low_memory=False) train_data, test_data = train_test_split(df, test_size=0.2) train_data_len = 0 test_data_len = 0 for line in train_data.itertuples(): if line[1] not in self.train_data: self.train_data.setdefault(line[1], {}) self.train_data[line[1]][line[2]] = line[3] train_data_len += 1 for line in test_data.itertuples(): if line[1] not in self.test_data: self.test_data.setdefault(line[1], {}) 
self.test_data[line[1]][line[2]] = line[3] test_data_len += 1 print('Train data length = %s' % train_data_len) print('Test data length = %s' % test_data_len) def calc_user_similarity(self): """ 计算用户相似度 """ article_user = {} for uid, lids in self.train_data.items(): for lid in lids: if lid not in article_user: article_user[lid] = set() article_user[lid].add(uid) self.article_count = len(article_user) print("Total article numbers = %d" % self.article_count) for lid, uids in article_user.items(): for uid1 in uids: for uid2 in uids: if uid1 == uid2: continue self.user_similarity_matrix.setdefault(uid1, {}) self.user_similarity_matrix[uid1].setdefault(uid2, 0) self.user_similarity_matrix[uid1][uid2] += 1 for u, related_users in self.user_similarity_matrix.items(): for v, count in related_users.items(): self.user_similarity_matrix[u][v] = count / sqrt(len(self.train_data[u]) * len(self.train_data[v])) def recommendation(self, user): """ 为用户user推荐文献,返回推荐列表及评分。 """ K = self.n_similarity_users N = self.n_recomendation_articles rank = {} print("user: ", user) # watched_articles = self.train_data[user] watched_articles = self.train_data.get(user, {}) if watched_articles is None: print(" [x] New User. 
") return [] for v, wuv in sorted(self.user_similarity_matrix[user].items(), key=itemgetter(1), reverse=True)[0:K]: for article in self.train_data[v]: if article in watched_articles: continue rank.setdefault(article, 0) rank[article] += wuv return sorted(rank.items(), key=itemgetter(1), reverse=True) def evaluate(self): """ 计算准确率、召回率、覆盖率 """ N = self.n_recomendation_articles hit = 0 recommend_count = 0 test_count = 0 all_rec_article = set() for i, user, in enumerate(self.train_data): test_articles = self.test_data.get(user, {}) recommend_articles = self.recommendation(user) for article, w in recommend_articles: if article in test_articles: hit += 1 all_rec_article.add(article) recommend_count += N test_count = len(test_articles) precision = hit / (1.0 * recommend_count) recall = hit / (1.0 * test_count) coverage = len(all_rec_article) / (1.0 * self.article_count) print('precision= %.4f\t recall=%.4f\t coverage=%.4f' % (precision, recall, coverage)) class PrintArticles(object): """ print class """ def __init__(self, lid_list): self.lid_list = lid_list def output(self): """ 在数据库中查找lid对应的文献标题,并打印。 """ conn = pymysql.connect(host='192.168.106.231', \ user='root', password='cnkidras', \ db='recomm', charset='utf8', use_unicode=True) for score_tuple in self.lid_list: sql = 'select lid,UserID,title from test where lid = %s;' % score_tuple[0] df = pd.read_sql(sql, con=conn) print(df) conn.close() FLAGS = None def main(_): """main function""" user_cf = UserBasedCF(20, 10) user_cf.load_data() user_cf.calc_user_similarity() recommended_articled = user_cf.recommendation(FLAGS.uid) print(recommended_articled[0:10]) out = PrintArticles(recommended_articled[0:10]) out.output() # user_cf.evaluate() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.register("type", "bool", lambda v: v.lower() == "true") parser.add_argument( "--uid", type=int, default=80871, help="The user who is going to be recommended articles." 
) parser.add_argument( "--n", type=int, default=10, help="Number of recommended articles." ) FLAGS, unparsed = parser.parse_known_args() print("{} {}".format(sys.argv[0], unparsed)) print(FLAGS) main(FLAGS)
36.875
115
0.592336
825
6,785
4.65697
0.253333
0.039823
0.03722
0.043727
0.184019
0.119729
0.087454
0.077564
0.061947
0.047371
0
0.022056
0.291673
6,785
183
116
37.076503
0.777362
0.059396
0
0.122449
0
0
0.090665
0.006171
0
0
0
0
0
1
0.061224
false
0.013605
0.088435
0
0.183673
0.095238
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d59344dd6f980db538f0cd26f71a979f4b914e4
1,592
py
Python
orchestration/dags/twitter_streaming.py
amommendes/tweetstream
ef09928a4f3344210c597388332d18a53149bb41
[ "Apache-2.0" ]
null
null
null
orchestration/dags/twitter_streaming.py
amommendes/tweetstream
ef09928a4f3344210c597388332d18a53149bb41
[ "Apache-2.0" ]
null
null
null
orchestration/dags/twitter_streaming.py
amommendes/tweetstream
ef09928a4f3344210c597388332d18a53149bb41
[ "Apache-2.0" ]
null
null
null
from datetime import timedelta

from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.python_operator import PythonOperator
from tweetstream.consumers.twitter_streaming import TwitterStreamingConsumer
from tweetstream.clients.spark import SparkClient

# Task-level defaults applied to every task in this DAG.
default_args = {
    "owner": "tweeetstream",  # NOTE(review): triple-'e' spelling kept as-is — confirm intended
    "depends_on_past": False,
    "start_date": days_ago(1),
    "email": ["tweetstream@team.com"],
    "email_on_failure": False,
    "email_on_retry": False,
    "retries": 1,
    "retry_delay": timedelta(minutes=5),
}


def main():
    """Build a Spark session and start the Twitter streaming consumer."""
    # The Kafka integration jars must be on the Spark classpath;
    # failOnDataLoss=false tolerates missing Kafka offsets.
    client = SparkClient(
        session_config={
            "spark.jars": "/usr/local/airflow/dags/tweetstream/libs/spark-sql-kafka-0-10_2.12-3.0.1.jar,"
            "/usr/local/airflow/dags/tweetstream/libs/kafka-clients-2.5.0.jar,"
            "/usr/local/airflow/dags/tweetstream/libs/spark-token-provider-kafka-0-10_2.12-3.0.1.jar,"
            "/usr/local/airflow/dags/tweetstream/libs/commons-pool2-2.8.0.jar",
            "failOnDataLoss": "false",
        }
    )
    session = client.get_session()
    # Sink consumed tweets to HDFS with a separate checkpoint location.
    streaming_consumer = TwitterStreamingConsumer(
        spark=session,
        output_path="hdfs://hadoop:9000/twitter/consumer",
        checkpoint="hdfs://hadoop:9000/twitter/checkpoint",
    )
    streaming_consumer.start()


dag = DAG(
    dag_id="twitter_streaming",
    default_args=default_args,
    description="Tweets Streaming Consumer",
    schedule_interval=timedelta(days=1),
)

# Single task: run the streaming job (no timeout — it streams indefinitely).
start_job_task = PythonOperator(
    dag=dag,
    task_id="start_streaming",
    python_callable=main,
    execution_timeout=None,
)
30.615385
105
0.692839
196
1,592
5.469388
0.433673
0.029851
0.05597
0.070896
0.170709
0.170709
0.170709
0.095149
0.095149
0.095149
0
0.028244
0.177136
1,592
51
106
31.215686
0.790076
0
0
0
0
0.088889
0.356156
0.2299
0
0
0
0
0
1
0.022222
false
0
0.133333
0
0.155556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9d5d5a4039dbeb89722961536cacebbce65b4ec3
1,059
py
Python
setup.py
fg1/ipynb_format
58dc276fca4f1fbb179d7e84ce41d59663d011c2
[ "BSD-3-Clause" ]
null
null
null
setup.py
fg1/ipynb_format
58dc276fca4f1fbb179d7e84ce41d59663d011c2
[ "BSD-3-Clause" ]
null
null
null
setup.py
fg1/ipynb_format
58dc276fca4f1fbb179d7e84ce41d59663d011c2
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/python
"""Packaging script for ipynb_format."""
from codecs import open
from setuptools import setup, find_packages

# The long description is taken verbatim from the README shipped with
# the source distribution.
with open('README.rst', 'r', 'utf-8') as readme:
    long_description = readme.read()

setup(
    name='ipynb_format',
    version='0.1.1',
    description='A code formatter for python code in ipython notebooks',
    long_description=long_description,
    url='https://github.com/fg1/ipynb_format',
    author='fg1',
    license='BSD',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
    keywords='ipython notebook',
    packages=find_packages(),
    install_requires=['yapf'],
    entry_points={
        'console_scripts': [
            'ipynb_format=ipynb_format:cli',
        ],
    },
)
31.147059
74
0.588291
111
1,059
5.504505
0.630631
0.072013
0.163666
0.12766
0
0
0
0
0
0
0
0.016905
0.273843
1,059
33
75
32.090909
0.777633
0.015109
0
0.068966
0
0
0.444338
0.027831
0
0
0
0
0
1
0
false
0
0.068966
0
0.068966
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19b2caec75b18b0aa3e0597b5caa0b0c55ce8cad
7,365
py
Python
gpss/transaction.py
martendo/gpss.py
52c6781bd8a65b651381ed11da9e31ddfae6e313
[ "MIT" ]
2
2021-11-28T08:48:02.000Z
2022-03-09T16:19:06.000Z
gpss/transaction.py
martendo/gpss.py
52c6781bd8a65b651381ed11da9e31ddfae6e313
[ "MIT" ]
null
null
null
gpss/transaction.py
martendo/gpss.py
52c6781bd8a65b651381ed11da9e31ddfae6e313
[ "MIT" ]
null
null
null
from .statement import Statement, StatementType
from .event import Event
from ._helpers import debugmsg, simulation_error


class TransactionGenerator:
    """Schedules and creates Transactions for one GENERATE block.

    Operand layout (inferred from usage below — confirm against the
    parser): 0 = mean interarrival time, 1 = half-range spread,
    2 = offset interval, 3 = limit count (or None), 4 = priority.
    """

    def __init__(self, simulation, block_num, operands):
        self.simulation = simulation
        self.block = self.simulation.program[block_num]
        # Generated Transactions begin at the block after the GENERATE
        self.start_block = block_num + 1
        self.operands = operands
        # Count of Transactions created so far (checked against limit)
        self.generated = 0

    def __str__(self):
        return f"TransactionGenerator({','.join(map(str, self.operands))})"

    def prime(self):
        # Add initial Transaction generation event using the Offset
        # Interval
        self.add_next_event(self.operands[2])

    def add_next_event(self, time=None):
        # If reached generation Limit Count, stop
        if (self.operands[3] is not None
                and self.generated >= self.operands[3]):
            return
        # Add event to event list to generate next Transaction
        if time is None:
            # Default: mean interarrival time plus a uniform +/- spread
            time = self.simulation.time + self.operands[0]
            if self.operands[1] != 0:
                time += self.simulation.rngs[1].randint(
                    -self.operands[1],
                    +self.operands[1],
                )
        if time < self.simulation.time:
            simulation_error(self.simulation.parser.infile,
                self.block.linenum,
                "Cannot GENERATE a Transaction in a negative amount "
                f"of time ({time - self.simulation.time})")
        elif time == self.simulation.time and time is None:
            # NOTE(review): `time` can never be None here (it is filled
            # in above when None), so this branch looks unreachable and
            # immediate generation never happens — confirm intent.
            # Generate immediately, no need to add to event list
            self.generate()
        else:
            self.simulation.add_event(Event(time, self.generate))

    def generate(self):
        # Generate a new Transaction
        debugmsg("generate:", self.simulation.time, self.operands)
        transaction = Transaction(self.simulation, self.start_block,
            self.operands[4])
        self.simulation.transactions.add(transaction)
        self.generated += 1
        # Add next Transaction generation event
        self.add_next_event()
        # Let the new Transaction run until it blocks or terminates
        transaction.update()


class Transaction:
    """A single Transaction moving through the simulation's blocks."""

    def __init__(self, simulation, start_block, priority):
        self.simulation = simulation
        self.current_block = start_block
        self.priority = priority

    def __str__(self):
        return f"Transaction({self.priority})"

    def update(self):
        """Execute blocks sequentially until blocked, delayed or terminated."""
        while True:
            # Execute next block
            block = self.simulation.program[self.current_block]
            self.current_block += 1
            self.current_linenum = block.linenum
            if block.type is StatementType.TERMINATE:
                self.simulation.terminate(self, block.operands[0])
                return
            elif block.type is StatementType.QUEUE:
                self.simulation.queues[block.operands[0]].join(self,
                    block.operands[1])
            elif block.type is StatementType.DEPART:
                self.simulation.queues[block.operands[0]].depart(self,
                    block.operands[1])
            elif block.type is StatementType.ADVANCE:
                interval, spread = block.operands[0:2]
                # Add event for end of delay
                time = self.simulation.time + interval
                if spread != 0:
                    time += self.simulation.rngs[1].randint(
                        -spread,
                        +spread,
                    )
                if time < self.simulation.time:
                    simulation_error(self.simulation.parser.infile,
                        block.linenum,
                        "Cannot ADVANCE a negative amount of time "
                        f"({time - self.simulation.time})")
                elif time == self.simulation.time:
                    # ADVANCE 0 -> no-op
                    continue
                self.simulation.add_event(Event(time, self.update))
                return
            elif block.type is StatementType.SEIZE:
                # Use Facility or enter Delay Chain if busy
                if not self.simulation.facilities[block.operands[0]].seize(self):
                    # Facility is busy -> wait
                    return
            elif block.type is StatementType.RELEASE:
                self.simulation.facilities[block.operands[0]].release(self)
            elif block.type is StatementType.ENTER:
                # Enter Storage or enter Delay Chain if cannot satisfy
                # demand
                try:
                    if not(self.simulation.storages[block.operands[0]]
                            .enter(self, block.operands[1])):
                        # Not enough Storage available
                        return
                except KeyError:
                    simulation_error(self.simulation.parser.infile,
                        block.linenum,
                        f"No Storage named \"{block.operands[0]}\"")
            elif block.type is StatementType.LEAVE:
                try:
                    self.simulation.storages[block.operands[0]].leave(
                        self, block.operands[1])
                except KeyError:
                    simulation_error(self.simulation.parser.infile,
                        block.linenum,
                        f"No Storage named \"{block.operands[0]}\"")
            elif block.type is StatementType.TRANSFER:
                if block.operands[0] is None:
                    # Unconditional transfer mode
                    self.current_block = (
                        self.simulation.labels[block.operands[1]].number)
                elif block.operands[0] == "BOTH":
                    # BOTH mode
                    if block.operands[1] != "":
                        b_block = (
                            self.simulation.labels[block.operands[1]])
                    else:
                        # Use sequential Block
                        b_block = (
                            self.simulation.program[self.current_block])
                    c_block = self.simulation.labels[block.operands[2]]
                    if not b_block.refuse(self.simulation):
                        self.current_block = b_block.number
                    elif not c_block.refuse(self.simulation):
                        self.current_block = c_block.number
                    else:
                        # Refused entry to both Blocks, stay on this one
                        self.current_block -= 1
                        self.simulation.current_events.append(self.update)
                        return
                else:
                    # Statistical transfer mode
                    if self.simulation.rngs[1].random() < block.operands[0]:
                        new_block = block.operands[2]
                    else:
                        new_block = block.operands[1]
                    if new_block == "":
                        # Continue to sequential Block
                        continue
                    self.current_block = (
                        self.simulation.labels[new_block].number)
41.610169
81
0.509029
702
7,365
5.25641
0.18661
0.155556
0.049322
0.058537
0.42439
0.367751
0.258537
0.15664
0.153388
0.102439
0
0.009908
0.410726
7,365
176
82
41.846591
0.840323
0.089206
0
0.310078
0
0
0.044272
0.016602
0
0
0
0
0
1
0.062016
false
0
0.023256
0.015504
0.162791
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19b3f6aeb28dd07d2770e4ea600d2a99c0c06e65
3,134
py
Python
train_video.py
jacke121/MBMD
2daf5edb4fb40ee652baead4f9332ca00fa111a5
[ "MIT" ]
220
2018-09-17T15:42:54.000Z
2021-09-13T13:14:22.000Z
train_video.py
jacke121/MBMD
2daf5edb4fb40ee652baead4f9332ca00fa111a5
[ "MIT" ]
12
2018-09-19T09:30:42.000Z
2019-07-01T04:03:51.000Z
train_video.py
jacke121/MBMD
2daf5edb4fb40ee652baead4f9332ca00fa111a5
[ "MIT" ]
60
2018-09-18T00:29:50.000Z
2021-02-22T03:55:19.000Z
import functools
import os

import tensorflow as tf
from core import trainer_video, input_reader
from core.model_builder import build_man_model
from google.protobuf import text_format
from object_detection.builders import input_reader_builder
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2

# lijun's code
# modify bb to conv1*2 conv3*2
# l2 normalization to match

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
tf.logging.set_verbosity(tf.logging.INFO)

flags = tf.app.flags
flags.DEFINE_string('train_dir', 'model/dump',
                    'Directory to save the checkpoints and training summaries.')
flags.DEFINE_string('pipeline_config_path', 'model/ssd_mobilenet_video.config',
                    'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
                    'file. If provided, other configs are ignored')
flags.DEFINE_string('train_config_path', '',
                    'Path to a train_pb2.TrainConfig config file.')
flags.DEFINE_string('input_config_path', '',
                    'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
                    'Path to a model_pb2.DetectionModel config file.')
flags.DEFINE_string('image_root',
                    '/home/xiaobai/Documents/ILSVRC2014_DET_train/image/ILSVRC2014_DET_train',
                    'Root path to input images')
flags.DEFINE_string('video_root', '/home/xiaobai/Documents/ILSVRC2015/',
                    'Root path to input videos')
flags.DEFINE_string('image_tfrecord', './train_seq.record',
                    'Path to image tfrecord.')
flags.DEFINE_string('video_tfrecord', './train_vid.record',
                    'Path to video tfrecord')
FLAGS = flags.FLAGS


def get_configs_from_pipeline_file():
    """Reads training configuration from a pipeline_pb2.TrainEvalPipelineConfig.

    Reads training config from file specified by pipeline_config_path flag.

    Returns:
      model_config: model_pb2.DetectionModel
      train_config: train_pb2.TrainConfig
      input_config: input_reader_pb2.InputReader
    """
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as config_file:
        text_format.Merge(config_file.read(), pipeline_config)
    model_config = pipeline_config.model.ssd
    train_config = pipeline_config.train_config
    input_config = pipeline_config.train_input_reader
    return model_config, train_config, input_config


def main(_):
    """Wire the model, input pipeline and trainer together and train."""
    model_config, train_config, input_config = get_configs_from_pipeline_file()
    # Defer model construction to the trainer via a partial.
    model_fn = functools.partial(
        build_man_model,
        model_config=model_config,
        is_training=True)
    # Input pipeline reads both video and image tfrecords.
    create_input_image_dict_fn = functools.partial(
        input_reader.read_video_image,
        FLAGS.video_tfrecord,
        FLAGS.image_tfrecord)
    trainer_video.train(model_fn, create_input_image_dict_fn, train_config,
                        FLAGS.train_dir, FLAGS.image_root, FLAGS.video_root)


if __name__ == '__main__':
    # update moving average
    tf.app.run()
35.613636
128
0.744735
413
3,134
5.33414
0.288136
0.044939
0.069451
0.045393
0.22424
0.10168
0
0
0
0
0
0.012284
0.168794
3,134
87
129
36.022989
0.833397
0.108807
0
0
0
0
0.272424
0.091549
0
0
0
0
0
1
0.037736
false
0
0.207547
0
0.264151
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19b7ef31e8ac32e464e2b7f9641c6ad98cd6de46
3,301
py
Python
conf_dblp.py
AmiraKetfi/ScientificProductScraper
c700fb579ac47266e76ec834ccbd8674abeaff50
[ "MIT" ]
4
2018-04-04T12:10:59.000Z
2020-02-22T17:26:14.000Z
conf_dblp.py
AmiraKetfi/ScientificProductScraper
c700fb579ac47266e76ec834ccbd8674abeaff50
[ "MIT" ]
null
null
null
conf_dblp.py
AmiraKetfi/ScientificProductScraper
c700fb579ac47266e76ec834ccbd8674abeaff50
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 17 23:01:40 2018

@author: pc
"""
import scholarly, re, urllib.request, nltk
import bs4 as bs

# =============================================================================
# NOTE(review): known issue — the last conferences are not appended
# (translated from the original French comment).
# =============================================================================


def find_ComputerScienceConferences_Workshops_names_DBLP(url_deb):
    """Print conference/workshop names from a DBLP index page, following
    the '[next 100 entries]' pagination link recursively."""
    page = urllib.request.urlopen(url_deb).read()
    seen_next, soup = 0, bs.BeautifulSoup(page, 'lxml')
    for anchor in soup.find_all('a'):
        # After the pagination marker, every link text is an entry name.
        if seen_next == 1 and anchor.text != "[previous 100 entries]":
            print(anchor.text)
        if anchor.text == "[next 100 entries]":
            seen_next, href = 1, anchor.get("href")
            url_a = "http://dblp.uni-trier.de/db/conf/" + href
        if (anchor.text == "[previous 100 entries]") and (seen_next == 1):
            # Recurse into the next page of entries.
            find_ComputerScienceConferences_Workshops_names_DBLP(url_a)


def Timeline_of_conferences(url_deb):
    """Collect the per-year conference page links into a text file,
    skipping immediate duplicates."""
    page = urllib.request.urlopen(url_deb).read()
    soup = bs.BeautifulSoup(page, 'lxml')
    last_s = ""
    for anchor in soup.find_all('a'):
        s = anchor.get("href")
        if re.search(r"http://dblp.uni-trier.de/db/conf/.*/.*\.html", s):
            if last_s != s:
                with open("Lien_de_toutes_les_conf.txt", "a") as fichier:
                    fichier.write("\n" + s)
                last_s = s


def publication_conf_dblp(url):
    """Scrape one conference page and append titles, publishers, dates,
    ISBNs and page ranges to their respective text files."""

    def append_line(path, text):
        # Append one line of text to the given output file.
        with open(path, "a") as out:
            out.write("\n" + text)

    # Truncate the output files left over from any previous run.
    for path in ("conf.txt", "publisher.txt", "Date.txt", "isbn.txt"):
        open(path, "w").close()

    page = urllib.request.urlopen(url).read()
    soup = bs.BeautifulSoup(page, 'lxml')
    c = 0  # kept from the original; never read afterwards
    for span in soup.find_all('span'):
        s1 = span.get("class")
        try:
            if s1[0] == 'title':
                append_line("conf.txt", span.text)
        except TypeError:
            # span without a class attribute
            print("\t")
        s2 = span.get("itemprop")
        try:
            if s2 == "publisher":
                append_line("publisher.txt", span.text)
            if s2 == "datePublished":
                append_line("Date.txt", span.text)
            if s2 == "isbn":
                append_line("isbn.txt", span.text)
            if s2 == "pagination":
                append_line("pages.txt", span.text)
        except TypeError:
            print("\t")


url_deb = 'https://dblp.uni-trier.de/db/conf/'
url_deb2 = 'http://dblp.uni-trier.de/db/conf/3dim/3dimpvt2012.html'
url_deb3 = 'http://dblp.uni-trier.de/db/conf/3dpvt/'

publication_conf_dblp(url_deb3)
38.383721
124
0.499546
393
3,301
4.083969
0.279898
0.068536
0.04486
0.052336
0.572586
0.517757
0.350156
0.241745
0.241745
0.1919
0
0.021786
0.304756
3,301
86
125
38.383721
0.67756
0.157528
0
0.362319
0
0
0.180727
0.010124
0
0
0
0
0
1
0.043478
false
0
0.028986
0
0.072464
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19b8ce0aa97bf71df30c5a8e086263306534c4c7
4,540
py
Python
src/robot.py
FROG3160/FRC2018-ARWING
6635274d79839ea92d8591af2c8e51f8e1112ec1
[ "MIT" ]
1
2019-01-15T00:47:16.000Z
2019-01-15T00:47:16.000Z
src/robot.py
FROG3160/FRC2018-ARWING
6635274d79839ea92d8591af2c8e51f8e1112ec1
[ "MIT" ]
18
2018-02-15T01:07:03.000Z
2018-04-10T00:25:59.000Z
src/robot.py
FROG3160/FRC2018-ARWING
6635274d79839ea92d8591af2c8e51f8e1112ec1
[ "MIT" ]
4
2018-01-31T01:53:44.000Z
2018-02-16T00:30:14.000Z
#!/usr/bin/env python3
"""
Main code for Robot
"""
import wpilib
import robotmap
from wpilib import Joystick
from subsystems.drivetrain import DriveTrain as Drive
from subsystems.grabber import cubeGrabber
from subsystems.elevator import Elevator
from subsystems.climber import Climber
from subsystems.autonomous import Autonomous
from wpilib.sendablechooser import SendableChooser


class Robot(wpilib.IterativeRobot):

    def robotInit(self):
        """
        This function is called upon program startup and
        should be used for any initialization code.
        """
        # Hardware IDs and mappings from robotmap.
        self.pneumaticControlModuleCANID = robotmap.PCM
        self.kDriveTrain = robotmap.DriveTrain
        self.kCubeGrabber = robotmap.CubeGrabber
        self.kElevator = robotmap.Elevator
        self.kSticks = robotmap.Sticks
        self.kClimber = robotmap.Climber
        # Driver and operator joysticks.
        self.dStick = Joystick(self.kSticks['drive'])
        self.cStick = Joystick(self.kSticks['control'])
        # Subsystems.
        self.drive = Drive(self)
        self.cubeGrabber = cubeGrabber(self)
        self.elevator = Elevator(self)
        self.climber = Climber(self)
        # Publish dashboard choosers.
        self.sendableChooser()

    def robotPeriodic(self):
        pass

    def disabledInit(self):
        pass

    def disabledPeriodic(self):
        self.drive.stop()

    def autonomousInit(self):
        """This function is run once each time the robot enters autonomous mode."""
        self.autonomous = Autonomous(self)
        self.autonomous.reset()
        self.drive.autoInit()

    def autonomousPeriodic(self):
        """This function is called periodically during autonomous."""
        self.autonomous.run()

    def teleopInit(self):
        self.drive.teleInit()

    def teleopPeriodic(self):
        """This function is called periodically during operator control."""
        # Cube the inverted stick value for finer low-speed control.
        speed = (self.dStick.getY() * -1) ** 3
        # Scale twist by the throttle axis (raw axis 3).
        rotation = self.dStick.getTwist() / (1.1 + self.dStick.getRawAxis(3))
        self.drive.arcadeWithRPM(speed, rotation, 2800)
        self.cubeGrabber.grabberFunction()
        self.climber.climberFunction()

    def testInit(self):
        pass

    def testPeriodic(self):
        wpilib.LiveWindow.setEnabled(True)

    def sendableChooser(self):
        """Publish the autonomous-configuration choosers to the dashboard."""
        # Starting position on the field.
        self.startingChooser = SendableChooser()
        self.startingChooser.addDefault('Move Forward Only', '!')
        self.startingChooser.addObject('Starting Left', 'L')
        self.startingChooser.addObject('Starting Middle', 'M')
        self.startingChooser.addObject('Starting Right', 'R')
        wpilib.SmartDashboard.putData('Starting Side', self.startingChooser)

        # Autonomous start delay, 0-15 seconds.
        self.startingDelayChooser = SendableChooser()
        self.startingDelayChooser.addDefault('0', 0)
        for delay in range(1, 16):
            self.startingDelayChooser.addObject(str(delay), delay)
        wpilib.SmartDashboard.putData('Delay Time(sec)',
                                      self.startingDelayChooser)

        # Target element: switch or scale.
        self.switchOrScale = SendableChooser()
        self.switchOrScale.addDefault('Switch', 'Switch')
        self.switchOrScale.addObject('Scale', 'Scale')
        wpilib.SmartDashboard.putData('Switch or Scale', self.switchOrScale)


if __name__ == "__main__":
    wpilib.run(Robot)
32.661871
83
0.652643
438
4,540
6.737443
0.3379
0.146391
0.16774
0.024399
0.036598
0.028465
0.028465
0
0
0
0
0.018314
0.242291
4,540
138
84
32.898551
0.839535
0.151322
0
0.049383
0
0
0.044867
0
0
0
0
0
0
1
0.135802
false
0.049383
0.111111
0
0.259259
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19b94d7c9d394f09ecf7228b67004f998dd55522
1,764
py
Python
api/attomized_avm.py
johncoleman83/attom_python_client
2fad572162f481a71cccf6003da4cbd8ec4477d4
[ "MIT" ]
null
null
null
api/attomized_avm.py
johncoleman83/attom_python_client
2fad572162f481a71cccf6003da4cbd8ec4477d4
[ "MIT" ]
null
null
null
api/attomized_avm.py
johncoleman83/attom_python_client
2fad572162f481a71cccf6003da4cbd8ec4477d4
[ "MIT" ]
1
2020-11-20T19:28:36.000Z
2020-11-20T19:28:36.000Z
#!/usr/bin/env python3
"""
ATTOM API
https://api.developer.attomdata.com
"""
import requests
from urllib.parse import quote, urlencode

from api import api

PATH = "attomavm/detail"


def get_avm_by_address(number_street, city_state):
    """
    API request to get attomavm/detail
    """
    query = urlencode({
        "address1": number_street,
        "address2": city_state,
    })
    url = "{}/{}?{}".format(api.ATTOM_URL, PATH, query)
    response = requests.get(url, headers=api.headers)
    return response.json()


def get_building_from(p, all_beds, all_baths, all_building_sizes):
    """Extract building stats from property dict `p`, appending any
    truthy values to the accumulator lists."""
    building = p.get('building', {})
    b = {
        'size': building.get('size', {}).get('livingsize', None),
        'baths': building.get('rooms', {}).get('bathstotal', None),
        'beds': building.get('rooms', {}).get('beds', None),
        'bsmt': building.get('interior', {}).get('bsmtsize', None),
    }
    if b.get('beds'):
        all_beds.append(b.get('beds'))
    if b.get('baths'):
        all_baths.append(b.get('baths'))
    if b.get('size'):
        all_building_sizes.append(b.get('size'))
    return b


def get_sale_from(p, all_sale_values):
    """Extract sale amount/date from `p`; zero amounts are normalized to
    None, and real amounts are appended to the accumulator."""
    amount = p.get('sale', {}).get('amount', {})
    sale = {
        'saleamt': amount.get('saleamt', None),
        'saledate': amount.get('salerecdate', None),
    }
    if sale.get('saleamt') == 0:
        sale['saleamt'] = None
    if sale.get('saleamt'):
        all_sale_values.append(sale.get('saleamt'))
    return sale


def get_address_from(p):
    """First address line, or the string "NULL" when absent."""
    return p.get('address', {}).get('line1', "NULL")


def get_lot_from(p):
    """Lot size, or the string "NULL" when absent."""
    return p.get('lot', {}).get('lotsize2', "NULL")


def get_market_value_from(p):
    """Assessed total market value, or None when absent."""
    return p.get('assessment', {}).get('market', {}).get('mktttlvalue', None)


def get_avm_from(p):
    """AVM value, or None when absent."""
    return p.get('avm', {}).get('amount', {}).get('value', None)
27.138462
77
0.620181
249
1,764
4.261044
0.289157
0.0377
0.04524
0.05655
0.175306
0.081056
0
0
0
0
0
0.004008
0.151361
1,764
64
78
27.5625
0.704743
0.057823
0
0
0
0
0.195374
0
0
0
0
0
0
1
0.152174
false
0
0.065217
0.086957
0.369565
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19b9c7cf12ec5b8b173b1bc2764d7bfc2577385f
7,064
py
Python
idmap/models.py
tkhyn/django-idmap
383124fc4bd537d053f9d4c0d02a498f66831baa
[ "BSD-2-Clause" ]
1
2021-04-24T16:35:15.000Z
2021-04-24T16:35:15.000Z
idmap/models.py
tkhyn/django-idmap
383124fc4bd537d053f9d4c0d02a498f66831baa
[ "BSD-2-Clause" ]
null
null
null
idmap/models.py
tkhyn/django-idmap
383124fc4bd537d053f9d4c0d02a498f66831baa
[ "BSD-2-Clause" ]
1
2021-02-27T14:45:48.000Z
2021-02-27T14:45:48.000Z
import django
from django.db import models
from django.db.models.base import ModelBase
from django.utils import six

from .manager import IdMapManager
from . import tls  # thread local storage

# Defaults for the idmap-specific Meta options stripped off by the
# metaclass below.
META_VALUES = {
    'use_strong_refs': False,
    'multi_db': False
}


class IdMapModelBase(ModelBase):
    """Metaclass that extracts idmap-specific Meta options before Django
    sees them, then re-attaches them to the finished ``_meta``."""

    def __new__(mcs, name, bases, attrs):
        meta = attrs.get('Meta', type('Meta', (object,), {}))
        meta_values = {}
        # Pull idmap options out of Meta so Django's Options class does
        # not reject them as unknown attributes.
        for attr, default in six.iteritems(META_VALUES):
            try:
                meta_values[attr] = getattr(meta, attr)
                delattr(meta, attr)
            except AttributeError:
                pass
        if django.VERSION < (1, 10):
            # these attributes are only supported from 1.10 onwards
            # if they are still defined when calling super.__new__ this raises
            # an exception
            for attr in ['base_manager_name', 'default_manager_name']:
                try:
                    delattr(meta, attr)
                except AttributeError:
                    pass
        cls = super(IdMapModelBase, mcs).__new__(mcs, name, bases, attrs)
        # Re-attach the idmap options to the built _meta, with inheritance.
        for attr in six.iterkeys(META_VALUES):
            try:
                # value defined in the class' own Meta
                setattr(cls._meta, attr, meta_values[attr])
            except KeyError:
                # value not defined, look into bases' Meta
                for base in cls.mro()[1:]:
                    try:
                        setattr(cls._meta, attr, getattr(base._meta, attr))
                        break
                    except AttributeError:
                        pass
                else:
                    # no base defines it either: fall back to the default
                    setattr(cls._meta, attr, META_VALUES[attr])
        return cls


class IdMapModel(six.with_metaclass(IdMapModelBase, models.Model)):
    """
    Abstract class to derive any idmap-enabled model from

    Meta can set ``use_strong_refs`` to True if one should use strong
    references (= kept in cache until explicitly flushed) for stored
    instances, and ``multi_db`` to True if the model is used in several
    databases
    """

    objects = IdMapManager()

    class Meta:
        # does not inherit from base_class.Meta but that's not an issue
        abstract = True
        base_manager_name = 'objects'
        default_manager_name = 'objects'

    # OVERRIDES

    @classmethod
    def from_db(cls, db, field_names, values):
        """
        This method will either create an instance (by calling the default
        implementation) or try to retrieve one from the class-wide cache by
        infering the pk value from args and kwargs. The cache is then
        populated whenever possible (ie when it is possible to infer the pk
        value).
        """
        try:
            is_deferred = cls is models.DEFERRED
        except AttributeError:
            # django < 1.10
            is_deferred = cls._deferred
        if is_deferred:
            # deferred load: fields arrive as a name -> value mapping
            args = ()
            kwargs = dict(zip(field_names, values))
        else:
            args = values
            kwargs = {}
        instance_key = cls._get_cache_key(args, kwargs)

        def create_instance():
            # build, mark as loaded-from-db, and cache a new instance
            inst = cls(*args, **kwargs)
            inst._state.adding = False
            inst._state.db = db
            cls.cache_instance(inst)
            return inst

        # depending on the arguments, we might not be able to infer the PK
        # in that case, we create a new instance
        if instance_key is None:
            return create_instance()
        else:
            instance = cls.get_cached_instance(instance_key, db)
            if instance is None:
                return create_instance()
            else:
                return instance

    def refresh_from_db(self, using=None, fields=None):
        # Drop the stale cached copy, reload from the DB, then re-cache.
        self.flush_cached_instance(self)
        super(IdMapModel, self).refresh_from_db(using, fields)
        self.cache_instance(self)

    # DJANGO-IDMAP METHODS

    @classmethod
    def _get_cache_key(cls, args, kwargs):
        """
        This method is used by the caching subsystem to infer the PK value
        from the constructor arguments. It is used to decide if an instance
        has to be built or is already in the cache.
        """
        result = None
        # Quick hack for my composites work for now.
        if hasattr(cls._meta, 'pks'):
            pk = cls._meta.pks[0]
        else:
            pk = cls._meta.pk
        pk_position = getattr(cls._meta, 'pk_pos', None)
        if pk_position is None:
            # the pk position could not be extracted from _meta
            # calculate it ...
            pk_position = cls._meta.fields.index(pk)
            # ... and store it
            setattr(cls._meta, 'pk_pos', pk_position)
        if len(args) > pk_position:
            # if it's in the args, we can get it easily by index
            result = args[pk_position]
        elif pk.attname in kwargs:
            # retrieve the pk value. Note that we use attname instead of name,
            # to handle the case where the pk is a ForeignKey.
            result = kwargs[pk.attname]
        elif pk.name != pk.attname and pk.name in kwargs:
            # ok we couldn't find the value, but maybe it's a FK and we can
            # find the corresponding object instead
            result = kwargs[pk.name]
        if result is not None and isinstance(result, models.Model):
            # if the pk value happens to be a model instance (which can
            # happen with a FK), we'd rather use its own pk as the key
            result = result._get_pk_val()
        return result

    @classmethod
    def get_cached_instance(cls, pk, db=None):
        """
        Method to retrieve a cached instance by pk value and db. Returns None
        when not found (which will always be the case when caching is
        disabled for this class). Please note that the lookup will be done
        even when instance caching is disabled.
        """
        return tls.get_cached_instance(cls, pk, db)

    @classmethod
    def cache_instance(cls, instance):
        """
        Method to store an instance in the cache.
        """
        pk = instance._get_pk_val()
        # unsaved instances (no pk yet) are never cached
        if pk is not None:
            tls.cache_instance(cls, instance)

    @classmethod
    def flush_cached_instance(cls, instance):
        """
        Method to flush an instance from the cache. The instance will always
        be flushed from the cache, since this is most likely called from
        delete(), and we want to make sure we don't cache dead objects.
        """
        tls.flush_cached_instance(cls, instance)

    @classmethod
    def flush_instance_cache(cls, db=None, flush_sub=False):
        # Flush this class' cache (optionally for one db only), and
        # recurse into subclasses when flush_sub is set.
        tls.get_cache(cls, flush=db)
        if flush_sub:
            for s in cls.__subclasses__():
                s.flush_instance_cache(db, flush_sub)

    def save(self, *args, **kwargs):
        """
        Caches the instance on save
        """
        super(IdMapModel, self).save(*args, **kwargs)
        self.__class__.cache_instance(self)
33.799043
80
0.58876
895
7,064
4.515084
0.261453
0.01559
0.012373
0.013363
0.116555
0.080673
0.015838
0
0
0
0
0.00235
0.337344
7,064
208
81
33.961538
0.860927
0.307475
0
0.230769
0
0
0.020978
0
0
0
0
0
0
1
0.08547
false
0.025641
0.051282
0
0.230769
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19bd0b651a92c3989a6dcd3e14655ea86b1f4a83
2,501
py
Python
pyrfu/pyrf/ts_skymap.py
ablotekar/irfu-python
740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e
[ "MIT" ]
2
2020-11-27T11:35:42.000Z
2021-07-17T11:08:10.000Z
pyrfu/pyrf/ts_skymap.py
ablotekar/irfu-python
740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e
[ "MIT" ]
1
2021-12-04T07:55:48.000Z
2021-12-10T12:45:27.000Z
pyrfu/pyrf/ts_skymap.py
ablotekar/irfu-python
740cb51ca9ce2ab0d62cb6fef3a7a722d430d79e
[ "MIT" ]
2
2021-07-17T11:08:12.000Z
2021-07-18T18:41:42.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# 3rd party imports
import numpy as np
import xarray as xr

__author__ = "Louis Richard"
__email__ = "louisr@irfu.se"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"


def ts_skymap(time, data, energy, phi, theta, **kwargs):
    r"""Creates a skymap of the distribution function.

    Parameters
    ----------
    time : ndarray
        List of times.
    data : ndarray
        Values of the distribution function.
    energy : ndarray
        Energy levels. If None, it is reconstructed from the ``energy0``,
        ``energy1`` and ``esteptable`` keyword arguments.
    phi : ndarray
        Azimuthal angles.
    theta : ndarray
        Elevation angles.

    Other Parameters
    ----------------
    **kwargs
        Hash table of keyword arguments with :
            * energy0 : ndarray
                Energy table 0 (odd time indices).
            * energy1 : ndarray
                Energy table 1 (even time indices).
            * esteptable : ndarray
                Time series of the stepping table between energies (burst).

    Returns
    -------
    out : xarray.Dataset
        Skymap of the distribution function.

    Raises
    ------
    ValueError
        If ``energy`` is None and none of ``energy0``, ``energy1``,
        ``esteptable`` is provided.
    """
    energy0, energy1, esteptable = [None] * 3
    energy0_ok, energy1_ok, esteptable_ok = [False] * 3

    if energy is None:
        if "energy0" in kwargs:
            energy0, energy0_ok = [kwargs["energy0"], True]
        if "energy1" in kwargs:
            energy1, energy1_ok = [kwargs["energy1"], True]
        if "esteptable" in kwargs:
            esteptable, esteptable_ok = [kwargs["esteptable"], True]
        if not energy0_ok and not energy1_ok and not esteptable_ok:
            raise ValueError("Energy input required")
        # Rebuild the per-sample energy table: rows default to table 0,
        # rows flagged by the stepping table use table 1.
        energy = np.tile(energy0, (len(esteptable), 1))
        energy[esteptable == 1] = np.tile(
            energy1, (int(np.sum(esteptable)), 1))

    # Broadcast a static azimuth table over all time samples.
    if phi.ndim == 1:
        phi = np.tile(phi, (len(time), 1))

    out_dict = {"data": (["time", "idx0", "idx1", "idx2"], data),
                "phi": (["time", "idx1"], phi),
                "theta": (["idx2"], theta),
                "energy": (["time", "idx0"], energy),
                "time": time,
                "idx0": np.arange(energy.shape[1]),
                "idx1": np.arange(phi.shape[1]),
                "idx2": np.arange(len(theta))}

    out = xr.Dataset(out_dict)

    if energy0_ok:
        out.attrs["energy0"] = energy0
    if energy1_ok:
        out.attrs["energy1"] = energy1
    # Fix: this guard previously tested energy0_ok, so the stepping table
    # was attached (or dropped) based on the wrong keyword's presence.
    if esteptable_ok:
        out.attrs["esteptable"] = esteptable

    return out
26.892473
79
0.562575
283
2,501
4.833922
0.378092
0.032895
0.037281
0.054825
0.073099
0
0
0
0
0
0
0.033353
0.304678
2,501
92
80
27.184783
0.753307
0.29988
0
0.051282
0
0
0.138396
0
0
0
0
0
0
1
0.025641
false
0
0.051282
0
0.102564
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19be0f2de874f8b441c89b5d8fd8cac69393789a
2,037
py
Python
src/log_utils.py
alexklwong/calibrated-backprojection-network
57dbec03c6da94ee0cd020b6de5f02e7e8ee726e
[ "Intel" ]
38
2021-08-28T06:01:25.000Z
2022-03-03T03:23:23.000Z
src/log_utils.py
alexklwong/calibrated-backprojection-network
57dbec03c6da94ee0cd020b6de5f02e7e8ee726e
[ "Intel" ]
14
2021-11-15T12:30:34.000Z
2022-03-30T14:03:16.000Z
src/log_utils.py
alexklwong/calibrated-backprojection-network
57dbec03c6da94ee0cd020b6de5f02e7e8ee726e
[ "Intel" ]
9
2021-10-19T23:45:07.000Z
2021-12-20T07:45:37.000Z
'''
Author: Alex Wong <alexw@cs.ucla.edu>

If you use this code, please cite the following paper:

A. Wong, and S. Soatto. Unsupervised Depth Completion with Calibrated Backprojection Layers.
https://arxiv.org/pdf/2108.10531.pdf

@inproceedings{wong2021unsupervised,
  title={Unsupervised Depth Completion with Calibrated Backprojection Layers},
  author={Wong, Alex and Soatto, Stefano},
  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages={12747--12756},
  year={2021}
}
'''
import os
import torch
import numpy as np


def log(s, filepath=None, to_console=True):
    '''
    Logs a string to either file or console

    Arg(s):
        s : str
            string to log
        filepath : str
            output filepath for logging
        to_console : bool
            log to console
    '''
    if to_console:
        print(s)

    if filepath is not None:
        dirpath = os.path.dirname(filepath)

        # Bug fix: os.makedirs('') raises FileNotFoundError when filepath
        # has no directory component, so only create the directory when
        # there is one.  A freshly created directory gets a new ('w+')
        # file; an existing directory appends ('a+'), as before.
        if dirpath and not os.path.isdir(dirpath):
            os.makedirs(dirpath)
            mode = 'w+'
        else:
            mode = 'a+'

        with open(filepath, mode) as o:
            o.write(s + '\n')


def colorize(T, colormap='magma'):
    '''
    Colorizes a 1-channel tensor with matplotlib colormaps

    Arg(s):
        T : torch.Tensor[float32]
            1-channel tensor
        colormap : str
            matplotlib colormap
    '''
    # Imported locally so importing this module (e.g. for log()) does not
    # require matplotlib; plt is only used here.
    from matplotlib import pyplot as plt

    cm = plt.cm.get_cmap(colormap)
    shape = T.shape

    # Convert to numpy array and transpose to channel-last
    if shape[0] > 1:
        T = np.squeeze(np.transpose(T.cpu().numpy(), (0, 2, 3, 1)))
    else:
        # Explicit axis: a bare squeeze() would also drop the batch
        # dimension when batch size is 1.
        T = np.squeeze(np.transpose(T.cpu().numpy(), (0, 2, 3, 1)), axis=-1)

    # Colorize using colormap and transpose back to channel-first
    color = np.concatenate([
        np.expand_dims(cm(T[n, ...])[..., 0:3], 0) for n in range(T.shape[0])],
        axis=0)
    color = np.transpose(color, (0, 3, 1, 2))

    # Convert back to tensor
    return torch.from_numpy(color.astype(np.float32))
26.802632
92
0.60972
281
2,037
4.398577
0.451957
0.029126
0.043689
0.050162
0.171521
0.171521
0.153722
0.055016
0.055016
0.055016
0
0.035523
0.26755
2,037
75
93
27.16
0.792895
0.470299
0
0.148148
0
0
0.013118
0
0
0
0
0
0
1
0.074074
false
0
0.148148
0
0.259259
0.037037
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19c214d222aa500c556609e883b1ff02ba286869
788
py
Python
add-two-numbers/add-two-numbers.py
shaurya-src/code-leet
f642b81eb7bead46c66404bd48ca74bdfeb2abbb
[ "MIT" ]
null
null
null
add-two-numbers/add-two-numbers.py
shaurya-src/code-leet
f642b81eb7bead46c66404bd48ca74bdfeb2abbb
[ "MIT" ]
null
null
null
add-two-numbers/add-two-numbers.py
shaurya-src/code-leet
f642b81eb7bead46c66404bd48ca74bdfeb2abbb
[ "MIT" ]
null
null
null
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
        """Sum two reversed-digit linked lists and return the result list."""
        digits = str(self.get_num(l1) + self.get_num(l2))[::-1]
        head = ListNode(digits[0])
        tail = head
        for ch in digits[1:]:
            tail.next = ListNode(ch)
            tail = tail.next
        return head

    def get_num(self, ll):
        """Decode a reversed-digit linked list into the integer it encodes."""
        if not ll:
            return 0
        chars = []
        node = ll
        while node:
            chars.append(str(node.val))
            node = node.next
        return int("".join(chars)[::-1])
28.142857
98
0.497462
99
788
3.888889
0.393939
0.124675
0.051948
0
0
0
0
0
0
0
0
0.02045
0.379442
788
28
99
28.142857
0.766871
0.177665
0
0
0
0
0
0
0
0
0
0
0
1
0.095238
false
0
0
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19c251bd8c7eb79b25c470c6951dca0f932a8918
2,834
py
Python
likedtweets.py
PoliTwit1984/Politwitverse
837dd2d05b3977aa24a70f52a3b951ef22c51dc6
[ "MIT" ]
3
2022-01-05T07:12:14.000Z
2022-02-19T20:58:25.000Z
likedtweets.py
PoliTwit1984/Politwitverse
837dd2d05b3977aa24a70f52a3b951ef22c51dc6
[ "MIT" ]
25
2022-01-05T08:23:59.000Z
2022-02-07T01:25:39.000Z
likedtweets.py
PoliTwit1984/Politwitverse
837dd2d05b3977aa24a70f52a3b951ef22c51dc6
[ "MIT" ]
1
2022-02-01T22:39:57.000Z
2022-02-01T22:39:57.000Z
import time
import re

import tweepy
import preprocessor as p

import config
import string

# Credentials come from the untracked config module.
consumer_key = config.consumer_key
consumer_secret = config.consumer_secret
access_token = config.access_token
access_token_secret = config.access_token_secret
bearer_token = config.bearer_token
username = config.username
password = config.password


def clean_text(text):
    """
    Function to clean the text.

    Parameters:
    text: the raw text as a string value that needs to be cleaned

    Returns:
    cleaned_text: the cleaned text as string
    """
    # convert to lower case
    cleaned_text = text.lower()

    # remove HTML tags
    html_pattern = re.compile('<.*?>')
    cleaned_text = re.sub(html_pattern, '', cleaned_text)

    # remove punctuations
    cleaned_text = cleaned_text.translate(
        str.maketrans('', '', string.punctuation))

    return cleaned_text.strip()


def remove_whitespace(text):
    """Collapse every run of whitespace in ``text`` to a single space."""
    return " ".join(text.split())


def clean_tweets(tweet_text):
    """Strip URLs, mentions, emojis and smileys from a raw tweet string."""
    # Available preprocessor options:
    #   URL p.OPT.URL, Mention p.OPT.MENTION, Hashtag p.OPT.HASHTAG,
    #   Reserved Words p.OPT.RESERVED, Emoji p.OPT.EMOJI,
    #   Smiley p.OPT.SMILEY, Number p.OPT.NUMBER
    p.set_options(p.OPT.URL, p.OPT.MENTION, p.OPT.EMOJI, p.OPT.SMILEY)
    clean_tweet_text = p.clean(tweet_text)
    clean_tweet_text = remove_whitespace(clean_tweet_text)
    clean_tweet_text = clean_tweet_text.replace('&amp', "")
    return clean_tweet_text


def makeitastring(wannabestring):
    """Join any iterable into one comma-separated string."""
    convertedstring = ','.join(map(str, wannabestring))
    return convertedstring


client = tweepy.Client(bearer_token=bearer_token)

list_id = "1467207384011526144"  # all missouri legislators

response = client.get_list_members(list_id, max_results=100)
users = response.data
metadata = response.meta
next_token = metadata.get("next_token")
print(next_token)

while True:
    # Write out the page we already have.  Bug fix: the local variable was
    # called ``string`` in the original, clobbering the imported ``string``
    # module that clean_text() relies on.
    for user in users:
        member_line = str(user.name) + "," + str(user.id) + "," \
            + str(user.username) + "\n"
        with open('moleglistmembership.txt', 'a') as f:
            f.write(member_line)

    # Bug fix: the original loop tested ``next_token`` *before* writing the
    # current page, so the final page was dropped -- and for single-page
    # lists no members were written at all.
    if next_token is None:
        break

    response = client.get_list_members(list_id,
                                       pagination_token=next_token,
                                       max_results=100)
    users = response.data
    metadata = response.meta
    next_token = metadata.get("next_token")
    print(next_token)
26.485981
97
0.677135
371
2,834
4.951482
0.291105
0.053892
0.053348
0.031029
0.200871
0.190528
0.138269
0.101252
0.101252
0.101252
0
0.012232
0.192308
2,834
106
98
26.735849
0.790301
0.314397
0
0.166667
0
0
0.041335
0.012189
0
0
0
0
0
1
0.083333
false
0.020833
0.125
0.020833
0.25
0.041667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19c43d42b7108f348940b9fd8fc9fb33a8830e2c
2,112
py
Python
audclass.py
theunafraid/audiofeedback-prevention
0dd3e8ab7b5a65aff214e74b7bd7869366b1b7b5
[ "Apache-2.0" ]
1
2022-01-20T08:30:20.000Z
2022-01-20T08:30:20.000Z
audclass.py
theunafraid/audiofeedback-prevention
0dd3e8ab7b5a65aff214e74b7bd7869366b1b7b5
[ "Apache-2.0" ]
null
null
null
audclass.py
theunafraid/audiofeedback-prevention
0dd3e8ab7b5a65aff214e74b7bd7869366b1b7b5
[ "Apache-2.0" ]
null
null
null
import tensorflow as tf
import numpy as np
from tensorflow.python.ops.gen_batch_ops import batch
from model import AudioClass
from qrnn import QRNN
from numpy.random import seed
from numpy.random import randn
from random import randint
from lstmfcn import LSTM_FCN
import librosa
import os


def getData():
    """Generate 10 random 16000-sample signals with random one-hot labels."""
    samples = []
    labels = []
    for _ in range(10):
        samples.append(np.array(randn(16000)))
        one_hot = np.zeros(3)
        one_hot[randint(0, 2)] = 1.0
        labels.append(one_hot)
    return np.array(samples), np.array(labels)


def readFileData(dir, filename):
    """Load one audio file at 16 kHz; the class id is parsed from the
    'name-<id>.<ext>' filename pattern."""
    class_id = (filename.split('-')[1]).split('.')[0]
    audio, _sample_rate = librosa.load(dir + '/' + filename, sr=16000)
    return np.vstack(audio), int(class_id)


def getDataFromFolder(folder):
    """Read every file in ``folder`` into a float32 feature array plus
    one-hot labels over 3 classes."""
    features = []
    labels = []
    files = os.listdir(folder)
    print("files : ", files)
    for entry in files:
        if not os.path.isfile(folder + "/" + entry):
            continue
        data, classid = readFileData(folder, entry)
        features.append(np.asarray(data).astype(np.float32))
        one_hot = np.zeros(3)
        one_hot[classid] = 1.0
        labels.append(one_hot)
    return np.asarray(features).astype(np.float32), np.array(labels)


def main():
    """Train the QRNN model on the 300 ms dataset and save it to disk."""
    try:
        model = QRNN(16000, 5120)
        model.printmodel()
        X, Y = getDataFromFolder("./audio/ds_0.3s/300ms_additional/")
        epochs = 350
        batch = 8
        model.train(X, Y, epochs, batch)
        print("save model...", flush=True)
        model.save("./qrnn.h5")
    except Exception as ex:
        print(ex)


if __name__ == "__main__":
    main()
26.734177
78
0.588542
274
2,112
4.474453
0.361314
0.034258
0.026917
0.034258
0.151713
0.088091
0.04894
0.04894
0.04894
0
0
0.038462
0.273674
2,112
78
79
27.076923
0.760756
0.138731
0
0.178571
0
0
0.041644
0.018323
0
0
0
0
0
1
0.071429
false
0
0.196429
0
0.321429
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19c79aebe6cccec71cf534b0497f44d1a8496883
4,127
py
Python
python_implementation/matriz/quadrada.py
SousaPedro11/algoritmos
86a3601912778d120b9ec8094267c26a7eb6d153
[ "MIT" ]
null
null
null
python_implementation/matriz/quadrada.py
SousaPedro11/algoritmos
86a3601912778d120b9ec8094267c26a7eb6d153
[ "MIT" ]
null
null
null
python_implementation/matriz/quadrada.py
SousaPedro11/algoritmos
86a3601912778d120b9ec8094267c26a7eb6d153
[ "MIT" ]
null
null
null
import math
from typing import List, Tuple


def __cria_matriz_quadrada(tamanho: int = 20) -> List[List[str]]:
    # Build a tamanho x tamanho matrix filled with the placeholder '0'.
    matriz = []
    for _ in range(tamanho):
        linha = ['0' for _ in range(tamanho)]
        matriz.append(linha)
    return matriz


def __diagonais(matriz: List[List[str]]) -> Tuple[list, list]:
    # Mark both diagonals of the matrix in place with quadrant letters
    # and return the column indices of each diagonal.
    tamanho = len(matriz)
    diagonal_principal = []
    diagonal_secundaria = []
    # B/A/Y/X label the top/bottom/right/left quadrants respectively.
    top, bottom, right, left = 'B', 'A', 'Y', 'X'
    if tamanho >= 20:
        ponto_medio = math.ceil(tamanho / 2)
        diagonal_principal = [j for j in range(tamanho)]
        diagonal_secundaria = [j for j in range(tamanho)[::-1]]
        for i, j in enumerate(diagonal_secundaria):
            # Upper half of the anti-diagonal belongs to the right
            # quadrant, lower half to the left quadrant.
            matriz[i][j] = right if (i < ponto_medio) else left
        for i, j in enumerate(diagonal_principal):
            matriz[i][j] = top if (j < ponto_medio) else bottom
    return diagonal_principal, diagonal_secundaria


def __quadrantes(matriz: List[List[str]], diagonal_p: list, diagonal_s: list) -> None:
    # Fill every cell strictly between the two diagonals with the letter
    # of the quadrant it falls in (B top, A bottom, X left, Y right).
    tamanho = len(matriz)
    if tamanho >= 20:
        for i in range(tamanho):
            elemento_dp = diagonal_p[i]
            elemento_ds = diagonal_s[i]
            for j in range(tamanho):
                if elemento_dp < j < elemento_ds:
                    matriz[i][j] = 'B'
                elif elemento_ds < j < elemento_dp:
                    matriz[i][j] = 'A'
                elif j < elemento_dp and j < elemento_ds:
                    matriz[i][j] = 'X'
                elif j > elemento_dp and j > elemento_ds:
                    matriz[i][j] = 'Y'


def __imprime_matriz(matriz: List[List[str]]) -> None:
    # Pretty-print the matrix: one space-separated row per line.
    try:
        print(f'Matriz de tamanho: {len(matriz)}')
        for linha in matriz:
            print(' '.join(linha))
        print('\n')
    except ValueError as e:
        print(e)


def __define_tamanho(msg: str) -> int:
    # Prompt repeatedly until the user types a valid integer.
    while True:
        try:
            tamanho = int(input(f'{msg}: '))
            break
        except ValueError:
            print('O valor informado não é um inteiro!')
    return tamanho


def __define_matriz_maior() -> List[List[str]]:
    # Interactively request an order >= 20, build the outer matrix, draw
    # its diagonals/quadrants, print it and return it.
    print('MATRIZ MAIOR')
    tamanho = __define_tamanho(
        msg='Defina a ordem de uma matriz quadrada (inteiro maior ou igual a 20)',
    )
    while tamanho < 20:
        print('Valor informado menor que 20!')
        tamanho = __define_tamanho(
            msg='Defina a ordem de uma matriz quadrada (inteiro maior ou igual a 20)',
        )
    matriz = __cria_matriz_quadrada(tamanho)
    diagonal_principal, diagonal_secundaria = __diagonais(matriz)
    __quadrantes(matriz, diagonal_principal, diagonal_secundaria)
    __imprime_matriz(matriz)
    return matriz


def __define_matriz_menor(len_matriz_maior: int) -> List[List[str]]:
    # Interactively request an order strictly smaller than the outer
    # matrix and build a plain (all '0') inner matrix.
    print('MATRIZ MENOR')
    tamanho = __define_tamanho(
        msg=f'Defina a ordem de uma matriz quadrada (inteiro menor que {len_matriz_maior})',
    )
    while tamanho >= len_matriz_maior:
        print(f'Valor informado maior que {len_matriz_maior}!')
        tamanho = __define_tamanho(
            msg=f'Defina a ordem de uma matriz quadrada (inteiro menor que {len_matriz_maior})',
        )
    matriz = __cria_matriz_quadrada(tamanho)
    __imprime_matriz(matriz)
    return matriz


def __gera_matriz_concentrica(matriz_maior: List[List[str]], matriz_menor: List[List[str]]) -> None:
    # Overlay the smaller matrix centered on the larger one and print it.
    # NOTE(review): list.copy() is shallow, so the rows of matriz_maior
    # are mutated through `maior` -- confirm this is intended.
    if len(matriz_menor) > len(matriz_maior):
        raise ValueError('Matriz menor declarada no local errado!')
    print('MATRIZ CONCENTRICA')
    maior = matriz_maior.copy()
    menor = matriz_menor.copy()
    ponto_medio_maior = math.ceil(len(maior) / 2)
    ponto_medio_menor = math.ceil(len(menor) / 2)
    diferenca = ponto_medio_maior - ponto_medio_menor
    for i, linha in enumerate(menor):
        for j, coluna in enumerate(linha):
            maior[i + diferenca][j + diferenca] = coluna
    __imprime_matriz(maior)


def solucao_problema():
    # Entry point: build both matrices interactively, then print the
    # concentric combination.
    matriz_maior = __define_matriz_maior()
    matriz_menor = __define_matriz_menor(len(matriz_maior))
    __gera_matriz_concentrica(matriz_maior, matriz_menor)


if __name__ == '__main__':
    solucao_problema()
33.282258
100
0.628544
525
4,127
4.672381
0.188571
0.067265
0.035874
0.057073
0.392173
0.265389
0.16062
0.16062
0.16062
0.16062
0
0.006262
0.264841
4,127
123
101
33.552846
0.802241
0
0
0.21
0
0
0.129876
0
0
0
0
0
0
1
0.09
false
0
0.02
0
0.16
0.1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19c9e0f683fb12bcf45633873b78ecba612bb09f
7,399
py
Python
theseus/util/serialize.py
shiplift/theseus
9324d67e6e0c6b93a7734a5531838c5a909a1424
[ "0BSD" ]
null
null
null
theseus/util/serialize.py
shiplift/theseus
9324d67e6e0c6b93a7734a5531838c5a909a1424
[ "0BSD" ]
null
null
null
theseus/util/serialize.py
shiplift/theseus
9324d67e6e0c6b93a7734a5531838c5a909a1424
[ "0BSD" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
serialize

provide means to persist and recreate the currently known set of
W_Tags and all shapes and transformations reachable from there.

The rmarshal modules is used for serialization; the format is

marshal_proto = (
    int, # number of shapes
    [ # shape list
        ( # a shape
            int, # id
            (str, int), # tag
            [int], # structure: list of id's
            { # _hist
                (int, int) : # index, id
                int # count
            },
            { # transformation_rules
                (int, int) : # index, id
                int # id
            }
        )
    ],
    {
        (str, int) : # name arity
        int #id
    }
)

The serialized tree is written to a '.docked' files
"""
import os.path

from rpython.rlib.streamio import open_file_as_stream
from rpython.rlib.rmarshal import get_marshaller, get_unmarshaller
from rpython.rlib.debug import debug_start, debug_stop, debug_print

from theseus.model import W_Tag
from theseus.shape import in_storage_shape, CompoundShape


marshal_proto = (
    int,  # number of shapes
    [  # shape list
        (  # a shape
            int,  # id
            (str, int),  # tag
            [int],  # structure: list of id's
            {  # _hist
                (int, int):  # index, id
                int  # count
            },
            {  # transformation_rules
                (int, int):  # index, id
                int  # id
            }
        )
    ],
    {
        (str, int):  # name arity
        int  # id
    }
)

marshaller = get_marshaller(marshal_proto)
unmarshaller = get_unmarshaller(marshal_proto)


def punch_shape(s, registry):
    """
    Punch a shape to a tuple for marshalling.
    See slurp_shapes, configure_shapes for inverse.
    Format is
        ( # a shape
            int, # id
            (str, int), # tag
            [int], # structure: list of id's
            { # _hist
                (int, int) : # index, id
                int # count
            },
            { # transformation_rules
                (int, int) : # index, id
                int # id
            }
        )
    """
    if s == in_storage_shape:
        # The storage sentinel always serializes as id 0 with empty fields.
        return (0, ('', 0), [], {}, {})
    else:
        assert isinstance(s, CompoundShape)
        my_index = registry.index(s)

        hist = {}
        for (index, shape), count in s._hist.items():
            hist[(index, registry.index(shape))] = count

        trans = {}
        for (index, shape), to_shape in s.transformation_rules.items():
            # Fix: the original computed shape_id/to_shape_id here and then
            # discarded them, calling registry.index() twice more.
            trans[(index, registry.index(shape))] = registry.index(to_shape)

        punchee = (
            my_index,
            (s._tag.name, s._tag.arity()),
            [registry.index(subshape) for subshape in s._structure],
            hist,
            trans
        )
        return punchee


def recreate_shape(shape_desc, tags, registry):
    """
    Recreate a shape from its punched format; see punch_shape.
    Does not handle history and transformations.
    See configure_shape(s).
    """
    id, tag, structure_ids = shape_desc
    structure = [None] * len(structure_ids)
    for structure_index, sub_id in enumerate(structure_ids):
        # Sub-shapes always have smaller ids, so they are already slurped.
        assert sub_id < id
        subshape = registry[sub_id]
        assert subshape is not None
        structure[structure_index] = subshape

    return CompoundShape(tags[tag], structure)


def configure_shape(shape, hist, trans, registry):
    """
    Reconfigure a shape from its punched format; see punch_shape.
    Does _only_ handle history and transformations.
    See configure_shapes.
    """
    assert isinstance(shape, CompoundShape)
    shape._hist = {}
    for (index, s_id), count in hist.items():
        k = (index, registry[s_id])
        shape._hist[k] = count
    shape.transformation_rules = {}
    for (index, s_id), to_s_id in trans.items():
        k = (index, registry[s_id])
        shape.transformation_rules[k] = registry[to_s_id]


def configure_shapes(shapes, registry):
    """
    Reconfigure all shapes.
    Does _only_ handle history and transformations.
    See configure_shapes.
    """
    for id, _tag, _structure_ids, hist, trans in shapes:
        if id == 0:
            continue  # in_storage_shape, no configure
        configure_shape(registry[id], hist, trans, registry)


def slurp_registry(shapes, registry, tags_slurp, tags):
    """
    Slurp all shapes from their punched format (see punch_shape)
    not including history or transformation
    """
    # Default shapes were already installed by slurp_tags; skip them.
    known_ids = [0]
    for default_id in tags_slurp.values():
        known_ids.append(default_id)

    for id, tag, structure_ids, _hist, _trans in shapes:
        if id in known_ids:
            continue
        assert registry[id] is None
        registry[id] = recreate_shape((id, tag, structure_ids),
                                      tags, registry)


def punch_tags(tags):
    """
    Punch all tags into marshallable format:
    (
        int, # number of shapes
        [ # shape list
        ],
        {
            (str, int) : # name arity
            int #id
        }
    )
    """
    reg = [in_storage_shape] + CompoundShape._shapes
    punch_reg = [punch_shape(s, reg) for s in reg]
    res = {}
    for key, value in tags.items():
        res[key] = reg.index(value.default_shape)
    return (len(punch_reg), punch_reg, res)


def slurp_tags(un_tags):
    """
    Slurp all tags from their punched format (see punch_tag).
    Recursively slurps shapes and then configures them.
    """
    num_shapes, shapes_slurp, tags_slurp = un_tags
    registry = [None] * num_shapes
    registry[0] = in_storage_shape

    tags = {}
    for (name, arity), default_id in tags_slurp.items():
        tag = W_Tag(name, arity)
        tags[(name, arity)] = tag
        registry[default_id] = tag.default_shape

    slurp_registry(shapes_slurp, registry, tags_slurp, tags)
    configure_shapes(shapes_slurp, registry)
    return tags


def come_up(basename):
    """
    Bring up previously marshalled Tags, shapes and transformations
    from '.docked' file un-marshalling, slurping and replacement of
    current Tags.
    """
    from theseus.shape import CompoundShape
    # later
    # from os import stat
    # statres = stat(path)
    debug_start("theseus-come-up")
    path = basename + '.docked'
    if not os.path.exists(path):
        # Fix: balance debug_start on every early return.
        debug_stop("theseus-come-up")
        return
    try:
        f = open_file_as_stream(path, buffering=0)
    except OSError as e:
        os.write(2, "Error(come_up)%s -- %s\n" % (os.strerror(e.errno), path))
        debug_stop("theseus-come-up")
        return
    try:
        res = unmarshaller(f.readall())
    finally:
        f.close()

    # Replace the global shape/tag state wholesale.
    del CompoundShape._shapes[:]
    W_Tag.tags.clear()
    new_tags = slurp_tags(res)
    for key, value in new_tags.items():
        W_Tag.tags[key] = value
    debug_stop("theseus-come-up")


def settle(basename):
    """
    Settle Tags, shapes and transformations to a '.docked' file
    punching and marshalling all current Tags.
    """
    debug_start("theseus-settle")
    path = basename + '.docked'
    buf = []
    marshaller(buf, punch_tags(W_Tag.tags))
    try:
        f = open_file_as_stream(path, mode="w", buffering=0)
    except OSError as e:
        os.write(2, "Error(settle)%s -- %s\n" % (os.strerror(e.errno), path))
        # Fix: balance debug_start on the error path.
        debug_stop("theseus-settle")
        return
    try:
        f.write(''.join(buf))
    finally:
        f.close()
    debug_stop("theseus-settle")
27.403704
79
0.592783
911
7,399
4.654226
0.188804
0.010613
0.015566
0.018396
0.290802
0.272642
0.223349
0.199292
0.199292
0.173349
0
0.001932
0.300446
7,399
269
80
27.505576
0.817233
0.320043
0
0.18705
0
0
0.025386
0
0
0
0
0
0.035971
1
0.064748
false
0
0.05036
0
0.172662
0.007194
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19cc7f391c49230cd25af4f7949e261ca27ffe2b
1,359
py
Python
external_scripts/run2.py
AAS97/tokenizRE
0186a2b533edaa0045b16b0b111b9637248e5046
[ "MIT" ]
null
null
null
external_scripts/run2.py
AAS97/tokenizRE
0186a2b533edaa0045b16b0b111b9637248e5046
[ "MIT" ]
null
null
null
external_scripts/run2.py
AAS97/tokenizRE
0186a2b533edaa0045b16b0b111b9637248e5046
[ "MIT" ]
null
null
null
from web3 import Web3, HTTPProvider
import json
import os

# Connect to a local Ganache-style node; 60 s timeout for slow mining.
w3 = Web3(HTTPProvider("http://127.0.0.1:7545", request_kwargs={'timeout': 60}))
print(f"Web3 is connected : {w3.isConnected()}")

accounts = w3.eth.accounts

# ------------------------------- get contract ------------------------------- #
# Load the compiled contract ABI emitted by the dapp build.
abi_path = "./vapp/src/contracts/"
with open(os.path.join(abi_path, 'TokenHolderPayer.json'), "r") as file:
    property_contract_compiled = json.load(file)
property_contract_abi = property_contract_compiled['abi']

# Address of the already-deployed TokenHolderPayer contract on this chain.
contract_address = "0xE5972821D1218120C4E98986A3eEc997931690b4"
property_contract = w3.eth.contract(address=contract_address, abi=property_contract_abi)

# ------------------- buy some token from realestate agent ------------------- #
amount = 500

# Allow token to be sent: accounts[0] grants accounts[1] an allowance of
# `amount` tokens, enabling the transferFrom below.
property_contract.functions.increaseAllowance(accounts[1], amount).transact({'from': accounts[0], 'gas': 420000, 'gasPrice': 21000})

balance = property_contract.functions.balanceOf(accounts[1]).call()
print(f"initial balance {balance}")

# Pull 500 tokens from accounts[0] via the allowance, wait for the
# transaction to be mined, then read the updated balance.
tx_hash = property_contract.functions.transferFrom(accounts[0], accounts[1], 500).transact({'from': accounts[1], 'gas': 420000, 'gasPrice': 21000})
receipt = w3.eth.waitForTransactionReceipt(tx_hash)

balance = property_contract.functions.balanceOf(accounts[1]).call()
print(f"final balance {balance}")
37.75
146
0.693893
159
1,359
5.805031
0.440252
0.156013
0.108342
0.047671
0.130011
0.130011
0.130011
0.130011
0.130011
0.130011
0
0.072309
0.104489
1,359
35
147
38.828571
0.686113
0.130979
0
0.095238
0
0
0.197783
0.071611
0
0
0.035806
0
0
1
0
false
0
0.142857
0
0.142857
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19d4df790639614b567c8829dbce219210c26642
585
py
Python
src/weekly-reset.py
SlimeeGameS/VirginityBot
a1745893f21a16112bbf775fb2aff199c14dbbbb
[ "CC0-1.0" ]
null
null
null
src/weekly-reset.py
SlimeeGameS/VirginityBot
a1745893f21a16112bbf775fb2aff199c14dbbbb
[ "CC0-1.0" ]
14
2020-03-26T01:02:31.000Z
2021-03-24T23:48:44.000Z
src/weekly-reset.py
SlimeeGameS/VirginityBot
a1745893f21a16112bbf775fb2aff199c14dbbbb
[ "CC0-1.0" ]
2
2020-08-09T19:08:41.000Z
2021-05-12T17:44:28.000Z
import os
import asyncio
import logging

from pony.orm import *

import logger
from database import start_orm, get_biggest_virgin, Guild, Virgin

logger = logging.getLogger('virginity-bot')


async def reset_weekly_virginity():
    """Zero every virgin's weekly counters inside a single DB session."""
    with db_session:
        for virgin in Virgin.select():
            virgin.total_vc_time = 0
            virgin.virginity_score = 0
        commit()


async def main():
    """Log the run, connect the ORM, then perform the weekly reset."""
    logger.info('Running weekly reset')
    start_orm()
    await reset_weekly_virginity()


if __name__ == '__main__':
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(main())
18.870968
65
0.729915
79
585
5.101266
0.56962
0.039702
0.099256
0
0
0
0
0
0
0
0
0.004167
0.179487
585
30
66
19.5
0.835417
0
0
0
0
0
0.070085
0
0
0
0
0
0
1
0
false
0
0.285714
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19d525875da360fb20fb2929a08fff78176398d0
1,165
py
Python
hardhat/recipes/racket.py
stangelandcl/hardhat
1ad0c5dec16728c0243023acb9594f435ef18f9c
[ "MIT" ]
null
null
null
hardhat/recipes/racket.py
stangelandcl/hardhat
1ad0c5dec16728c0243023acb9594f435ef18f9c
[ "MIT" ]
null
null
null
hardhat/recipes/racket.py
stangelandcl/hardhat
1ad0c5dec16728c0243023acb9594f435ef18f9c
[ "MIT" ]
null
null
null
import os
import shutil

from .base import GnuRecipe


class RacketRecipe(GnuRecipe):
    """Build recipe for the Racket language distribution."""

    def __init__(self, *args, **kwargs):
        """Configure source checksum, dependencies and build flags."""
        super(RacketRecipe, self).__init__(*args, **kwargs)
        self.sha256 = 'bf2bce50b02c626666a8d2093638893e' \
                      '8beb8b2a19cdd43efa151a686c88edcf'
        self.depends = ['libffi']
        self.name = 'racket'
        self.version = '6.6'
        self.url = 'http://mirror.racket-lang.org/installers/$version/' \
                   'racket-$version-src.tgz'
        self.configure_args = self.shell_args + [
            '../src/configure',
            '--prefix=%s' % self.prefix_dir]

        # -O3 generates SIGSEGVs
        self.environment['CFLAGS'] = '-O2'
        self.environment['CXXFLAGS'] = '-O2'

    def patch(self):
        """Racket builds out-of-tree: create and switch to a build dir."""
        self.directory = os.path.join(self.directory, 'build')
        os.makedirs(self.directory)

    def clean(self):
        """Remove installed racket trees under the prefix after base clean."""
        super(RacketRecipe, self).clean()
        dirs = ['include', 'etc', 'share/doc', 'share', 'lib']
        # Fix: loop variable renamed from `dir`, which shadowed the builtin.
        for subdir in dirs:
            target = os.path.join(self.prefix_dir, subdir, 'racket')
            if os.path.exists(target):
                shutil.rmtree(target)
31.486486
73
0.572532
123
1,165
5.325203
0.504065
0.059542
0.064122
0.042748
0
0
0
0
0
0
0
0.053828
0.282403
1,165
36
74
32.361111
0.729665
0.018884
0
0
0
0
0.207713
0.076249
0
0
0
0
0
1
0.107143
false
0
0.107143
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19d5619a8ce652fe7933c1843f9585227eb325de
3,257
py
Python
lichess-gist.py
swimmy4days/lichess-gist
b70e605345f789e032291253df506384ccbaa270
[ "MIT" ]
null
null
null
lichess-gist.py
swimmy4days/lichess-gist
b70e605345f789e032291253df506384ccbaa270
[ "MIT" ]
null
null
null
lichess-gist.py
swimmy4days/lichess-gist
b70e605345f789e032291253df506384ccbaa270
[ "MIT" ]
null
null
null
import os
import sys

import berserk
from github import Github, InputFileContent, Gist

# Layout constants for the rendered gist lines.
SEPARATOR = "."
PADDING = {"puzzle": 0, "crazyhouse": 0, "chess960": 0, "kingOfTheHill": 0,
           "threeCheck": 2, "antichess": 0, "atomic": 0, "horde": 0,
           "racingKings": 0, "ultraBullet": 0, "blitz": 1, "classical": 1,
           "rapid": 0, "bullet": 0, "correspondence": 3}
emojis = {"puzzle": "🧩", "crazyhouse": "🤪", "chess960": "9️⃣6️⃣0️⃣",
          "kingOfTheHill": "👑", "threeCheck": "3️⃣", "antichess": "",
          "atomic": "⚛", "horde": "🐎", "racingKings": "🏁",
          "ultraBullet": "🚅", "blitz": "⚡", "classical": "🏛",
          "rapid": "⏰", "bullet": "🚂", "correspondence": "🤼‍♂️"}

ENV_VAR_GIST_ID = "GIST_ID"
ENV_VAR_GITHUB_TOKEN = "GH_TOKEN"
ENV_VAR_LICHESS_USERNAME = "LICHESS_USERNAME"
REQUIRED_ENVS = [
    ENV_VAR_GIST_ID,
    ENV_VAR_GITHUB_TOKEN,
    ENV_VAR_LICHESS_USERNAME
]


def check_vars() -> bool:
    """Return True when every required environment variable is set."""
    env_vars_absent = [
        env
        for env in REQUIRED_ENVS
        if env not in os.environ or len(os.environ[env]) == 0
    ]
    if env_vars_absent:
        print(
            f"Please define {env_vars_absent} in your github secrets. Aborting...")
        return False
    return True


def init() -> tuple:
    """Create the gist handle and fetch the lichess public profile."""
    gh_gist = Github(ENV_VAR_GITHUB_TOKEN).get_gist(ENV_VAR_GIST_ID)
    lichess_acc = berserk.Client().users.get_public_data(ENV_VAR_LICHESS_USERNAME)
    return (gh_gist, lichess_acc)


def get_rating(acc: dict) -> list:
    """Collect (variant, rating, provisional-flag, games), rating-descending."""
    ratings = []
    for key in acc['perfs'].keys():
        # '?' marks a provisional rating; the API only includes the 'prov'
        # key when the rating is provisional.
        prov = '?'
        try:
            acc['perfs'][key]['prov']
        except KeyError:
            prov = ""
        ratings.append((key, acc['perfs'][key]['rating'], prov,
                        acc['perfs'][key]['games']))
    ratings.sort(key=lambda k: k[1], reverse=True)
    return ratings


def fromated_line(variant: str, games: str, rating_prov: str, max_line_length: int) -> str:
    """Build one gist line, dot-padded so the rating is right-aligned."""
    separation = max_line_length - (
        len(variant) + len(games) + len(rating_prov) + 4  # emojis and brackets
    )
    separator = f" {SEPARATOR * separation} "
    return variant + f"({games})" + separator + rating_prov


def update_gist(gist: Gist, text: str) -> None:
    """Overwrite the first file of the gist with ``text``.

    Bug fix: the original annotated the return type as ``bool`` although
    nothing is ever returned.
    """
    gist.edit(description="",
              files={list(gist.files.keys())[0]: InputFileContent(content=text)})


def main():
    """Resolve env vars, fetch ratings and push the formatted gist."""
    if not check_vars():
        return
    global ENV_VAR_GIST_ID, ENV_VAR_GITHUB_TOKEN, ENV_VAR_LICHESS_USERNAME
    # The constants are re-bound from env-var *names* to their *values*.
    ENV_VAR_GIST_ID = os.environ[ENV_VAR_GIST_ID]
    ENV_VAR_GITHUB_TOKEN = os.environ[ENV_VAR_GITHUB_TOKEN]
    ENV_VAR_LICHESS_USERNAME = os.environ[ENV_VAR_LICHESS_USERNAME]

    gist, lichess_acc = init()
    rating = get_rating(lichess_acc)
    content = [fromated_line((emojis[line[0]] + line[0]),
                             str(line[3]),
                             str(line[1]) + line[2] + " 📈",
                             52 + PADDING[line[0]])
               for line in rating]
    print("\n".join(content))
    update_gist(gist, "\n".join(content))


if __name__ == "__main__":
    # test with python lichess-gist.py test <gist> <github-token> <user>
    if len(sys.argv) > 1:
        os.environ[ENV_VAR_GIST_ID] = sys.argv[2]
        os.environ[ENV_VAR_GITHUB_TOKEN] = sys.argv[3]
        os.environ[ENV_VAR_LICHESS_USERNAME] = sys.argv[4]
    main()

# %%
31.317308
118
0.612834
437
3,257
4.379863
0.299771
0.065831
0.036573
0.043887
0.178161
0.164577
0.087252
0.087252
0.052247
0.052247
0
0.016315
0.228431
3,257
103
119
31.621359
0.735774
0.027326
0
0
0
0
0.147598
0
0
0
0
0
0
1
0.081081
false
0
0.054054
0
0.216216
0.027027
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19d5e02630a84a1866bbfe9f9deb571cc98a96cc
951
py
Python
alembic/versions/60c735df8d2f_.py
brouberol/grand-cedre
05f18d1f8b7253ffa7fb5b33b30ceadcc93c4e93
[ "BSD-3-Clause" ]
null
null
null
alembic/versions/60c735df8d2f_.py
brouberol/grand-cedre
05f18d1f8b7253ffa7fb5b33b30ceadcc93c4e93
[ "BSD-3-Clause" ]
22
2019-09-03T20:08:42.000Z
2022-03-11T23:58:02.000Z
alembic/versions/60c735df8d2f_.py
brouberol/grand-cedre
05f18d1f8b7253ffa7fb5b33b30ceadcc93c4e93
[ "BSD-3-Clause" ]
null
null
null
"""empty message Revision ID: 60c735df8d2f Revises: 88bb7e12da60 Create Date: 2019-09-06 08:27:03.082097 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = "60c735df8d2f" down_revision = "88bb7e12da60" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column("invoices", sa.Column("payed_at", sa.Date(), nullable=True)) op.add_column("invoices", sa.Column("check_number", sa.String(), nullable=True)) op.add_column( "invoices", sa.Column("wire_transfer_number", sa.String(), nullable=True) ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column("invoices", "wire_transfer_number") op.drop_column("invoices", "check_number") op.drop_column("invoices", "payed_at") # ### end Alembic commands ###
27.171429
84
0.690852
120
951
5.333333
0.45
0.13125
0.051563
0.089063
0.442188
0.298438
0.259375
0.259375
0
0
0
0.060377
0.164038
951
34
85
27.970588
0.744654
0.3102
0
0
0
0
0.245557
0
0
0
0
0
0
1
0.125
false
0
0.125
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19d5e29e652c7abc55afdd0fed0c5112571018a1
3,640
py
Python
python/genre_classifier.py
nscharrenberg/Aliran
628de0476b8f8b413a6fdddf5392c590e8b27654
[ "MIT" ]
null
null
null
python/genre_classifier.py
nscharrenberg/Aliran
628de0476b8f8b413a6fdddf5392c590e8b27654
[ "MIT" ]
null
null
null
python/genre_classifier.py
nscharrenberg/Aliran
628de0476b8f8b413a6fdddf5392c590e8b27654
[ "MIT" ]
null
null
null
import scipy.io.wavfile as wav import numpy as np import os import pickle import random import operator from python_speech_features import mfcc dataset = [] training_set = [] test_set = [] # Get the distance between feature vectors def distance(instance1, instance2, k): mm1 = instance1[0] cm1 = instance1[1] mm2 = instance2[0] cm2 = instance2[1] dist = np.trace(np.dot(np.linalg.inv(cm2), cm1)) dist += (np.dot(np.dot((mm2 - mm1).transpose(), np.linalg.inv(cm2)), mm2 - mm1)) dist += np.log(np.linalg.det(cm2)) - np.log(np.linalg.det(cm1)) dist -= k return dist # Find Neighbors def get_neighbors(training_dataset, instance, k): distances = [] for i in range(len(training_dataset)): dist = distance(training_dataset[i], instance, k) + distance(instance, training_dataset[i], k) distances.append((training_dataset[i][2], dist)) distances.sort(key=operator.itemgetter(1)) neighbors = [] for i in range(k): neighbors.append(distances[i][0]) return neighbors # Identify the Nearest Neighbor (Genres) def nearest_genre(neighbors): class_vote = {} for i in range(len(neighbors)): res = neighbors[i] if res in class_vote: class_vote[res] += 1 else: class_vote[res] = 1 sorted_vote = sorted(class_vote.items(), key=operator.itemgetter(1), reverse=True) return sorted_vote[0][0] # Model Evaluation to get the accuracy def get_accuracy(temp_test_set, temp_predictions): correct = 0 for i in range(len(temp_test_set)): if temp_test_set[i][-1] == temp_predictions[i]: correct += 1 return 1.0 * correct / len(temp_test_set) # Extract features from the audio files and store them in a model file def extract_features(filename): directory = "Data/genres_original/" f = open(filename, "wb") it = 0 for tempDir in os.listdir(directory): it += 1 if it == 11: break for file in os.listdir(directory + tempDir): try: print(file) (rate, sig) = wav.read(directory + tempDir + "/" + file) mfcc_feat = mfcc(sig, rate, winlen=0.020, appendEnergy=False) covariance = np.cov(np.matrix.transpose(mfcc_feat)) mean_matrix = 
mfcc_feat.mean(0) feature = (mean_matrix, covariance, it) pickle.dump(feature, f) except EOFError: f.close() f.close() # Load in the Dataset def load_dataset(filename, split, tr_set, te_set): with open(filename, "rb") as f: while True: try: dataset.append(pickle.load(f)) except EOFError: f.close() break for i in range(len(dataset)): if random.random() < split: tr_set.append(dataset[i]) else: te_set.append(dataset[i]) if __name__ == '__main__': print('Starting....') local_filename = "dataset.aliran" extracting = False if extracting: print('Extracting Features...') print('Building Model...') extract_features(local_filename) print('Loading Dataset...') load_dataset(local_filename, 0.66, training_set, test_set) print('Making a prediction...') print('(This may take a few minutes)') predictions = [] for x in range(len(test_set)): predictions.append(nearest_genre(get_neighbors(training_set, test_set[x], 5))) accuracy = get_accuracy(test_set, predictions) print('Prediction Accuracy is:') print(accuracy)
26.376812
102
0.613462
470
3,640
4.617021
0.308511
0.029032
0.013825
0.025346
0.059908
0
0
0
0
0
0
0.018386
0.267857
3,640
137
103
26.569343
0.795872
0.06044
0
0.113402
0
0
0.055963
0.006153
0
0
0
0
0
1
0.061856
false
0
0.072165
0
0.175258
0.092784
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19d94ed3daa7c3c452d53a4b890d6a26c3139991
1,653
py
Python
run.py
dkosilov/reconciler_anchor_salesforce
5cf6a8ccaedce84e7dab6c32955c644ede0c6e07
[ "Xnet", "X11" ]
1
2020-09-22T11:49:07.000Z
2020-09-22T11:49:07.000Z
run.py
dkosilov/reconciler_anchor_salesforce
5cf6a8ccaedce84e7dab6c32955c644ede0c6e07
[ "Xnet", "X11" ]
null
null
null
run.py
dkosilov/reconciler_anchor_salesforce
5cf6a8ccaedce84e7dab6c32955c644ede0c6e07
[ "Xnet", "X11" ]
null
null
null
import argparse from libs.data_model import AnchorNorthstarDataframe, SalesForceDataframe, \ AnchorSalesforceAccountsDataframe, AnchorSalesforceContactsDataframe from libs.utils import save_dataframes_to_excel parser = argparse.ArgumentParser(description='Reconcile accounts and contacts between Anchor and Salesforce') parser.add_argument('-a', '--anchor-file', help='Path to Anchor Excel workbook', required=True) parser.add_argument('-n', '--northstar-file', help='Path to Northstar Excel workbook', required=True) parser.add_argument('-s', '--salesforce-file', help='Path to Salesforce Excel workbook', required=True) parser.add_argument('-t', '--account-name-match-ratio-threshold', type=int, help='Account names with specified (or above) similarity ratio will be used for joining Anchor and ' 'Salesforce account data. Number between 0 and 100.', default=75) parser.add_argument('-r', '--result-file', help='Path to result Excel workbook. The file will have 2 spreadsheets for accounts and ' 'contacts reconciliation', required=True) args = parser.parse_args() anchor_ns = AnchorNorthstarDataframe(args.anchor_file, args.northstar_file) salesforce = SalesForceDataframe(args.salesforce_file) anchor_sf_accounts = AnchorSalesforceAccountsDataframe(anchor_ns, salesforce, args.account_name_match_ratio_threshold) anchor_sf_contacts = AnchorSalesforceContactsDataframe(anchor_ns, salesforce) save_dataframes_to_excel(args.result_file, {'Accounts': anchor_sf_accounts.df, 'Contacts': anchor_sf_contacts.df}, wrap_text=False)
57
120
0.754991
193
1,653
6.295337
0.393782
0.037037
0.069959
0.046091
0.153086
0.103704
0.103704
0
0
0
0
0.004979
0.149425
1,653
28
121
59.035714
0.859175
0
0
0
0
0
0.317191
0.021792
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19db3143b0967735343ec7fb40012d028a989ea5
1,650
py
Python
billrelease.py
arby36/BillAi
e5c10c35279a1669d218439671e03bc17acb7fdc
[ "MIT" ]
null
null
null
billrelease.py
arby36/BillAi
e5c10c35279a1669d218439671e03bc17acb7fdc
[ "MIT" ]
null
null
null
billrelease.py
arby36/BillAi
e5c10c35279a1669d218439671e03bc17acb7fdc
[ "MIT" ]
null
null
null
def bill(): print("I am bill, please input your name") name = str(raw_input()) print("Hi %s" % name) print("Now input a command") a = raw_input("Command line:") a = a.lower() if a == "": print("You inputed nothing") bill() if a == "help": print("The commands in my database are help, hello, do this * math problem, do this division math problem") bill() if a == "hello": print("Hello %s!" % name) bill() if a == "do this * math problem": print("Type no. 1") b = int(raw_input("Please type an integer")) print("Type no. 2") c = int(raw_input("Please type an integer")) print("Computing...") d = b * c print("The answer is %d" % d) bill() if a == "do this division math problem": print("Type no. 1") e = int(raw_input("Please type an integer")) print("Type no. 2") f = int(raw_input("Please type an integer")) print("Computing...") g = e * f print("The answer is %d" % g) bill() if a == "multiply my name": name * 100 bill() if a == "open database": print("Openining database") bill_database() else: print("That command is not in my database") def bill_database(): print("Welcome to the bill Profile database, input your first name (Sorry, this command has been discontinued in the release version.") a = str(raw_input("Enter Here:")) a = a.lower() print("Information for %s" % a) a = a.lower() bill() bill()
27.966102
139
0.527273
222
1,650
3.878378
0.324324
0.065041
0.04878
0.078978
0.361208
0.253194
0.199768
0.199768
0.199768
0.097561
0
0.006381
0.335152
1,650
59
140
27.966102
0.778487
0
0
0.34
0
0.04
0.409697
0
0
0
0
0
0
1
0.04
false
0
0
0
0.04
0.36
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19e36b29ee592d089dc07f0b81f9a1312e103cce
34,894
py
Python
sw/EdgeBERT/transformers/src/transformers/modeling_highway_albert.py
yihuajack/EdgeBERT
a51ae7557187e3251f4b11bc13ef9cbd336019ff
[ "Apache-2.0" ]
8
2021-11-01T01:38:04.000Z
2022-03-20T16:03:39.000Z
sw/EdgeBERT/transformers/src/transformers/modeling_highway_albert.py
yihuajack/EdgeBERT
a51ae7557187e3251f4b11bc13ef9cbd336019ff
[ "Apache-2.0" ]
1
2021-11-19T08:04:02.000Z
2021-12-19T07:21:48.000Z
sw/EdgeBERT/transformers/src/transformers/modeling_highway_albert.py
yihuajack/EdgeBERT
a51ae7557187e3251f4b11bc13ef9cbd336019ff
[ "Apache-2.0" ]
5
2021-11-19T07:52:44.000Z
2022-02-10T08:23:19.000Z
import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from .modeling_albert import AlbertPreTrainedModel, AlbertLayerNorm, AlbertLayerGroup from .modeling_bert import BertEmbeddings from .modeling_highway_bert import BertPooler import numpy as np def entropy(x): # x: torch.Tensor, logits BEFORE softmax exp_x = torch.exp(x) A = torch.sum(exp_x, dim=1) # sum of exp(x_i) B = torch.sum(x*exp_x, dim=1) # sum of x_i * exp(x_i) return torch.log(A) - B/A class AlbertEmbeddings(BertEmbeddings): """Construct the embeddings from word, position and token_type embeddings. """ def __init__(self, config): #super(AlbertEmbeddings, self).__init__() super().__init__(config) #self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) #self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) #self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file #self.LayerNorm = AlbertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.LayerNorm = AlbertLayerNorm(config.embedding_size, eps=config.layer_norm_eps) #self.dropout = nn.Dropout(config.hidden_dropout_prob) #def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): # if input_ids is not None: # input_shape = input_ids.size() # else: # input_shape = inputs_embeds.size()[:-1] # # seq_length = input_shape[1] # device = input_ids.device if input_ids is not None else inputs_embeds.device # if position_ids is None: # position_ids = torch.arange(seq_length, dtype=torch.long, 
device=device) # position_ids = position_ids.unsqueeze(0).expand(input_shape) # if token_type_ids is None: # token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # # if inputs_embeds is None: # inputs_embeds = self.word_embeddings(input_ids) # position_embeddings = self.position_embeddings(position_ids) # token_type_embeddings = self.token_type_embeddings(token_type_ids) # # embeddings = inputs_embeds + position_embeddings + token_type_embeddings # embeddings = self.LayerNorm(embeddings) # #embeddings = self.dropout(embeddings) # return embeddings class AlbertTransformer(nn.Module): def __init__(self, config, params): super().__init__() self.config = config self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size) self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config, params) for _ in range(config.num_hidden_groups)]) self.entropy_predictor = config.entropy_predictor if config.entropy_predictor: self.lookup_table = np.loadtxt(config.lookup_table_file, delimiter=",") self.predict_layer = config.predict_layer self.predict_average_layers = config.predict_average_layers self.extra_layer=config.extra_layer self.get_predict_acc=config.get_predict_acc self.no_ee_before=config.no_ee_before #self.layer = nn.ModuleList([AlbertLayer(config) for _ in range(config.num_hidden_layers)]) ### try grouping for efficiency if config.one_class: self.highway = nn.ModuleList([AlbertHighway(config) for _ in range(config.num_hidden_groups)]) self.early_exit_entropy = [-1 for _ in range(config.num_hidden_groups)] else: self.highway = nn.ModuleList([AlbertHighway(config) for _ in range(config.num_hidden_layers)]) self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)] def set_early_exit_entropy(self, x): print(x) if (type(x) is float) or (type(x) is int): for i in range(len(self.early_exit_entropy)): 
self.early_exit_entropy[i] = x else: self.early_exit_entropy = x def init_highway_pooler(self, pooler): loaded_model = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name]) def forward(self, hidden_states, attention_mask=None, head_mask=None): hidden_states = self.embedding_hidden_mapping_in(hidden_states) all_attentions = () all_highway_exits = () #if self.output_hidden_states: # all_hidden_states = (hidden_states,) #for i,layer_module in enumerate(self.albert_layer_groups): #for i, layer_module in enumerate(self.layer): for i in range(self.config.num_hidden_layers): # Number of layers in a hidden group layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups) # Index of the hidden group group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups)) layer_group_output = self.albert_layer_groups[group_idx]( hidden_states, attention_mask, head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group], ) hidden_states = layer_group_output[0] #stopped here if self.output_attentions: all_attentions = all_attentions + layer_group_output[-1] #added this section current_outputs = (hidden_states,) if self.output_hidden_states: current_outputs = current_outputs + (all_hidden_states,) if self.output_attentions: current_outputs = current_outputs + (all_attentions,) if self.config.one_class: highway_exit = self.highway[group_idx](current_outputs) else: highway_exit = self.highway[i](current_outputs) #added this section if not self.training: highway_logits = highway_exit[0] highway_entropy = entropy(highway_logits) highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy all_highway_exits = all_highway_exits + (highway_exit,) if self.config.one_class: ent_ = self.early_exit_entropy[group_idx] else: ent_ = self.early_exit_entropy[i] if not self.entropy_predictor: if highway_entropy < ent_: new_output = 
(highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i+1) elif (self.get_predict_acc): if i==0: count = 0 check_ee = 0 if self.predict_layer-1 == i: if self.predict_average_layers: if i == 0: hw_ent_temp = highway_entropy.cpu().numpy()[0] else: hw_ent_temp = hw_ent_temp + highway_entropy.cpu().numpy()[0] hw_ent = hw_ent_temp / float((i+1)) else: hw_ent = highway_entropy.cpu().numpy()[0] #hash into lookup table w/ highway_entropy idx = (np.abs(self.lookup_table[:,0] - hw_ent)).argmin() entropy_layers = np.transpose(self.lookup_table[idx,1:]) below_thresh = entropy_layers < ent_ k = np.argmax(below_thresh) # k is number of remaining layers if (np.sum(below_thresh) == 0): #never hit threshold k = entropy_layers.shape[0] - 1 k = k + self.predict_layer count = count + 1 #print(idx) #print(self.lookup_table[idx,:]) #print(k) if ((highway_entropy < ent_) or (i == self.config.num_hidden_layers-1)) and not check_ee: j = i # j is hw exit layer count = count + 1 check_ee = 1 if count == 2: new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) #return abs value of diff between j and k if j>k: raise HighwayException(new_output, (j-k) + 1) else: raise HighwayException(new_output, (k-j) + 1) else: if (i < self.predict_layer - 1): # before predict layer #exit here???? 
if highway_entropy < ent_: new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i+1) if self.predict_average_layers: # predict layer if i == 0: hw_ent_temp = highway_entropy.cpu().numpy()[0] else: hw_ent_temp = hw_ent_temp + highway_entropy.cpu().numpy()[0] if (i == self.predict_layer - 1): # predict layer if highway_entropy < ent_: new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i+1) if self.predict_average_layers: if i == 0: hw_ent_temp = highway_entropy.cpu().numpy()[0] else: hw_ent_temp = hw_ent_temp + highway_entropy.cpu().numpy()[0] hw_ent = hw_ent_temp / float((i+1)) else: hw_ent = highway_entropy.cpu().numpy()[0] #hash into lookup table w/ highway_entropy idx = (np.abs(self.lookup_table[:,0] - hw_ent)).argmin() entropy_layers = np.transpose(self.lookup_table[idx,1:]) below_thresh = entropy_layers < ent_ k = np.argmax(below_thresh) # k is number of remaining layers if (np.sum(below_thresh) == 0): #never hit threshold k = entropy_layers.shape[0] - 1 # other layers (count down and then trigger highway exit if layer < self.num_hidden_layers) elif ((i >= self.predict_layer) and (i < self.config.num_hidden_layers - 2)): if (self.extra_layer): if k == 0: if highway_entropy < ent_: new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i+1) elif k==-1: new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i+1) else: if (not self.no_ee_before): if highway_entropy < ent_: new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i+1) if k == 0: #exit after counting down layers (CHECK CORRECT # OF LAYERS) new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i+1) k = k - 1 else: all_highway_exits = all_highway_exits + 
(highway_exit,) #use this???? # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) outputs = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions) class AlbertModel(AlbertPreTrainedModel): def __init__(self, config, params): super().__init__(config, params) self.config = config self.embeddings = AlbertEmbeddings(config) self.embeddings.requires_grad_(requires_grad=False) self.encoder = AlbertTransformer(config, params) self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.pooler_activation = nn.Tanh() self.init_weights() def init_highway_pooler(self): self.encoder.init_highway_pooler(self.pooler) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups. If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers. These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer, while [2,3] correspond to the two inner groups of the second hidden layer. Any layer with in index other than [0,1,2,3] will result in an error. 
See base class PreTrainedModel for more information about head pruning """ for layer, heads in heads_to_prune.items(): group_idx = int(layer / self.config.inner_group_num) inner_group_idx = int(layer - group_idx * self.config.inner_group_num) self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads) #@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, ): r""" Return: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pre-training. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Example:: from transformers import AlbertModel, AlbertTokenizer import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertModel.from_pretrained('albert-base-v2') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = ( head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) ) # We can specify head_mask for each layer 
head_mask = head_mask.to( dtype=next(self.parameters()).dtype ) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.num_hidden_layers embedding_output = self.embeddings( input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) #CHECK THIS encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask) sequence_output = encoder_outputs[0] pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) outputs = (sequence_output, pooled_output) + encoder_outputs[1:] # add hidden_states and attentions if they are here return outputs class HighwayException(Exception): def __init__(self, message, exit_layer): self.message = message self.exit_layer = exit_layer # start from 1! class AlbertHighway(nn.Module): r"""A module to provide a shortcut from the output of one non-final BertLayer in BertEncoder to cross-entropy computation in BertForSequenceClassification """ def __init__(self, config): #super().__init__(config) ### super(AlbertHighway, self).__init__() self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.pooler_activation = nn.Tanh() ## # self.pooler = BertPooler(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_outputs): # Pooler pooler_input = encoder_outputs[0] # pooler_output = self.pooler(pooler_input) # "return" pooler_output #adding here: pooler_input = self.pooler(pooler_input[:,0]) pooler_output = self.pooler_activation(pooler_input) # BertModel bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bodel_output # Dropout and classification pooled_output = bmodel_output[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) return logits, pooled_output class AlbertForSequenceClassification(AlbertPreTrainedModel): def __init__(self, config, params): 
super().__init__(config, params) self.num_labels = config.num_labels self.num_layers = config.num_hidden_layers self.albert = AlbertModel(config, params) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.init_weights() #@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs: loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. logits ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: from transformers import AlbertTokenizer, AlbertForSequenceClassification import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertForSequenceClassification.from_pretrained('albert-base-v2') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ exit_layer = self.num_layers try: outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: outputs = e.message exit_layer = e.exit_layer logits = outputs[0] if not self.training: original_entropy = entropy(logits) highway_entropy = [] highway_logits_all = [] if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) # work with highway exits highway_losses = [] for highway_exit in outputs[-1]: highway_logits = highway_exit[0] if not self.training: highway_logits_all.append(highway_logits) highway_entropy.append(highway_exit[2]) if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() highway_loss = 
loss_fct(highway_logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) highway_losses.append(highway_loss) if train_highway: outputs = (sum(highway_losses[:-1]),) + outputs # exclude the final highway, of course else: outputs = (loss,) + outputs if not self.training: outputs = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: outputs = (outputs[0],) +\ (highway_logits_all[output_layer],) +\ outputs[2:] ## use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions) class AlbertForQuestionAnswering(AlbertPreTrainedModel): def __init__(self, config, params): super().__init__(config) self.num_labels = config.num_labels self.num_layers = config.num_hidden_layers #self.albert = AlbertModel(config) self.albert = AlbertModel(config, params) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() # @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_layer=-1, train_highway=False ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). 
Position outside of the sequence are not taken into account for computing the loss. Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs: loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_scores ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). end_scores: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: # The checkpoint albert-base-v2 is not fine-tuned for question answering. Please see the # examples/run_squad.py example to see how to fine-tune a model to a question answering task. 
from transformers import AlbertTokenizer, AlbertForQuestionAnswering import torch tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') model = AlbertForQuestionAnswering.from_pretrained('albert-base-v2') question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_dict = tokenizer.encode_plus(question, text, return_tensors='pt') start_scores, end_scores = model(**input_dict) """ exit_layer = self.num_layers try: outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) outputs = (start_logits, end_logits,) + outputs[2:] except HighwayException as e: outputs = e.message exit_layer = e.exit_layer start_logits = outputs[0] end_logits = outputs[1] if not self.training: # original_start_entropy = entropy(start_logits) # original_end_entropy = entropy(end_logits) original_entropy = entropy(logits) highway_entropy = [] # highway_start_logits_all = [] # highway_end_logits_all = [] highway_logits_all = [] if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 # outputs = (total_loss,) + outputs # work with highway exits 
highway_losses = [] for highway_exit in outputs[-1]: highway_logits = highway_exit[0] highway_start_logits, highway_end_logits = highway_logits.split(1, dim=-1) highway_start_logits = highway_start_logits.squeeze(-1) highway_end_logits = highway_end_logits.squeeze(-1) if not self.training: highway_logits_all.append(highway_logits) highway_entropy.append(highway_exit[1]) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(highway_start_logits, start_positions) end_loss = loss_fct(highway_end_logits, end_positions) highway_loss = (start_loss + end_loss) / 2 highway_losses.append(highway_loss) if train_highway: outputs = (sum(highway_losses[:-1]),) + outputs # exclude the final highway, of course else: outputs = (total_loss,) + outputs if not self.training: outputs = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: outputs = (outputs[0],) +\ (highway_logits_all[output_layer],) +\ outputs[2:] ## use the highway of the last layer return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
46.963661
148
0.611366
4,064
34,894
5.010581
0.103839
0.022394
0.012523
0.012375
0.590876
0.546727
0.521976
0.500221
0.452291
0.430192
0
0.007855
0.303175
34,894
742
149
47.026954
0.829611
0.348943
0
0.502347
0
0
0.00553
0
0
0
0
0
0
1
0.044601
false
0
0.016432
0.002347
0.093897
0.002347
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
19e3c7e8cb0d8e13048dc4a21c8f8d2b1867724a
1,809
py
Python
tests/test_sar.py
chris-angeli-rft/cloud-custodian
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
[ "Apache-2.0" ]
8
2021-05-18T02:22:03.000Z
2021-09-11T02:49:04.000Z
tests/test_sar.py
chris-angeli-rft/cloud-custodian
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
[ "Apache-2.0" ]
1
2021-04-26T04:38:35.000Z
2021-04-26T04:38:35.000Z
tests/test_sar.py
chris-angeli-rft/cloud-custodian
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
[ "Apache-2.0" ]
1
2021-11-10T02:28:47.000Z
2021-11-10T02:28:47.000Z
# Copyright 2020 Kapil Thangavelu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .common import BaseTest


class SARTest(BaseTest):
    """Functional tests for the ``aws.serverless-app`` resource against
    recorded flight data."""

    def test_query(self):
        # Plain describe/query: one recorded application should match.
        session_factory = self.replay_flight_data('test_sar_query_app')
        policy = self.load_policy(
            {'name': 'test-sar', 'resource': 'aws.serverless-app'},
            session_factory=session_factory)
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]['Name'], 'GitterArchive')

    def test_cross_account(self):
        # The cross-account filter should surface the one policy statement
        # granting Deploy to a foreign account, despite the org whitelist.
        session_factory = self.replay_flight_data('test_sar_cross_account')
        policy = self.load_policy(
            {'name': 'test-sar',
             'resource': 'aws.serverless-app',
             'filters': [{
                 'type': 'cross-account',
                 'whitelist_orgids': ['o-4adkskbcff']}]},
            session_factory=session_factory)
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.maxDiff = None
        self.assertEqual(
            matched[0]['CrossAccountViolations'],
            [{'Actions': ['serverlessrepo:Deploy'],
              'Effect': 'Allow',
              'Principal': {'AWS': ['112233445566']},
              'StatementId': 'b364d84f-62d2-411c-9787-3636b2b1975c'}])
35.470588
74
0.616363
204
1,809
5.377451
0.573529
0.054695
0.023701
0.02917
0.280766
0.280766
0.280766
0.280766
0.211486
0.211486
0
0.036953
0.266998
1,809
50
75
36.18
0.790347
0.303483
0
0.322581
0
0
0.254414
0.081059
0
0
0
0
0.129032
1
0.064516
false
0
0.032258
0
0.129032
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0