hexsha
stringlengths
40
40
size
int64
2
1.02M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
245
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
245
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
2
1.02M
avg_line_length
float64
1
417k
max_line_length
int64
1
987k
alphanum_fraction
float64
0
1
content_no_comment
stringlengths
0
1.01M
is_comment_constant_removed
bool
1 class
is_sharp_comment_removed
bool
1 class
f716ae18f7020711ec64646291e557f4c140c538
51
py
Python
temp.py
Ziggareto/hackerrank_python
02875e9677d74f397d75577b8f3b38b584f31506
[ "MIT" ]
null
null
null
temp.py
Ziggareto/hackerrank_python
02875e9677d74f397d75577b8f3b38b584f31506
[ "MIT" ]
null
null
null
temp.py
Ziggareto/hackerrank_python
02875e9677d74f397d75577b8f3b38b584f31506
[ "MIT" ]
null
null
null
if __name__ == '__main__': print('hi there!!')
25.5
27
0.588235
if __name__ == '__main__': print('hi there!!')
true
true
f716ae2b03112cbb59f032b586e1bf0f10e6bd85
11,973
py
Python
cbt/views.py
belloshehu/multiple-choice-questions
abfb7ac8cc24bc3f9ee34e9505bc6c6944786ac0
[ "MIT" ]
null
null
null
cbt/views.py
belloshehu/multiple-choice-questions
abfb7ac8cc24bc3f9ee34e9505bc6c6944786ac0
[ "MIT" ]
2
2020-09-03T21:48:33.000Z
2020-09-22T08:51:14.000Z
cbt/views.py
belloshehu/multiple-choice-questions
abfb7ac8cc24bc3f9ee34e9505bc6c6944786ac0
[ "MIT" ]
null
null
null
from django.shortcuts import ( render, redirect, reverse, get_object_or_404, get_list_or_404, ) from django.db.models import Q from django.urls import reverse_lazy from django.contrib.auth import authenticate, login, logout from django.contrib import messages from cbt.forms import ( IndividualAssessmentForm, InstitutionForm, InstitutionAssessmentForm ) from multiple_choices.forms import UserRegistration, UserLogin from .models import ( InstitutionAssessment, IndividualAssessment, Institution ) from account.forms import UserLoginForm, UserCreationForm from choice.models import IndividualChoice, InstitutionChoice from question.models import IndividualQuestion, InstitutionQuestion from django.core.mail import send_mail from django.conf import settings from django.contrib.auth.models import User from django.contrib.auth.forms import PasswordResetForm from django.http import HttpResponse from django.template.loader import render_to_string from django.utils.http import urlsafe_base64_encode from django.contrib.auth.tokens import default_token_generator from django.utils.encoding import force_bytes from django.views.generic import( CreateView, DeleteView, DetailView, ListView, UpdateView, TemplateView, View ) from django.contrib.auth.mixins import LoginRequiredMixin # Create your views here. def home(request): return render (request, 'cbt/home.html', {'form':UserLoginForm}) def cbt_type(request): '''Renders Assessment types template''' return render (request, 'cbt/assessment_types.html') class AssessmentHelpView(TemplateView): template_name = 'cbt/partials/assessment_help.html' ############### # Institution Assessment type CRUD, list and details views: ############## class InstitutionAssessmentCreateView(LoginRequiredMixin ,CreateView): ''' View to create Assessment by organisation. 
''' model = InstitutionAssessment form_class = InstitutionAssessmentForm template_name = 'cbt/institution/assessment_form.html' success_url = reverse_lazy('cbt:institution-assessment-list') def form_valid(self, form): form.instance.user = self.request.user return super().form_valid(form) class InstitutionAssessmentListView(LoginRequiredMixin, ListView): model = InstitutionAssessment template_name = 'cbt/institution/assessment_list.html' context_object_name = 'assessments' class InstitutionAssessmentDetailView(LoginRequiredMixin, DetailView): model = InstitutionAssessment template_name = 'cbt/institution/assessment_detail.html' context_object_name = 'assessment' def get_context_data(self, *args, **kwargs): context = super().get_context_data(*args, **kwargs) try: context['choices'] = InstitutionChoice.objects.all() context['questions'] = InstitutionQuestion.objects.filter( assessment_id=self.kwargs.get('pk') ) except InstitutionChoice.DoesNotExist: context['choices'] = None except InstitutionQuestion.DoesNotExist: context['questions'] = None return context class InstitutionAssessmentUpdateView(LoginRequiredMixin, UpdateView): model = InstitutionAssessment form_class = InstitutionAssessmentForm template_name = 'cbt/institution/assessment_update_form.html' success_url = reverse_lazy('cbt:institution-assessment-list') context_object_name = 'assessment' class InstitutionAssessmentDeleteView(LoginRequiredMixin, DeleteView): model = InstitutionAssessment form_class = InstitutionAssessmentForm template_name = 'cbt/institution/assessment_delete_confirm.html' success_url = reverse_lazy('cbt:institution-assessment-list') context_object_name = 'assessment' # #################### # Individual assessment CRUD, details and list views: ###################### class IndividualAssessmentCreateView(LoginRequiredMixin ,CreateView): ''' View to create Assessment by individuals. 
''' model = IndividualAssessment form_class = IndividualAssessmentForm template_name = 'cbt/individual/individual_assessment.html' success_url = reverse_lazy('cbt:individual-assessment-list') def form_valid(self, form): form.instance.user = self.request.user return super().form_valid(form) class IndividualAssessmentListView(LoginRequiredMixin, ListView): model = IndividualAssessment template_name = 'cbt/individual/individual_assessment_list.html' context_object_name = 'assessments' def get_queryset(self): try: queryset = IndividualAssessment.objects.filter( user=self.request.user ) except IndividualAssessment.DoesNotExist: pass return queryset class IndividualAssessmentDetailView(LoginRequiredMixin, DetailView): model = IndividualAssessment template_name = 'cbt/individual/individual_assessment_detail.html' context_object_name = 'assessment' def get_context_data(self, *args, **kwargs): context = super().get_context_data(*args, **kwargs) try: context['choices'] = IndividualChoice.objects.filter( question__assessment__user=self.request.user ) context['questions'] = IndividualQuestion.objects.filter( assessment_id=self.kwargs.get('pk'), assessment__user=self.request.user ) except IndividualChoice.DoesNotExist: context['choices'] = None except IndividualQuestion.DoesNotExist: context['questions'] = None return context def get_question_with_passage(self): Q1 = Q(passage__title=None) Q2 = Q(passage__body=None) Q3 = Q(passage__no_of_questions=None) question_with_passage = None try: question_with_passage = IndividualQuestion.objects.filter( Q1&Q2&Q3 ) except IndividualQuestion.DoesNotExist: pass return question_with_passage class IndividualAssessmentDeleteView(LoginRequiredMixin, DeleteView): model = IndividualAssessment form_class = IndividualAssessmentForm template_name = 'cbt/individual/assessment_confirm_delete.html' success_url = reverse_lazy('cbt:individual-assessment-list') context_object_name = 'assessment' class IndividualAssessmentUpdateView(LoginRequiredMixin, 
UpdateView): model = IndividualAssessment form_class = IndividualAssessmentForm template_name = 'cbt/individual/assessment_update_form.html' success_url = reverse_lazy('cbt:individual-assessment-list') context_object_name = 'assessment' #================================ # Sample Assessments #=============================== class SampleAssessmentListView(LoginRequiredMixin, ListView): model = IndividualAssessment template_name = 'cbt/sample/sample_list.html' context_object_name = 'assessments' def get_queryset(self): try: queryset = IndividualAssessment.objects.filter( user__is_superuser=True, is_sample=True ) except IndividualAssessment.DoesNotExist: pass return queryset #========================= # Institution CRUD, list and details views #========================== class InstitutionListView(LoginRequiredMixin, ListView): model = Institution template_name = 'cbt/institution_list.html' context_object_name = 'institutions' def get_queryset(self): try: queryset = Institution.objects.filter( user=self.request.user ) except Institution.DoesNotExist: pass return queryset class InstitutionDetailView(LoginRequiredMixin, DetailView): pass class InstitutionDeleteView(LoginRequiredMixin, DeleteView): model = Institution template_name = 'cbt/institution_confirm_delete.html' success_url = reverse_lazy('cbt:institution-list') class InstitutionUpdateView(LoginRequiredMixin, UpdateView): model = Institution form_class = InstitutionForm template_name = 'cbt/institution_update_form.html' success_url = reverse_lazy('cbt:institution-list') class InstitutionCreateView(LoginRequiredMixin, CreateView): ''' View to create instance of Institution.''' model = Institution form_class = InstitutionForm template_name = 'cbt/institution_form.html' success_url = reverse_lazy('cbt:institution-list') def form_valid(self, form): form.instance.user = self.request.user return super().form_valid(form) def user_login(request): form = UserLogin() if request.method == 'POST': user = 
authenticate(username=request.POST['username'], password=request.POST['password']) if user: login(request, user) return redirect('cbt:cbt_list') messages.error(request, 'Login credentials error!') return redirect(reverse('cbt:login')) return render(request, 'cbt/login.html', {'form':form} ) def user_signup(request): form = UserRegistration() if request.method == 'POST': form = UserRegistration(request.POST) if form.is_valid: if User.objects.filter(email=request.POST['email']).exists(): messages.error(request, 'Username is already taken') return render(request, 'cbt/signup.html', {'form':form}) form.save() user_detail = request.POST email_subject = 'Welcome to CBTMaker' message = f'''Hi {user_detail.get('username')}, \n Thank you for registering with CBTMaker. \n\n Enjoy CBTMaker. \n\n CBTMaker team.''' email_sender = settings.EMAIL_HOST_USER recipient_list = [user_detail.get('email')] send_mail(email_subject, message, email_sender, recipient_list) print('Email sent') return redirect('cbt:home') else: return redirect(reverse('cbt:signup')) return render(request, 'cbt/signup.html', {'form':form}) def user_logout(request): logout(request) return redirect('cbt:home') def password_reset(request): ''' View for resetting user's password. 
''' if request.method == 'POST': password_reset_form = PasswordResetForm(request.POST) if password_reset_form.is_valid: email = request.POST['email'] associated_users = User.objects.filter(email=email) if associated_users.exists(): for user in associated_users: subject = "Password Reset Requested" email_template_name = "cbt/password/password_email.txt" c = { "email":user.email, 'domain':'127.0.0.1:8000', 'site_name': 'Website', "uid": urlsafe_base64_encode(force_bytes(user.pk)), "user": user, 'token': default_token_generator.make_token(user), 'protocol': 'http', } email = render_to_string(email_template_name, c) try: send_mail(subject, email, 'admin@example.com' , [user.email], fail_silently=False) except BadHeaderError: return HttpResponse('Invalid header found.') return redirect('password_reset_done') password_reset_form = PasswordResetForm() context = {'password_reset_form':password_reset_form} return render(request, 'cbt/password/password_reset.html', context)
35.633929
106
0.664662
from django.shortcuts import ( render, redirect, reverse, get_object_or_404, get_list_or_404, ) from django.db.models import Q from django.urls import reverse_lazy from django.contrib.auth import authenticate, login, logout from django.contrib import messages from cbt.forms import ( IndividualAssessmentForm, InstitutionForm, InstitutionAssessmentForm ) from multiple_choices.forms import UserRegistration, UserLogin from .models import ( InstitutionAssessment, IndividualAssessment, Institution ) from account.forms import UserLoginForm, UserCreationForm from choice.models import IndividualChoice, InstitutionChoice from question.models import IndividualQuestion, InstitutionQuestion from django.core.mail import send_mail from django.conf import settings from django.contrib.auth.models import User from django.contrib.auth.forms import PasswordResetForm from django.http import HttpResponse from django.template.loader import render_to_string from django.utils.http import urlsafe_base64_encode from django.contrib.auth.tokens import default_token_generator from django.utils.encoding import force_bytes from django.views.generic import( CreateView, DeleteView, DetailView, ListView, UpdateView, TemplateView, View ) from django.contrib.auth.mixins import LoginRequiredMixin def home(request): return render (request, 'cbt/home.html', {'form':UserLoginForm}) def cbt_type(request): return render (request, 'cbt/assessment_types.html') class AssessmentHelpView(TemplateView): template_name = 'cbt/partials/assessment_help.html' ment_form.html' success_url = reverse_lazy('cbt:institution-assessment-list') def form_valid(self, form): form.instance.user = self.request.user return super().form_valid(form) class InstitutionAssessmentListView(LoginRequiredMixin, ListView): model = InstitutionAssessment template_name = 'cbt/institution/assessment_list.html' context_object_name = 'assessments' class InstitutionAssessmentDetailView(LoginRequiredMixin, DetailView): model = InstitutionAssessment 
template_name = 'cbt/institution/assessment_detail.html' context_object_name = 'assessment' def get_context_data(self, *args, **kwargs): context = super().get_context_data(*args, **kwargs) try: context['choices'] = InstitutionChoice.objects.all() context['questions'] = InstitutionQuestion.objects.filter( assessment_id=self.kwargs.get('pk') ) except InstitutionChoice.DoesNotExist: context['choices'] = None except InstitutionQuestion.DoesNotExist: context['questions'] = None return context class InstitutionAssessmentUpdateView(LoginRequiredMixin, UpdateView): model = InstitutionAssessment form_class = InstitutionAssessmentForm template_name = 'cbt/institution/assessment_update_form.html' success_url = reverse_lazy('cbt:institution-assessment-list') context_object_name = 'assessment' class InstitutionAssessmentDeleteView(LoginRequiredMixin, DeleteView): model = InstitutionAssessment form_class = InstitutionAssessmentForm template_name = 'cbt/institution/assessment_delete_confirm.html' success_url = reverse_lazy('cbt:institution-assessment-list') context_object_name = 'assessment' equiredMixin, ListView): model = IndividualAssessment template_name = 'cbt/individual/individual_assessment_list.html' context_object_name = 'assessments' def get_queryset(self): try: queryset = IndividualAssessment.objects.filter( user=self.request.user ) except IndividualAssessment.DoesNotExist: pass return queryset class IndividualAssessmentDetailView(LoginRequiredMixin, DetailView): model = IndividualAssessment template_name = 'cbt/individual/individual_assessment_detail.html' context_object_name = 'assessment' def get_context_data(self, *args, **kwargs): context = super().get_context_data(*args, **kwargs) try: context['choices'] = IndividualChoice.objects.filter( question__assessment__user=self.request.user ) context['questions'] = IndividualQuestion.objects.filter( assessment_id=self.kwargs.get('pk'), assessment__user=self.request.user ) except IndividualChoice.DoesNotExist: 
context['choices'] = None except IndividualQuestion.DoesNotExist: context['questions'] = None return context def get_question_with_passage(self): Q1 = Q(passage__title=None) Q2 = Q(passage__body=None) Q3 = Q(passage__no_of_questions=None) question_with_passage = None try: question_with_passage = IndividualQuestion.objects.filter( Q1&Q2&Q3 ) except IndividualQuestion.DoesNotExist: pass return question_with_passage class IndividualAssessmentDeleteView(LoginRequiredMixin, DeleteView): model = IndividualAssessment form_class = IndividualAssessmentForm template_name = 'cbt/individual/assessment_confirm_delete.html' success_url = reverse_lazy('cbt:individual-assessment-list') context_object_name = 'assessment' class IndividualAssessmentUpdateView(LoginRequiredMixin, UpdateView): model = IndividualAssessment form_class = IndividualAssessmentForm template_name = 'cbt/individual/assessment_update_form.html' success_url = reverse_lazy('cbt:individual-assessment-list') context_object_name = 'assessment' class SampleAssessmentListView(LoginRequiredMixin, ListView): model = IndividualAssessment template_name = 'cbt/sample/sample_list.html' context_object_name = 'assessments' def get_queryset(self): try: queryset = IndividualAssessment.objects.filter( user__is_superuser=True, is_sample=True ) except IndividualAssessment.DoesNotExist: pass return queryset class InstitutionListView(LoginRequiredMixin, ListView): model = Institution template_name = 'cbt/institution_list.html' context_object_name = 'institutions' def get_queryset(self): try: queryset = Institution.objects.filter( user=self.request.user ) except Institution.DoesNotExist: pass return queryset class InstitutionDetailView(LoginRequiredMixin, DetailView): pass class InstitutionDeleteView(LoginRequiredMixin, DeleteView): model = Institution template_name = 'cbt/institution_confirm_delete.html' success_url = reverse_lazy('cbt:institution-list') class InstitutionUpdateView(LoginRequiredMixin, UpdateView): model = 
Institution form_class = InstitutionForm template_name = 'cbt/institution_update_form.html' success_url = reverse_lazy('cbt:institution-list') class InstitutionCreateView(LoginRequiredMixin, CreateView): model = Institution form_class = InstitutionForm template_name = 'cbt/institution_form.html' success_url = reverse_lazy('cbt:institution-list') def form_valid(self, form): form.instance.user = self.request.user return super().form_valid(form) def user_login(request): form = UserLogin() if request.method == 'POST': user = authenticate(username=request.POST['username'], password=request.POST['password']) if user: login(request, user) return redirect('cbt:cbt_list') messages.error(request, 'Login credentials error!') return redirect(reverse('cbt:login')) return render(request, 'cbt/login.html', {'form':form} ) def user_signup(request): form = UserRegistration() if request.method == 'POST': form = UserRegistration(request.POST) if form.is_valid: if User.objects.filter(email=request.POST['email']).exists(): messages.error(request, 'Username is already taken') return render(request, 'cbt/signup.html', {'form':form}) form.save() user_detail = request.POST email_subject = 'Welcome to CBTMaker' message = f'''Hi {user_detail.get('username')}, \n Thank you for registering with CBTMaker. \n\n Enjoy CBTMaker. 
\n\n CBTMaker team.''' email_sender = settings.EMAIL_HOST_USER recipient_list = [user_detail.get('email')] send_mail(email_subject, message, email_sender, recipient_list) print('Email sent') return redirect('cbt:home') else: return redirect(reverse('cbt:signup')) return render(request, 'cbt/signup.html', {'form':form}) def user_logout(request): logout(request) return redirect('cbt:home') def password_reset(request): if request.method == 'POST': password_reset_form = PasswordResetForm(request.POST) if password_reset_form.is_valid: email = request.POST['email'] associated_users = User.objects.filter(email=email) if associated_users.exists(): for user in associated_users: subject = "Password Reset Requested" email_template_name = "cbt/password/password_email.txt" c = { "email":user.email, 'domain':'127.0.0.1:8000', 'site_name': 'Website', "uid": urlsafe_base64_encode(force_bytes(user.pk)), "user": user, 'token': default_token_generator.make_token(user), 'protocol': 'http', } email = render_to_string(email_template_name, c) try: send_mail(subject, email, 'admin@example.com' , [user.email], fail_silently=False) except BadHeaderError: return HttpResponse('Invalid header found.') return redirect('password_reset_done') password_reset_form = PasswordResetForm() context = {'password_reset_form':password_reset_form} return render(request, 'cbt/password/password_reset.html', context)
true
true
f716af9d8b3eb607fff52cdc80c5efef400bad64
664
py
Python
exp/alto/tools/create_terminals_bi.py
Hollo1996/4lang
d167110f619e652a5cce723d211946baeae077ea
[ "MIT" ]
20
2016-03-01T07:34:17.000Z
2021-09-06T11:08:11.000Z
exp/alto/tools/create_terminals_bi.py
Hollo1996/4lang
d167110f619e652a5cce723d211946baeae077ea
[ "MIT" ]
103
2015-02-03T13:34:55.000Z
2020-07-13T11:21:22.000Z
exp/alto/tools/create_terminals_bi.py
Hollo1996/4lang
d167110f619e652a5cce723d211946baeae077ea
[ "MIT" ]
14
2015-02-03T09:00:17.000Z
2021-12-15T11:26:30.000Z
#!/usr/bin/env python import sys from hunmisc.corpustools.tsv_tools import sentence_iterator from common import sanitize_word TEMPLATE = ('{0} -> {1}_{0}\n[graph] "({1}<root> / {1})"\n' + '[fourlang] "({1}<root> / {1})"\n') def main(): seen = set() with open(sys.argv[1]) as stream: for sentence in sentence_iterator(stream, comment_tag='#'): for tok in sentence: word = sanitize_word(tok[1]) pos = tok[3] if (word, pos) not in seen: print(TEMPLATE.format(pos, word)) seen.add((word, pos)) if __name__ == "__main__": main()
24.592593
67
0.536145
import sys from hunmisc.corpustools.tsv_tools import sentence_iterator from common import sanitize_word TEMPLATE = ('{0} -> {1}_{0}\n[graph] "({1}<root> / {1})"\n' + '[fourlang] "({1}<root> / {1})"\n') def main(): seen = set() with open(sys.argv[1]) as stream: for sentence in sentence_iterator(stream, comment_tag='#'): for tok in sentence: word = sanitize_word(tok[1]) pos = tok[3] if (word, pos) not in seen: print(TEMPLATE.format(pos, word)) seen.add((word, pos)) if __name__ == "__main__": main()
true
true
f716afd528fd4bac2c2ee7f4d45b47e27b40ca16
1,143
py
Python
HDPython/tests/test_axi_fifo.py
HardwareDesignWithPython/HDPython
aade03aaa092b1684fa12bffd17674cf1c45f5ac
[ "MIT" ]
null
null
null
HDPython/tests/test_axi_fifo.py
HardwareDesignWithPython/HDPython
aade03aaa092b1684fa12bffd17674cf1c45f5ac
[ "MIT" ]
null
null
null
HDPython/tests/test_axi_fifo.py
HardwareDesignWithPython/HDPython
aade03aaa092b1684fa12bffd17674cf1c45f5ac
[ "MIT" ]
1
2021-10-20T20:08:16.000Z
2021-10-20T20:08:16.000Z
from HDPython import * from HDPython.examples import * from .helpers import Folders_isSame, vhdl_conversion, do_simulation,printf from HDPython.test_handler import add_test class test_bench_axi_fifo(v_entity): def __init__(self): super().__init__() self.architecture() def architecture(self): clkgen = clk_generator() maxCount = v_slv(32,20) pipe1 = rollingCounter(clkgen.clk,maxCount) \ | axiFifo(clkgen.clk) \ | axiFifo(clkgen.clk, depth = 5) \ | axiPrint(clkgen.clk) end_architecture() @do_simulation def test_bench_axi_fifo_sim(OutputPath, f= None): tb = test_bench_axi_fifo() return tb def test_test_bench_axi_fifo_sim(): return test_bench_axi_fifo_sim("tests/axi_fifo_sim/") add_test("axi_fifo_sim", test_test_bench_axi_fifo_sim) @vhdl_conversion def test_bench_axi_fifo_2vhdl(OutputPath, f= None): tb = test_bench_axi_fifo() return tb def test_test_bench_axi_fifo_2vhdl(): return test_bench_axi_fifo_2vhdl("tests/axi_fifo/") add_test("axi_fifo_2vhdl", test_test_bench_axi_fifo_2vhdl)
23.326531
74
0.707787
from HDPython import * from HDPython.examples import * from .helpers import Folders_isSame, vhdl_conversion, do_simulation,printf from HDPython.test_handler import add_test class test_bench_axi_fifo(v_entity): def __init__(self): super().__init__() self.architecture() def architecture(self): clkgen = clk_generator() maxCount = v_slv(32,20) pipe1 = rollingCounter(clkgen.clk,maxCount) \ | axiFifo(clkgen.clk) \ | axiFifo(clkgen.clk, depth = 5) \ | axiPrint(clkgen.clk) end_architecture() @do_simulation def test_bench_axi_fifo_sim(OutputPath, f= None): tb = test_bench_axi_fifo() return tb def test_test_bench_axi_fifo_sim(): return test_bench_axi_fifo_sim("tests/axi_fifo_sim/") add_test("axi_fifo_sim", test_test_bench_axi_fifo_sim) @vhdl_conversion def test_bench_axi_fifo_2vhdl(OutputPath, f= None): tb = test_bench_axi_fifo() return tb def test_test_bench_axi_fifo_2vhdl(): return test_bench_axi_fifo_2vhdl("tests/axi_fifo/") add_test("axi_fifo_2vhdl", test_test_bench_axi_fifo_2vhdl)
true
true
f716b0186a220c17477a6aa6ebacf35273a20db8
1,161
py
Python
test/geocoders/geonames.py
navidata/geopy
2c8e441cfb1a813fb2ab34fd41386204ad18f872
[ "MIT" ]
null
null
null
test/geocoders/geonames.py
navidata/geopy
2c8e441cfb1a813fb2ab34fd41386204ad18f872
[ "MIT" ]
null
null
null
test/geocoders/geonames.py
navidata/geopy
2c8e441cfb1a813fb2ab34fd41386204ad18f872
[ "MIT" ]
null
null
null
# -*- coding: UTF-8 -*- import unittest from geopy.compat import u from geopy.geocoders import GeoNames from test.geocoders.util import GeocoderTestBase, env @unittest.skipUnless( # pylint: disable=R0904,C0111 bool(env.get('GEONAMES_USERNAME')), "No GEONAMES_USERNAME env variable set" ) class GeoNamesTestCase(GeocoderTestBase): @classmethod def setUpClass(cls): cls.delta = 0.04 def test_unicode_name(self): """ GeoNames.geocode unicode """ # work around ConfigurationError raised in GeoNames init self.geocoder = GeoNames(username=env['GEONAMES_USERNAME']) self.geocode_run( {"query": "Mount Everest, Nepal"}, {"latitude": 27.987, "longitude": 86.925}, ) def test_reverse(self): """ GeoNames.reverse """ # work around ConfigurationError raised in GeoNames init self.geocoder = GeoNames(username=env['GEONAMES_USERNAME']) self.reverse_run( {"query": "40.75376406311989, -73.98489005863667"}, {"latitude": 40.75376406311989, "longitude": -73.98489005863667}, )
29.025
77
0.634798
import unittest from geopy.compat import u from geopy.geocoders import GeoNames from test.geocoders.util import GeocoderTestBase, env @unittest.skipUnless( bool(env.get('GEONAMES_USERNAME')), "No GEONAMES_USERNAME env variable set" ) class GeoNamesTestCase(GeocoderTestBase): @classmethod def setUpClass(cls): cls.delta = 0.04 def test_unicode_name(self): self.geocoder = GeoNames(username=env['GEONAMES_USERNAME']) self.geocode_run( {"query": "Mount Everest, Nepal"}, {"latitude": 27.987, "longitude": 86.925}, ) def test_reverse(self): self.geocoder = GeoNames(username=env['GEONAMES_USERNAME']) self.reverse_run( {"query": "40.75376406311989, -73.98489005863667"}, {"latitude": 40.75376406311989, "longitude": -73.98489005863667}, )
true
true
f716b2f64bf5f1efd19c1fea7576ec774a9a9154
532
py
Python
School/Average mark.py
Bamgm14/My-Random-Work
b9678a3a84dd8ff00efd638890cff76eb6967c1b
[ "MIT" ]
null
null
null
School/Average mark.py
Bamgm14/My-Random-Work
b9678a3a84dd8ff00efd638890cff76eb6967c1b
[ "MIT" ]
null
null
null
School/Average mark.py
Bamgm14/My-Random-Work
b9678a3a84dd8ff00efd638890cff76eb6967c1b
[ "MIT" ]
null
null
null
#To accepts marks in 5 subjects and displays the total and average mark #Above 90% Grade A* #90 - 80 % Grade A #70 – 80 % Grade B #60 – 70 % Grade C #Less than 60 Grade D a=float(input("Enter Mark(1):")) b=float(input("Enter Mark(2):")) c=float(input("Enter Mark(3):")) d=float(input("Enter Mark(4):")) e=float(input("Enter Mark(5):")) f=(a+b+c+d+e)/5 if f>=90: print ("A+") elif f<90 and f>=80: print ("A") elif f<80 and f>=70: print ("B") elif f<70 and f>=60: print ("C") else: print ("D")
22.166667
71
0.577068
a=float(input("Enter Mark(1):")) b=float(input("Enter Mark(2):")) c=float(input("Enter Mark(3):")) d=float(input("Enter Mark(4):")) e=float(input("Enter Mark(5):")) f=(a+b+c+d+e)/5 if f>=90: print ("A+") elif f<90 and f>=80: print ("A") elif f<80 and f>=70: print ("B") elif f<70 and f>=60: print ("C") else: print ("D")
true
true
f716b6b7fe18e32ba821b081861b3329d11d7a78
1,125
py
Python
csvObject/csvWriter.py
sbaker-dev/csvObject
e31668c9b71284c7e7f6516e61c9617ad7abb7b1
[ "MIT" ]
null
null
null
csvObject/csvWriter.py
sbaker-dev/csvObject
e31668c9b71284c7e7f6516e61c9617ad7abb7b1
[ "MIT" ]
null
null
null
csvObject/csvWriter.py
sbaker-dev/csvObject
e31668c9b71284c7e7f6516e61c9617ad7abb7b1
[ "MIT" ]
null
null
null
import csv def write_csv(write_out_path, name, headers, rows_to_write): """ Purpose ------- This writes out a csv file of row data with an optional header. If you don't want a header, pass None to headers Parameters ---------- :param name: The file name :type name: str :param write_out_path: The write directory :type write_out_path: str :param headers: The headers for the columns you want to write :type headers: list :param rows_to_write: A list of row data to write, each columns row should be an individual element of a list. :type rows_to_write: list :return: Nothing, just write out the file to the specified directory named the specified name :rtype: None """ if type(rows_to_write[0]) != list: rows_to_write = [[row] for row in rows_to_write] with open(f"{write_out_path}/{name}.csv", "w", newline="", encoding="utf-8") as csv_reader: csv_writer = csv.writer(csv_reader) if len(headers) > 0: csv_writer.writerow(headers) for row in rows_to_write: csv_writer.writerow(row)
28.846154
116
0.657778
import csv def write_csv(write_out_path, name, headers, rows_to_write): if type(rows_to_write[0]) != list: rows_to_write = [[row] for row in rows_to_write] with open(f"{write_out_path}/{name}.csv", "w", newline="", encoding="utf-8") as csv_reader: csv_writer = csv.writer(csv_reader) if len(headers) > 0: csv_writer.writerow(headers) for row in rows_to_write: csv_writer.writerow(row)
true
true
f716b763cbd4d44af6bf17e00908e21772e85af9
851
py
Python
service/test_service.py
theBraindonor/la-parking-tickets
9537900c54c0fb4a5ca27d2828621b9b8a5ede73
[ "CC-BY-4.0" ]
null
null
null
service/test_service.py
theBraindonor/la-parking-tickets
9537900c54c0fb4a5ca27d2828621b9b8a5ede73
[ "CC-BY-4.0" ]
9
2020-03-24T16:55:25.000Z
2022-02-17T21:56:35.000Z
service/test_service.py
theBraindonor/la-parking-tickets
9537900c54c0fb4a5ca27d2828621b9b8a5ede73
[ "CC-BY-4.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test the ticket model being serviced on localhost:8080 """ __author__ = "John Hoff" __email__ = "john.hoff@braindonor.net" __copyright__ = "Copyright 2019, John Hoff" __license__ = "Creative Commons Attribution-ShareAlike 4.0 International License" __version__ = "1.0.0" import json import requests from utility import use_project_path from model import load_sample_data_frame if __name__ == '__main__': use_project_path() for index, row in load_sample_data_frame().iterrows(): print(row.to_json()) headers = {'Content-type': 'application/json', 'Accept': 'text/plain'} response = requests.post('http://127.0.0.1:8080/ticketPrediction', data=row.to_json(), headers=headers) print(json.loads(response.text)) if index > 100: break
26.59375
111
0.692127
__author__ = "John Hoff" __email__ = "john.hoff@braindonor.net" __copyright__ = "Copyright 2019, John Hoff" __license__ = "Creative Commons Attribution-ShareAlike 4.0 International License" __version__ = "1.0.0" import json import requests from utility import use_project_path from model import load_sample_data_frame if __name__ == '__main__': use_project_path() for index, row in load_sample_data_frame().iterrows(): print(row.to_json()) headers = {'Content-type': 'application/json', 'Accept': 'text/plain'} response = requests.post('http://127.0.0.1:8080/ticketPrediction', data=row.to_json(), headers=headers) print(json.loads(response.text)) if index > 100: break
true
true
f716b8d03170ced406cf21f6d4db40052bbb480d
2,560
py
Python
notebooks/shared/nbconvert/exporters/script.py
leonbett/debuggingbook
ae1fa940c306160429232fbc93a7a7f14b44efb7
[ "MIT" ]
728
2018-09-21T03:51:04.000Z
2022-03-28T09:35:04.000Z
notebooks/shared/nbconvert/exporters/script.py
leonbett/debuggingbook
ae1fa940c306160429232fbc93a7a7f14b44efb7
[ "MIT" ]
103
2018-09-02T12:26:32.000Z
2022-02-09T07:19:08.000Z
notebooks/shared/nbconvert/exporters/script.py
leonbett/debuggingbook
ae1fa940c306160429232fbc93a7a7f14b44efb7
[ "MIT" ]
157
2018-09-02T08:00:50.000Z
2022-03-27T22:04:50.000Z
"""Generic script exporter class for any kernel language""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import entrypoints from .templateexporter import TemplateExporter from traitlets import Dict, default from .base import get_exporter class ScriptExporter(TemplateExporter): # Caches of already looked-up and instantiated exporters for delegation: _exporters = Dict() _lang_exporters = Dict() @default('template_file') def _template_file_default(self): return 'script.tpl' def _get_language_exporter(self, lang_name): """Find an exporter for the language name from notebook metadata. Uses the nbconvert.exporters.script group of entry points. Returns None if no exporter is found. """ if lang_name not in self._lang_exporters: try: Exporter = entrypoints.get_single( 'nbconvert.exporters.script', lang_name).load() except entrypoints.NoSuchEntryPoint: self._lang_exporters[lang_name] = None else: self._lang_exporters[lang_name] = Exporter(parent=self) return self._lang_exporters[lang_name] def from_notebook_node(self, nb, resources=None, **kw): langinfo = nb.metadata.get('language_info', {}) # delegate to custom exporter, if specified exporter_name = langinfo.get('nbconvert_exporter') if exporter_name and exporter_name != 'script': self.log.debug("Loading script exporter: %s", exporter_name) if exporter_name not in self._exporters: Exporter = get_exporter(exporter_name) self._exporters[exporter_name] = Exporter(parent=self) exporter = self._exporters[exporter_name] return exporter.from_notebook_node(nb, resources, **kw) # Look up a script exporter for this notebook's language lang_name = langinfo.get('name') if lang_name: self.log.debug("Using script exporter for language: %s", lang_name) exporter = self._get_language_exporter(lang_name) if exporter is not None: return exporter.from_notebook_node(nb, resources, **kw) # Fall back to plain script export self.file_extension = langinfo.get('file_extension', '.txt') 
self.output_mimetype = langinfo.get('mimetype', 'text/plain') return super(ScriptExporter, self).from_notebook_node(nb, resources, **kw)
40.634921
82
0.664844
import entrypoints from .templateexporter import TemplateExporter from traitlets import Dict, default from .base import get_exporter class ScriptExporter(TemplateExporter): _exporters = Dict() _lang_exporters = Dict() @default('template_file') def _template_file_default(self): return 'script.tpl' def _get_language_exporter(self, lang_name): if lang_name not in self._lang_exporters: try: Exporter = entrypoints.get_single( 'nbconvert.exporters.script', lang_name).load() except entrypoints.NoSuchEntryPoint: self._lang_exporters[lang_name] = None else: self._lang_exporters[lang_name] = Exporter(parent=self) return self._lang_exporters[lang_name] def from_notebook_node(self, nb, resources=None, **kw): langinfo = nb.metadata.get('language_info', {}) exporter_name = langinfo.get('nbconvert_exporter') if exporter_name and exporter_name != 'script': self.log.debug("Loading script exporter: %s", exporter_name) if exporter_name not in self._exporters: Exporter = get_exporter(exporter_name) self._exporters[exporter_name] = Exporter(parent=self) exporter = self._exporters[exporter_name] return exporter.from_notebook_node(nb, resources, **kw) lang_name = langinfo.get('name') if lang_name: self.log.debug("Using script exporter for language: %s", lang_name) exporter = self._get_language_exporter(lang_name) if exporter is not None: return exporter.from_notebook_node(nb, resources, **kw) # Fall back to plain script export self.file_extension = langinfo.get('file_extension', '.txt') self.output_mimetype = langinfo.get('mimetype', 'text/plain') return super(ScriptExporter, self).from_notebook_node(nb, resources, **kw)
true
true
f716ba644f454c2eb8166dff1191bf0ce61c89b2
598
py
Python
derg/forms.py
mihail4216/myter
4ed1e8abc3f57595858347b21c86a9a10b3ff4a4
[ "MIT" ]
null
null
null
derg/forms.py
mihail4216/myter
4ed1e8abc3f57595858347b21c86a9a10b3ff4a4
[ "MIT" ]
null
null
null
derg/forms.py
mihail4216/myter
4ed1e8abc3f57595858347b21c86a9a10b3ff4a4
[ "MIT" ]
null
null
null
# -*- coding:utf-8 -*- from django import forms # class username(forms.Form): # usernames = forms.CharField(max_length=100) class LoginForm(forms.Form): """ Лучше пользоваться формой и объявлять ее тут, тут можно менять сам тип поля, тест, число, визивик повесить на поле В самом html этого не сможем сделать, но там можно как вначале ты делал так объявлять, для простых форм """ username = forms.CharField(label=u'Имя пользователя') password = forms.CharField(label=u'Пароль', widget=forms.PasswordInput()) class LogoutForm(forms.Form): logout = forms.BooleanField
35.176471
122
0.727425
from django import forms class LoginForm(forms.Form): username = forms.CharField(label=u'Имя пользователя') password = forms.CharField(label=u'Пароль', widget=forms.PasswordInput()) class LogoutForm(forms.Form): logout = forms.BooleanField
true
true
f716ba93e72c41d843f4c01e2abc5c7f8996c487
148
py
Python
petstagram/common/urls.py
DimAntDim/SoftUni_Petstagram_Workshop
b4d6da5fa0d19de4b434046d0b7c73a40c8343b5
[ "MIT" ]
1
2021-06-14T19:50:52.000Z
2021-06-14T19:50:52.000Z
petstagram/common/urls.py
ArifRasim/Petstagram
dc754ecc2ee7184563b26d2ba3f795c2fc767b93
[ "MIT" ]
1
2021-08-09T16:31:13.000Z
2021-08-09T16:31:13.000Z
petstagram/common/urls.py
ArifRasim/Petstagram
dc754ecc2ee7184563b26d2ba3f795c2fc767b93
[ "MIT" ]
1
2022-03-15T13:50:30.000Z
2022-03-15T13:50:30.000Z
from django.urls import path from petstagram.common.views import LandingPage urlpatterns = [ path('', LandingPage.as_view(), name='index'), ]
18.5
50
0.72973
from django.urls import path from petstagram.common.views import LandingPage urlpatterns = [ path('', LandingPage.as_view(), name='index'), ]
true
true
f716bbce2341f6be93b2694916cecbdef85fba95
1,261
py
Python
demo/filepicker_demo/migrations/0002_auto_20150323_1549.py
aaronang/filepicker-django
9de61e9184ae93db9b260764cc2f45a38cb48400
[ "MIT" ]
15
2015-03-25T14:00:16.000Z
2021-04-15T17:47:02.000Z
demo/filepicker_demo/migrations/0002_auto_20150323_1549.py
aaronang/filepicker-django
9de61e9184ae93db9b260764cc2f45a38cb48400
[ "MIT" ]
3
2015-07-14T08:33:37.000Z
2018-12-15T12:58:52.000Z
demo/filepicker_demo/migrations/0002_auto_20150323_1549.py
Ink/django-filepicker
9de61e9184ae93db9b260764cc2f45a38cb48400
[ "MIT" ]
5
2015-07-14T13:30:38.000Z
2018-09-30T19:56:29.000Z
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django_filepicker.models class Migration(migrations.Migration): dependencies = [ ('filepicker_demo', '0001_initial'), ] operations = [ migrations.CreateModel( name='BasicFilesModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('text', models.CharField(max_length=64)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='FileModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('fpfile', django_filepicker.models.FPFileField(upload_to=b'uploads')), ('fpurl', models.URLField(max_length=255, null=True, blank=True)), ('mid', models.ForeignKey(to='filepicker_demo.BasicFilesModel')), ], options={ }, bases=(models.Model,), ), migrations.DeleteModel( name='TestModel', ), ]
30.756098
114
0.552736
from __future__ import unicode_literals from django.db import models, migrations import django_filepicker.models class Migration(migrations.Migration): dependencies = [ ('filepicker_demo', '0001_initial'), ] operations = [ migrations.CreateModel( name='BasicFilesModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('text', models.CharField(max_length=64)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='FileModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('fpfile', django_filepicker.models.FPFileField(upload_to=b'uploads')), ('fpurl', models.URLField(max_length=255, null=True, blank=True)), ('mid', models.ForeignKey(to='filepicker_demo.BasicFilesModel')), ], options={ }, bases=(models.Model,), ), migrations.DeleteModel( name='TestModel', ), ]
true
true
f716bc49f322c1fc8a1504767c31a6b027dcd198
583
py
Python
utils/pbtxt.py
Sarmyt/masters_python_code
800d22624e9a9c4ae023f1c5ef40bd0efee5366b
[ "MIT" ]
null
null
null
utils/pbtxt.py
Sarmyt/masters_python_code
800d22624e9a9c4ae023f1c5ef40bd0efee5366b
[ "MIT" ]
null
null
null
utils/pbtxt.py
Sarmyt/masters_python_code
800d22624e9a9c4ae023f1c5ef40bd0efee5366b
[ "MIT" ]
1
2021-06-08T18:02:53.000Z
2021-06-08T18:02:53.000Z
from object_detection.protos.string_int_label_map_pb2 import StringIntLabelMap, StringIntLabelMapItem from google.protobuf import text_format def convert_classes(classes, start=1): msg = StringIntLabelMap() for id, name in enumerate(classes, start=start): msg.item.append(StringIntLabelMapItem(id=id, name=name)) text = str(text_format.MessageToBytes(msg, as_utf8=True), 'utf-8') return text if __name__ == '__main__': txt = convert_classes(['Agent']) print(txt) with open('label_map.pbtxt', 'w') as f: f.write(txt)
30.684211
102
0.698113
from object_detection.protos.string_int_label_map_pb2 import StringIntLabelMap, StringIntLabelMapItem from google.protobuf import text_format def convert_classes(classes, start=1): msg = StringIntLabelMap() for id, name in enumerate(classes, start=start): msg.item.append(StringIntLabelMapItem(id=id, name=name)) text = str(text_format.MessageToBytes(msg, as_utf8=True), 'utf-8') return text if __name__ == '__main__': txt = convert_classes(['Agent']) print(txt) with open('label_map.pbtxt', 'w') as f: f.write(txt)
true
true
f716bc57ce244e895a0fbf1e2a341ecb30e07e9c
199
py
Python
profiles/apps.py
javokhirbek1999/pet-finder-rest-api
67e926ad7b9aa4cb03a35f69e5a52b48dc776c62
[ "PostgreSQL", "Unlicense" ]
1
2021-08-22T22:44:41.000Z
2021-08-22T22:44:41.000Z
profiles/apps.py
javokhirbek1999/pet-finder-rest-api
67e926ad7b9aa4cb03a35f69e5a52b48dc776c62
[ "PostgreSQL", "Unlicense" ]
null
null
null
profiles/apps.py
javokhirbek1999/pet-finder-rest-api
67e926ad7b9aa4cb03a35f69e5a52b48dc776c62
[ "PostgreSQL", "Unlicense" ]
null
null
null
from django.apps import AppConfig class ProfilesConfig(AppConfig): default_auto_field = 'django.db.models.BigAutoField' name = 'profiles' def ready(self): from . import signals
22.111111
56
0.713568
from django.apps import AppConfig class ProfilesConfig(AppConfig): default_auto_field = 'django.db.models.BigAutoField' name = 'profiles' def ready(self): from . import signals
true
true
f716bc7d927d8f7d83db1221db13eba13a2be4a7
958
py
Python
assets/Point3D.py
sanils2002/PYTHON-CODES
607fadc2cba4b185a5529bd101faefa08f4c3469
[ "MIT" ]
null
null
null
assets/Point3D.py
sanils2002/PYTHON-CODES
607fadc2cba4b185a5529bd101faefa08f4c3469
[ "MIT" ]
null
null
null
assets/Point3D.py
sanils2002/PYTHON-CODES
607fadc2cba4b185a5529bd101faefa08f4c3469
[ "MIT" ]
null
null
null
#Define a Point3D class that inherits from object #Inside the Point3D class, define an __init__() function that accepts self, x, y, and z, and assigns these numbers to the member variables self.x, self.y, self.z #Define a __repr__() method that returns "(%d, %d, %d)" % (self.x, self.y, self.z). This tells Python to represent this object in the following format: (x, y, z). #Outside the class definition, create a variable named my_point containing a new instance of Point3D with x=1, y=2, and z=3. #Finally, print my_point. import os from time import sleep def screen_clear(): if os.name == 'posix': _ = os.system('clear') else: _ = os.system('cls') sleep(1) screen_clear() class Point3D(object): def __init__(self, x, y, z): self.x = x self.y = y self.z = z def __repr__(self): return "(%d, %d, %d)" % (self.x, self.y, self.z) my_point = Point3D(1,2,3) print(my_point)
34.214286
166
0.64405
import os from time import sleep def screen_clear(): if os.name == 'posix': _ = os.system('clear') else: _ = os.system('cls') sleep(1) screen_clear() class Point3D(object): def __init__(self, x, y, z): self.x = x self.y = y self.z = z def __repr__(self): return "(%d, %d, %d)" % (self.x, self.y, self.z) my_point = Point3D(1,2,3) print(my_point)
true
true
f716bd1d23d98952f326deff4604828c2677d9b6
2,934
py
Python
src/crawl_data/crawl_data/spiders/XinjiangSpider.py
SmartDataLab/Policy_crawler
fb9fcb7ab701dfb98606afe9f7260f2f2e857506
[ "MIT" ]
3
2020-05-06T06:11:46.000Z
2020-05-24T15:07:22.000Z
src/crawl_data/crawl_data/spiders/XinjiangSpider.py
SmartDataLab/Policy_crawler
fb9fcb7ab701dfb98606afe9f7260f2f2e857506
[ "MIT" ]
2
2020-04-02T14:14:28.000Z
2020-04-27T12:45:48.000Z
src/crawl_data/crawl_data/spiders/XinjiangSpider.py
SmartDataLab/Policy_crawler
fb9fcb7ab701dfb98606afe9f7260f2f2e857506
[ "MIT" ]
2
2020-04-04T09:32:07.000Z
2020-07-07T09:54:23.000Z
import scrapy import pickle import os import ast from urllib import parse from scrapy.selector import Selector class XinjiangSpider(scrapy.Spider): name = "Xinjiang" if not os.path.exists('../../data/HTML_pk/%s' % name): os.makedirs('../../data/HTML_pk/%s' % name) if not os.path.exists('../../data/text/%s' % name): os.makedirs('../../data/text/%s' % name) def start_requests(self): total_page = 34 # total_page = 3 url_base = 'http://www.xinjiang.gov.cn/xinjiang/gfxwj/zfxxgk_gknrz{0}.shtml' for i in range(total_page): page = '_'+ str(i+1) if i > 0 else '' yield scrapy.Request(url=url_base.format(page), callback=self.parse) def parse(self,response): detail_page_links = [] for dd in response.css('div.gknr_list dd'): url = response.urljoin(dd.css('a::attr(href)').get()) UID = url.split('/')[-1][:-6] if '?' not in UID: detail_page_links.append(url) yield { 'UID': UID, 'title': dd.css('a::attr(title)').get(), 'date': dd.css('span::text').get(), 'FileNumber':None, 'text length':0, 'url': url, 'crawl state':'half' } yield from response.follow_all(detail_page_links, callback = self.parse_content) def parse_content(self, response): UID = response.url.split('/')[-1][:-6] doc_info_dict = {} for li in response.css('ul.clearfix li'): tmp_l = li.css('*::text').getall() if len(tmp_l) == 2: doc_info_dict[tmp_l[0]] = tmp_l[1] else: tmp_l = tmp_l[0].split(':') if len(tmp_l) == 2: doc_info_dict[tmp_l[0]] = tmp_l[1] File_num = None if '发文字号' in doc_info_dict.keys(): File_num = doc_info_dict['发文字号'] paragraph_list = response.css('div.gknbxq_detail p *::text').getall() attachment_link = response.css('div.ewebeditor_doc img::attr(src)').getall() if len(paragraph_list) == 0: paragraph_list = response.css('p *::text').getall() length = len(''.join(paragraph_list)) if length > 0: state = 'full' with open('../../data/HTML_pk/%s/%s.pkl' % (self.name,UID), 'wb') as f: pickle.dump(response.text,f) with open('../../data/text/%s/%s.txt' % (self.name,UID), 'w') as f: 
f.write('\n'.join(paragraph_list)) else: state = 'empty' return { 'UID': UID, 'FileNumber':File_num, 'mainText': paragraph_list, 'attachment_link': attachment_link, 'doc_info_dict':doc_info_dict, 'crawl state':state, 'text length':length, }
38.605263
92
0.519087
import scrapy import pickle import os import ast from urllib import parse from scrapy.selector import Selector class XinjiangSpider(scrapy.Spider): name = "Xinjiang" if not os.path.exists('../../data/HTML_pk/%s' % name): os.makedirs('../../data/HTML_pk/%s' % name) if not os.path.exists('../../data/text/%s' % name): os.makedirs('../../data/text/%s' % name) def start_requests(self): total_page = 34 url_base = 'http://www.xinjiang.gov.cn/xinjiang/gfxwj/zfxxgk_gknrz{0}.shtml' for i in range(total_page): page = '_'+ str(i+1) if i > 0 else '' yield scrapy.Request(url=url_base.format(page), callback=self.parse) def parse(self,response): detail_page_links = [] for dd in response.css('div.gknr_list dd'): url = response.urljoin(dd.css('a::attr(href)').get()) UID = url.split('/')[-1][:-6] if '?' not in UID: detail_page_links.append(url) yield { 'UID': UID, 'title': dd.css('a::attr(title)').get(), 'date': dd.css('span::text').get(), 'FileNumber':None, 'text length':0, 'url': url, 'crawl state':'half' } yield from response.follow_all(detail_page_links, callback = self.parse_content) def parse_content(self, response): UID = response.url.split('/')[-1][:-6] doc_info_dict = {} for li in response.css('ul.clearfix li'): tmp_l = li.css('*::text').getall() if len(tmp_l) == 2: doc_info_dict[tmp_l[0]] = tmp_l[1] else: tmp_l = tmp_l[0].split(':') if len(tmp_l) == 2: doc_info_dict[tmp_l[0]] = tmp_l[1] File_num = None if '发文字号' in doc_info_dict.keys(): File_num = doc_info_dict['发文字号'] paragraph_list = response.css('div.gknbxq_detail p *::text').getall() attachment_link = response.css('div.ewebeditor_doc img::attr(src)').getall() if len(paragraph_list) == 0: paragraph_list = response.css('p *::text').getall() length = len(''.join(paragraph_list)) if length > 0: state = 'full' with open('../../data/HTML_pk/%s/%s.pkl' % (self.name,UID), 'wb') as f: pickle.dump(response.text,f) with open('../../data/text/%s/%s.txt' % (self.name,UID), 'w') as f: f.write('\n'.join(paragraph_list)) else: state = 
'empty' return { 'UID': UID, 'FileNumber':File_num, 'mainText': paragraph_list, 'attachment_link': attachment_link, 'doc_info_dict':doc_info_dict, 'crawl state':state, 'text length':length, }
true
true
f716bfb88f86b0dedbf9b46d2a9ed40caaf4e047
405
py
Python
tests/benchmarkstt/test_modules.py
ioannisNoukakis/benchmarkstt
41074c9b89632e8d9ff8e0ee72187211052bfb04
[ "MIT" ]
1
2019-02-01T10:37:12.000Z
2019-02-01T10:37:12.000Z
tests/benchmarkstt/test_modules.py
ioannisNoukakis/benchmarkstt
41074c9b89632e8d9ff8e0ee72187211052bfb04
[ "MIT" ]
null
null
null
tests/benchmarkstt/test_modules.py
ioannisNoukakis/benchmarkstt
41074c9b89632e8d9ff8e0ee72187211052bfb04
[ "MIT" ]
null
null
null
from benchmarkstt.modules import Modules from benchmarkstt.normalization import cli def test_module(): modules = Modules('cli') assert modules['normalization'] is cli assert modules.normalization is cli for k, v in modules: assert modules[k] is v assert getattr(modules, k) is v keys = modules.keys() assert type(keys) is list assert 'normalization' in keys
25.3125
42
0.696296
from benchmarkstt.modules import Modules from benchmarkstt.normalization import cli def test_module(): modules = Modules('cli') assert modules['normalization'] is cli assert modules.normalization is cli for k, v in modules: assert modules[k] is v assert getattr(modules, k) is v keys = modules.keys() assert type(keys) is list assert 'normalization' in keys
true
true
f716c03e06bd0f761318cae39eb26bd38855049d
3,328
py
Python
src/sst/elements/merlin/interfaces/pymerlin-interface.py
vjleung/sst-elements
b2d4a41f1cd152ac96c9eca54000980a26a757d3
[ "BSD-3-Clause" ]
2
2019-06-10T15:32:03.000Z
2019-06-11T14:17:32.000Z
src/sst/elements/merlin/interfaces/pymerlin-interface.py
plavin/sst-elements
a84c63fa024782383272fb32ca24eb668f25b1c7
[ "BSD-3-Clause" ]
null
null
null
src/sst/elements/merlin/interfaces/pymerlin-interface.py
plavin/sst-elements
a84c63fa024782383272fb32ca24eb668f25b1c7
[ "BSD-3-Clause" ]
1
2019-09-24T13:41:56.000Z
2019-09-24T13:41:56.000Z
#!/usr/bin/env python # # Copyright 2009-2020 NTESS. Under the terms # of Contract DE-NA0003525 with NTESS, the U.S. # Government retains certain rights in this software. # # Copyright (c) 2009-2020, NTESS # All rights reserved. # # Portions are copyright of other developers: # See the file CONTRIBUTORS.TXT in the top level directory # the distribution for more information. # # This file is part of the SST software package. For license # information, see the LICENSE file in the top level directory of the # distribution. import sst from sst.merlin.base import * class LinkControl(NetworkInterface): def __init__(self): NetworkInterface.__init__(self) self._declareParams("params",["link_bw","input_buf_size","output_buf_size","vn_remap"]) self._subscribeToPlatformParamSet("network_interface") # returns subcomp, port_name def build(self,comp,slot,slot_num,job_id,job_size,logical_nid,use_nid_remap = False): sub = comp.setSubComponent(slot,"merlin.linkcontrol",slot_num) self._applyStatisticsSettings(sub) sub.addParams(self._getGroupParams("params")) sub.addParam("job_id",job_id) sub.addParam("job_size",job_size) sub.addParam("use_nid_remap",use_nid_remap) sub.addParam("logical_nid",logical_nid) return sub,"rtr_port" class ReorderLinkControl(NetworkInterface): def __init__(self): NetworkInterface.__init__(self) self._declareClassVariables(["network_interface"]) self._setCallbackOnWrite("network_interface",self._network_interface_callback) self.network_interface = PlatformDefinition.getPlatformDefinedClassInstance("reorderlinkcontrol_network_interface") if not self.network_interface: self.network_interface = LinkControl() # This is just a default, can be overwritten self._unlockVariable("network_interface") def _network_interface_callback(self, variable_name, value): if not value: return self._lockVariable("network_interface") self._setPassthroughTarget(value) def setNetworkInterface(self,interface): self.network_interface = interface def 
build(self,comp,slot,slot_num,job_id,job_size,nid,use_nid_map = False): sub = comp.setSubComponent(slot,"merlin.reorderlinkcontrol",slot_num) #self._applyStatisticsSettings(sub) #sub.addParams(self._params) return self.network_interface.build(sub,"networkIF",0,job_id,job_size,nid,use_nid_map) # Functions to enable statistics def enableAllStatistics(self,stat_params,apply_to_children=False): # no stats of our own, simply pass to network interface if self.network_interface: self.network_interface.enableAllStatistics(stat_params,apply_to_children) def enableStatistics(self,stats,stat_params,apply_to_children=False): # no stats of our own, simply pass to network interface if self.network_interface: self.network_interface.enableStatistics(stats,stat_params,apply_to_children) def setStatisticLoadLevel(self,level,apply_to_children=False): # no stats of our own, simply pass to network interface if self.network_interface: self.network_intrface.setStatisticLoadLevel(level,apply_to_children)
41.08642
123
0.735577
import sst from sst.merlin.base import * class LinkControl(NetworkInterface): def __init__(self): NetworkInterface.__init__(self) self._declareParams("params",["link_bw","input_buf_size","output_buf_size","vn_remap"]) self._subscribeToPlatformParamSet("network_interface") def build(self,comp,slot,slot_num,job_id,job_size,logical_nid,use_nid_remap = False): sub = comp.setSubComponent(slot,"merlin.linkcontrol",slot_num) self._applyStatisticsSettings(sub) sub.addParams(self._getGroupParams("params")) sub.addParam("job_id",job_id) sub.addParam("job_size",job_size) sub.addParam("use_nid_remap",use_nid_remap) sub.addParam("logical_nid",logical_nid) return sub,"rtr_port" class ReorderLinkControl(NetworkInterface): def __init__(self): NetworkInterface.__init__(self) self._declareClassVariables(["network_interface"]) self._setCallbackOnWrite("network_interface",self._network_interface_callback) self.network_interface = PlatformDefinition.getPlatformDefinedClassInstance("reorderlinkcontrol_network_interface") if not self.network_interface: self.network_interface = LinkControl() self._unlockVariable("network_interface") def _network_interface_callback(self, variable_name, value): if not value: return self._lockVariable("network_interface") self._setPassthroughTarget(value) def setNetworkInterface(self,interface): self.network_interface = interface def build(self,comp,slot,slot_num,job_id,job_size,nid,use_nid_map = False): sub = comp.setSubComponent(slot,"merlin.reorderlinkcontrol",slot_num) return self.network_interface.build(sub,"networkIF",0,job_id,job_size,nid,use_nid_map) def enableAllStatistics(self,stat_params,apply_to_children=False): if self.network_interface: self.network_interface.enableAllStatistics(stat_params,apply_to_children) def enableStatistics(self,stats,stat_params,apply_to_children=False): if self.network_interface: self.network_interface.enableStatistics(stats,stat_params,apply_to_children) def setStatisticLoadLevel(self,level,apply_to_children=False): if 
self.network_interface: self.network_intrface.setStatisticLoadLevel(level,apply_to_children)
true
true
f716c359878a902aee90be96fdffab16acbcdd2b
110,391
py
Python
articles/inversion.py
Solara570/demo-solara
3ce6df1fd68089c427bbd46fb0857e8b76428ca6
[ "MIT" ]
79
2017-09-25T04:42:05.000Z
2022-03-24T06:10:56.000Z
articles/inversion.py
Solara570/demo-solara
3ce6df1fd68089c427bbd46fb0857e8b76428ca6
[ "MIT" ]
1
2018-04-13T14:12:00.000Z
2018-04-13T14:12:00.000Z
articles/inversion.py
Solara570/demo-solara
3ce6df1fd68089c427bbd46fb0857e8b76428ca6
[ "MIT" ]
13
2017-09-29T03:20:20.000Z
2022-03-07T13:18:16.000Z
#coding=utf-8 ################################################################################################ # A 3-part series on circle inversion, Descartes' theorem along with its variants, and more! # # # # Part 1: An Introduction to Circle Inversion - https://zhuanlan.zhihu.com/p/86644341 # # Part 2: Four Circles & Descartes' Theorem (1) - https://zhuanlan.zhihu.com/p/105819963 # # Part 3: Four Circles & Descartes' Theorem (2) - https://zhuanlan.zhihu.com/p/106874090 # ################################################################################################ import numpy as np import itertools as it from manimlib.constants import * from manimlib.utils.color import * from manimlib.utils.space_ops import * from manimlib.utils.simple_functions import * from manimlib.animation.composition import AnimationGroup from manimlib.animation.creation import ShowCreation, Write, DrawBorderThenFill from manimlib.animation.fading import FadeOut, FadeInFromDown from manimlib.animation.transform import Transform, ReplacementTransform, MoveToTarget, ApplyMethod from manimlib.mobject.mobject import Mobject from manimlib.mobject.coordinate_systems import Axes, NumberPlane, ThreeDAxes from manimlib.mobject.geometry import Circle, Line, Dot, SmallDot, Square, Polygon, RegularPolygon, \ Arrow, Sector, Vector from manimlib.mobject.numbers import DecimalNumber from manimlib.mobject.value_tracker import ValueTracker from manimlib.mobject.shape_matchers import BackgroundRectangle, SurroundingRectangle from manimlib.mobject.three_dimensions import Sphere from manimlib.mobject.svg.brace import Brace from manimlib.mobject.svg.tex_mobject import TexMobject, TextMobject from manimlib.mobject.types.vectorized_mobject import VMobject, VGroup, VectorizedPoint, DashedVMobject from manimlib.scene.scene import Scene from manimlib.scene.three_d_scene import ThreeDScene from short.apollonian_gasket import calc_centers_by_radii, calc_new_agc_info, AGCircle, \ ApollonianGasket, 
ApollonianGasketScene from short.ford_circles import get_coprime_numers_by_denom, get_stroke_width_by_height, \ AssembledFraction, ZoomInOnFordCircles ##### ## Constants MAX_NORM = 1e2 CB_DARK = "#825201" CB_LIGHT = "#B69B4C" ##### ## General Methods def complex_inversion(z, z0, r): return z0 + np.conjugate(r**2 / (z-z0)) def R3_inversion(point, inv_center, radius): z = R3_to_complex(point) z0 = R3_to_complex(inv_center) w = complex_inversion(z, z0, radius) return complex_to_R3(w) def inversion(point, inv_center, radius): # Just a rename return R3_inversion(point, inv_center, radius) def is_close_in_R3(p1, p2, thres = 1e-6): """Check if two points are close in R^3.""" return np.linalg.norm(p1 - p2) < thres def is_close(z1, z2, thres = 1e-6): """Check if two complex numbers are close to each other.""" return np.abs(z1 - z2) < thres def get_tangent_point(c1, c2, thres = 1e-4): """Return the tangency point of circles 'c1' and 'c2'.""" p1 = c1.get_center() p2 = c2.get_center() r1 = c1.get_height() / 2 r2 = c2.get_height() / 2 d = get_norm(p2 - p1) if is_close(d, r1-r2, thres): return p1 + r1*normalize(p2-p1) elif is_close(d, r2-r1, thres): return p2 + r2*normalize(p1-p2) elif is_close(d, r1+r2, thres): return (r1*p2+r2*p1) / (r1+r2) else: raise Exception("These two circles aren't tangent.") def get_para_and_perp_components(point, lp1, lp2): v = lp2 - point v0 = lp2 - lp1 v_para = fdiv(np.dot(v, v0), np.dot(v0, v0)) * v0 v_perp = v - v_para return v_para, v_perp def distance_to_the_line(point, lp1, lp2): """Return the distance from 'point' to the line given by 'lp1' and 'lp2'.""" v_para, v_perp = get_para_and_perp_components(point, lp1, lp2) return np.linalg.norm(v_perp) def is_on_the_line(point, lp1, lp2, thres = 1e-6): """Check if 'point' is on the line given by two points 'lp1' and 'lp2'.""" return is_close(distance_to_the_line(point, lp1, lp2), thres) def get_random_vector(max_step): """Return a random vector with a maximum length of 'max_step'.""" return 
max_step*np.random.random() * rotate_vector(RIGHT, TAU*np.random.random()) def get_nearest_int(num): return int(np.round(num, 0)) def solve_quadratic_equation(a, b, c): delta = b**2 - 4*a*c x1 = (-b-np.sqrt(delta)) /(2*a) x2 = (-b+np.sqrt(delta)) /(2*a) print(a, b, c, x1, x2) return x1, x2 def get_next_terms(k1, k2, k3): """Return two adjacent terms in the loxodromic sequence.""" b = -2*(k1+k2+k3) c = 2*(k1**2+k2**2+k3**2) - (k1+k2+k3)**2 return list(map(get_nearest_int, solve_quadratic_equation(1, b, c))) def get_sequence_string(arr): arr_copy = list(map(str, arr)) arr_copy.insert(0, "...") arr_copy.append("...") return ", ".join(arr_copy) ##### ## Mobjects class FineCircle(Circle): CONFIG = { # In manim, circles are approximated by multiple cubic Beziers, # so it's necessary to increase the number of components for # high-precision calculations. "num_components": 100, } class ExtendedLine(Line): def __init__(self, sp, ep, n = 10, **kwargs): unit_vec = normalize(ep - sp) new_sp = sp - n * unit_vec new_ep = ep + n * unit_vec Line.__init__(self, new_sp, new_ep, **kwargs) class DotLabel(VMobject): CONFIG = { "position" : UP, "label_buff" : 0.25, } def __init__(self, label_text, dot, **kwargs): VMobject.__init__(self, **kwargs) self.dot = dot label = TexMobject(label_text, **kwargs) if self.position is not None: label.add_updater( lambda l: l.next_to(self.dot.get_center(), self.position, buff = self.label_buff) ) self.add(label) def set_label(self, label): label.next_to(self.dot.get_center()) class TwoDotsSegment(Line): def __init__(self, dot_1, dot_2, **kwargs): self.dot_1 = dot_1 self.dot_2 = dot_2 sp, ep = self.get_dots_centers() Line.__init__(self, start = sp, end = ep, **kwargs) self.add_updater(self.set_start_and_end) def get_dots_centers(self): return self.dot_1.get_center(), self.dot_2.get_center() def set_start_and_end(self, line_mob): sp, ep = self.get_dots_centers() line_mob.put_start_and_end_on(sp, ep) class LengthLabel(DecimalNumber): CONFIG = { 
"num_decimal_places" : 3, "label_height" : 0.3, "label_buff" : 0.3, "offset" : 0, "is_on_opposite_side" : False, } def __init__(self, line_mob, **kwargs): DecimalNumber.__init__(self, **kwargs) self.line_mob = line_mob self.add_updater(self.set_label) def set_label(self, label): label.set_value(self.line_mob.get_length()) label.set_height(self.label_height) label.rotate(self.line_mob.get_angle()) side_factor = -1 if self.is_on_opposite_side else 1 label.move_to( self.line_mob.get_center() \ + self.line_mob.get_vector() / 2 * self.offset \ + side_factor * rotate_vector(self.line_mob.get_unit_vector(), PI/2) * self.label_buff ) def set_offset(self, offset): self.offset = offset return self def switch_side(self): self.is_on_opposite_side = not self.is_on_opposite_side return self class ManyDotsPolygon(VMobject): def __init__(self, *dots, **kwargs): VMobject.__init__(self, **kwargs) self.dots = dots dots_centers = self.get_dots_centers() polygon = Polygon(*dots_centers, **kwargs) polygon.add_updater(self.set_vertices) self.add(polygon) def get_dots_centers(self): return [dot.get_center() for dot in self.dots] def set_vertices(self, polygon_mob): vertices = self.get_dots_centers() polygon_mob.set_points_as_corners([*vertices, vertices[0]]) class AngleIndicator(VMobject): CONFIG = { "color" : RED, "radius" : 0.2, "fill_opacity" : 0.6, "is_minor_arc" : True, } def __init__(self, dot_A, dot_C, dot_B, **kwargs): VMobject.__init__(self, **kwargs) self.dot_A = dot_A self.dot_C = dot_C self.dot_B = dot_B sector = Sector() sector.add_updater(self.set_sector) self.add(sector) self.sector = sector def get_point_center(self, point_or_mob): if isinstance(point_or_mob, Mobject): return point_or_mob.get_center() else: return point_or_mob def get_point_centers(self): return tuple(map(self.get_point_center, [self.dot_A, self.dot_C, self.dot_B])) def set_sector(self, mob): pt_A, pt_C, pt_B = self.get_point_centers() start_angle, angle = self.get_angles() outer_radius = min([self.radius, 
get_norm(pt_C - pt_A)/2, get_norm(pt_C - pt_B)/2]) new_sector = Sector( start_angle = start_angle, angle = angle, outer_radius = outer_radius, color = self.color, fill_opacity = self.fill_opacity, stroke_width = 0 ) new_sector.move_arc_center_to(self.get_point_center(self.dot_C)) mob.become(new_sector) def get_angles(self): pt_A, pt_C, pt_B = self.get_point_centers() start_angle = angle_of_vector(pt_A - pt_C) end_angle = angle_of_vector(pt_B - pt_C) angle = (end_angle - start_angle) % TAU if self.is_minor_arc and angle > PI: angle -= TAU return start_angle, angle class RightAngleIndicator(VMobject): CONFIG = { "color" : WHITE, "side_length" : 0.2, "line_width" : 1, "square_opacity" : 0.5, } def __init__(self, dot_A, dot_C, dot_B, **kwargs): VMobject.__init__(self, **kwargs) self.dot_A = dot_A self.dot_C = dot_C self.dot_B = dot_B line = VMobject(stroke_width = self.line_width, fill_opacity = 0) square = VMobject(stroke_width = 0, fill_color = self.color, fill_opacity = self.square_opacity) line.add_updater(self.set_line) square.add_updater(self.set_square) self.add(square, line) self.line = line self.square = square def get_point_center(self, point_or_mob): if isinstance(point_or_mob, Mobject): return point_or_mob.get_center() else: return point_or_mob def get_point_centers(self): return tuple(map(self.get_point_center, [self.dot_A, self.dot_C, self.dot_B])) def get_norm_vectors(self): pt_A, pt_C, pt_B = self.get_point_centers() norm_vec_CA = normalize(pt_A - pt_C) norm_vec_CB = normalize(pt_B - pt_C) return norm_vec_CA, norm_vec_CB def get_corner_points(self): pt_A, pt_C, pt_B = self.get_point_centers() norm_vec_CA, norm_vec_CB = self.get_norm_vectors() side_length = min([self.side_length, get_norm(pt_A - pt_C)/2, get_norm(pt_B - pt_C)/2]) return ( pt_C, pt_C + norm_vec_CA * side_length, pt_C + norm_vec_CA * side_length + norm_vec_CB * side_length, pt_C + norm_vec_CB * side_length ) def set_line(self, line_mob): p, q, r, s = self.get_corner_points() 
line_mob.set_points_as_corners([q, r, s]) def set_square(self, square_mob): p, q, r, s = self.get_corner_points() square_mob.set_points_as_corners([p, q, r, s, p]) class InversedDot(VMobject): CONFIG = { "color" : PINK, "stroke_width" : 3, "fill_opacity" : 1, "is_hollow" : True, "center_color" : BLACK, } def __init__(self, orig_dot, circle, **kwargs): self.orig_dot = orig_dot self.circle = circle VMobject.__init__(self, **kwargs) def generate_points(self): if self.is_hollow: self.fill_color = self.center_color else: self.fill_color = self.color self.stroke_width = 0 inv_dot = Dot(ORIGIN, color = self.color) self.inv_dot = inv_dot self.add(inv_dot) self.add_updater_to_inversed_dot() def add_updater_to_inversed_dot(self): self.inv_dot.add_updater(self.move_inversed_dot) def move_inversed_dot(self, inv_dot): point = self.orig_dot.get_center() inv_center = self.circle.get_center() radius = self.circle.get_height() / 2. if is_close_in_R3(point, inv_center): pass else: inv_dot.move_to(inversion(point, inv_center, radius)) class InversedVMobject(VMobject): CONFIG = { "is_analytical" : True, "match_original_style" : False, "use_dashed_vmob" : True, "dashed_vmob_config": { "num_dashes" : 50, "positive_space_ratio" : 0.6, }, } def __init__(self, orig_vmob, circle, **kwargs): VMobject.__init__(self, **kwargs) self.orig_vmob = orig_vmob self.circle = circle self.orig_vmob_type = "Others" self.initialize_orig_vmob_type() self.add_updater_to_inversed_vmobject() def add_updater_to_inversed_vmobject(self): self.add_updater(self.set_inversed_vmobject) def initialize_orig_vmob_type(self): if isinstance(self.orig_vmob, Line): self.orig_vmob_type = "Line" elif isinstance(self.orig_vmob, Circle): self.orig_vmob_type = "Circle" else: self.orig_vmob_type = "Others" def set_orig_vmob_type(self, orig_vmob_type): self.orig_vmob_type = orig_vmob_type def set_inversed_vmobject(self, inv_vmob): inv_center = self.circle.get_center() radius = self.circle.get_height() / 2. 
if self.is_analytical and self.orig_vmob_type == "Line": # If it's a line... lp1, lp2 = self.orig_vmob.get_start_and_end() if is_on_the_line(inv_center, lp1, lp2): # If it's a line passing through the inversion center, # then the inversion is just the line itself. temp_vmob = ExtendedLine(lp1, lp2) else: # If it's a line NOT through the inversion center, # then the inversion is a circle passing through the inversion center. v_para, v_perp = get_para_and_perp_components(inv_center, lp1, lp2) d = distance_to_the_line(inv_center, lp1, lp2) # d = np.linalg.norm(v_perp) inv_vmob_radius = fdiv(radius**2, 2*d) closepoint = inv_center + v_perp inv_vmob_closepoint = inversion(closepoint, inv_center, radius) inv_vmob_center = (inv_center + inv_vmob_closepoint) / 2. temp_vmob = FineCircle(radius = inv_vmob_radius) temp_vmob.move_to(inv_vmob_center) elif self.is_analytical and self.orig_vmob_type == "Circle": # If it's a circle... orig_vmob_center = self.orig_vmob.get_center() orig_vmob_radius = self.orig_vmob.get_height() / 2. center_vec = orig_vmob_center - inv_center d = get_norm(center_vec) if is_close(orig_vmob_radius, d): # If it's a circle passing through the inversion center, # then the inversion is a line perps to the line through the circle centers. foot = inv_center + fdiv(radius**2, 2*d) * normalize(center_vec) lp1 = foot + rotate_vector(center_vec, PI/2) lp2 = foot + rotate_vector(center_vec, -PI/2) temp_vmob = ExtendedLine(lp1, lp2) else: # If it's a circle NOT through the inversion center, # then the inversion is a circle NOT through the inversion center. dp1 = orig_vmob_center - orig_vmob_radius * normalize(center_vec) dp2 = orig_vmob_center + orig_vmob_radius * normalize(center_vec) inv_dp1 = inversion(dp1, inv_center, radius) inv_dp2 = inversion(dp2, inv_center, radius) inv_vmob_radius = get_norm(inv_dp2 - inv_dp1) / 2. inv_vmob_center = (inv_dp2 + inv_dp1) / 2. 
temp_vmob = FineCircle(radius = inv_vmob_radius) temp_vmob.move_to(inv_vmob_center) else: temp_vmob = self.orig_vmob.copy() temp_vmob.apply_function(lambda p: inversion(p, inv_center, radius)) if self.use_dashed_vmob: temp_vmob = DashedVMobject(temp_vmob, **self.dashed_vmob_config) inv_vmob.become(temp_vmob) if self.match_original_style: inv_vmob.match_style(self.orig_vmob) class FourCirclesNormalForm(VMobject): CONFIG = { "circle_colors" : [MAROON_B, RED, GREEN, BLUE], "r" : 1.2, "l" : 9, "use_dashed_vmob" : True, "dashed_vmob_config" : { "num_dashes" : 30, "positive_space_ratio" : 0.6, } } def __init__(self, **kwargs): VMobject.__init__(self, **kwargs) c1 = Circle(radius = self.r, **kwargs).shift(self.r*LEFT) c2 = Circle(radius = self.r, **kwargs).shift(self.r*RIGHT) c3 = Line(self.l*LEFT, self.l*RIGHT, **kwargs).shift(self.r*DOWN) c4 = Line(self.l*LEFT, self.l*RIGHT, **kwargs).shift(self.r*UP) for mob, color in zip([c1, c2, c3, c4], self.circle_colors): mob.set_color(color) if self.use_dashed_vmob: self.add(DashedVMobject(mob, **self.dashed_vmob_config)) else: self.add(mob) class DescartesFourCircles(VMobject): CONFIG = { "outer_circle_index" : None, "orig_circle_color" : BLUE, "new_circle_color" : YELLOW, "show_new_circles" : True, "show_new_circles_centers" : False, } def __init__(self, ccdot1, ccdot2, ccdot3, **kwargs): self.ccdot1 = ccdot1 self.ccdot2 = ccdot2 self.ccdot3 = ccdot3 VMobject.__init__(self, **kwargs) self.add_orig_circles() self.add_orig_circles_updaters() self.generate_new_circles() if self.show_new_circles: self.add_new_circles() if self.show_new_circles_centers: self.add_new_circles_centers() def add_orig_circles(self): self.c1, self.c2, self.c3 = self.cs = VGroup(*[ Circle(arc_center = cc, radius = r, color = self.orig_circle_color) for cc, r in zip(self.get_orig_circle_centers(), self.calc_radii_by_centers()) ]) self.add(self.cs) def add_orig_circles_updaters(self): def get_center(k): return self.get_orig_circle_centers()[k] def 
get_abs_radius(k): return np.abs(self.calc_radii_by_centers()[k]) # Since enumerate() won't work here (seriously?), # I have to use a much more direct approach - list them all. self.c1.add_updater(lambda c: c.move_to(get_center(0))) self.c1.add_updater(lambda c: c.set_height(2*get_abs_radius(0))) self.c2.add_updater(lambda c: c.move_to(get_center(1))) self.c2.add_updater(lambda c: c.set_height(2*get_abs_radius(1))) self.c3.add_updater(lambda c: c.move_to(get_center(2))) self.c3.add_updater(lambda c: c.set_height(2*get_abs_radius(2))) def get_orig_circles(self): return self.cs def get_orig_circle_centers(self): return [dot.get_center() for dot in (self.ccdot1, self.ccdot2, self.ccdot3)] def get_orig_circle_radii(self): return self.calc_radii_by_centers() def get_orig_circle_curvatures(self): return [fdiv(1, radius) for radius in self.calc_radii_by_centers()] def calc_radii_by_centers(self): p1, p2, p3 = self.get_orig_circle_centers() d12 = get_norm(p2 - p1) d23 = get_norm(p3 - p2) d13 = get_norm(p3 - p1) sum_r = (d12 + d23 + d13) / 2. if self.outer_circle_index == 1: # If circle 1 contains other two circles... return [-sum_r, sum_r-d12, sum_r-d13] elif self.outer_circle_index == 2: # If circle 2 contains other two circles... return [sum_r-d12, -sum_r, sum_r-d23] elif self.outer_circle_index == 3: # If circle 3 contains other two circles... 
return [sum_r-d13, sum_r-d23, -sum_r] else: return [sum_r-d23, sum_r-d13, sum_r-d12] def generate_new_circles(self): self.c4_1, self.c4_2 = self.new_circles = VGroup(*[ Circle(arc_center = new_cc, radius = new_r, color = self.new_circle_color) for new_cc, new_r in self.calc_new_circles_centers_and_radii() ]) self.generate_new_circles_centers() self.add_new_circles_updaters() def calc_new_circles_centers_and_radii(self): k1, k2, k3 = self.get_orig_circle_curvatures() z1, z2, z3 = map(R3_to_complex, self.get_orig_circle_centers()) # Calculate the curvatures of new circles sum_k = k1 + k2 + k3 sum_k2 = k1**2 + k2**2 + k3**2 sum_k_cycle_prod = k1*k2 + k2*k3 + k3*k1 b = (-2)*sum_k c = sum_k2 - 2*sum_k_cycle_prod delta = b**2 - 4*c k4_1 = (-b + np.sqrt(delta)) / 2 k4_2 = (-b - np.sqrt(delta)) / 2 # Calculate the centers of new circles # arxiv.org/abs/math/0101066v1 - Eqn 2.3 sum_kz = k1*z1 + k2*z2 + k3*z3 sum_k2z = k1**2 * z1 + k2**2 * z2 + k3**2 * z3 coeff_1 = (sum_k - k4_1) * k4_1 const_1 = 2 * sum_k2z - (sum_k + k4_1) * sum_kz z4_1 = const_1 / coeff_1 coeff_2 = (sum_k - k4_2) * k4_2 const_2 = 2 * sum_k2z - (sum_k + k4_2) * sum_kz z4_2 = const_2 / coeff_2 return [[complex_to_R3(z4_1), fdiv(1, k4_1)], [complex_to_R3(z4_2), fdiv(1, k4_2)]] def generate_new_circles_centers(self): ccdot4_1 = Dot(color = self.new_circle_color) ccdot4_1.add_updater(lambda m: m.move_to(self.c4_1.get_center())) ccdot4_2 = Dot(color = self.new_circle_color) ccdot4_2.add_updater(lambda m: m.move_to(self.c4_2.get_center())) self.ccdot4_1 = ccdot4_1 self.ccdot4_2 = ccdot4_2 def add_new_circles_updaters(self): def get_new_center(k): return self.calc_new_circles_centers_and_radii()[k][0] def get_abs_new_radius(k): return np.abs(self.calc_new_circles_centers_and_radii()[k][1]) # Since enumerate() won't work here (seriously?), # I have to use a much more direct approach - list them all. 
self.c4_1.add_updater(lambda c: c.move_to(get_new_center(0))) self.c4_1.add_updater(lambda c: c.set_height(2*get_abs_new_radius(0))) self.c4_2.add_updater(lambda c: c.move_to(get_new_center(1))) self.c4_2.add_updater(lambda c: c.set_height(2*get_abs_new_radius(1))) def add_new_circles(self): if not hasattr(self, "new_circles"): self.new_circles = generate_new_circles() self.add(self.new_circles) def get_new_circles(self): if not hasattr(self, "new_circles"): self.new_circles = generate_new_circles() return self.new_circles def add_new_circles_centers(self): self.add(self.ccdot4_1, self.ccdot4_2) def remove_new_circles_center(self): self.remove(self.ccdot4_1, self.ccdot4_2) ##### ## Inversion Introduction Scenes class ConceptsInInversion(Scene): CONFIG = { "color_circle" : YELLOW, "color_radius" : RED, "color_P" : WHITE, } def construct(self): self.add_backgrounds() self.move_around_point_P() def add_backgrounds(self): circle_O = Circle(radius = 3.5, color = self.color_circle) circle_O.shift(3*LEFT) remark_circle = TextMobject("反演圆", color = self.color_circle) remark_circle.next_to(circle_O.get_bottom(), UP) dot_O = Dot(circle_O.get_center(), color = self.color_circle) label_O = DotLabel("O", dot_O, color = self.color_circle, position = DOWN) remark_O = TextMobject("反演中心", color = self.color_circle) remark_O.next_to(label_O, LEFT, buff = 0.15) radius = Line(circle_O.get_center(), circle_O.get_left()) label_radius = TexMobject("R").scale(0.8) remark_radius = TextMobject("反演幂").scale(0.8) brace_radius = Brace(radius, UP) brace_radius.put_at_tip(label_radius) remark_radius.next_to(label_radius, LEFT, buff = 0.15) group_radius = VGroup(radius, label_radius, brace_radius, remark_radius) group_radius.set_color(self.color_radius) group_radius.rotate(-PI/12, about_point = dot_O.get_center()) def_inversion = TextMobject("反演变换:$P \\mapsto P'$") rlt_inversion = TexMobject("|OP| \\times |OP'|=", "R^2") rlt_inversion.next_to(def_inversion, DOWN, aligned_edge = RIGHT) 
rlt_inversion[-1].set_color(self.color_radius) remarks = VGroup(def_inversion, rlt_inversion) remarks.to_corner(DR) dot_P = Dot(LEFT, color = self.color_P) label_P = DotLabel("P", dot_P, color = self.color_P, position = DL, label_buff = 0.2) dot_Pi = InversedDot(dot_P, circle_O, color = self.color_P) label_Pi = DotLabel("P'", dot_Pi, color = self.color_P, position = DR, label_buff = 0.2) line_OP = TwoDotsSegment(dot_O, dot_P, stroke_width = 2) line_OPi = TwoDotsSegment(dot_O, dot_Pi, stroke_width = 2) self.add(remarks) self.add(group_radius) self.add(circle_O, dot_O, label_O, remark_O, remark_circle) self.add(dot_P, dot_Pi, label_P, label_Pi, line_OP, line_OPi) self.circle_O = circle_O self.dot_P = dot_P def move_around_point_P(self): self.dot_P.save_state() for dx, dy in [(-0.2, 0.3), (0.1, -0.4), (4, 0.3), (1, 1)]: vec = np.array([dx, dy, 0]) self.play(self.dot_P.shift, vec, run_time = 1) self.wait() self.play(self.dot_P.move_to, self.circle_O.get_right()) self.wait() self.play(self.dot_P.restore, run_time = 1) self.wait() class InversionExamples(Scene): CONFIG = { "color_circle" : YELLOW, } def construct(self): circle_O = Circle(radius = 3.5, color = self.color_circle) circle_O.shift(3*LEFT) remark_circle = TextMobject("反演圆", color = self.color_circle) remark_circle.next_to(circle_O.get_bottom(), UP) dot_O = Dot(circle_O.get_center(), color = self.color_circle) label_O = DotLabel("O", dot_O, color = self.color_circle, position = DOWN) init_shape = Square(side_length = 1.2, color = BLUE).rotate(TAU/13) init_shape.next_to(circle_O.get_right(), LEFT, buff = 0.5) init_shape.save_state() inv_shape = InversedVMobject(init_shape, circle_O, use_dashed_vmob = False) new_shapes = [ RegularPolygon(n = 6, start_angle = PI/7, color = PINK).scale(0.8), TexMobject("42", color = RED).scale(2.5).rotate(-PI/9), TexMobject("\\pi", color = MAROON_B).scale(5).rotate(PI/15), ] self.add(circle_O, remark_circle, dot_O, label_O) self.add(init_shape, inv_shape) for new_shape in 
new_shapes: # new_shape.set_color(BLUE) new_shape.next_to(circle_O.get_right(), LEFT, buff = 0.6) self.play(Transform(init_shape, new_shape), run_time = 1) self.wait() init_shape.generate_target() init_shape.target.become(new_shape) init_shape.target.shift(get_random_vector(0.5)) random_angle = 0.5*np.random.random() init_shape.target.rotate(random_angle) self.play(MoveToTarget(init_shape, path_arc = random_angle, run_time = 1)), self.wait() self.play(ApplyMethod(init_shape.restore)) self.wait() class LineToLineInversion(Scene): CONFIG = { "color_circle" : YELLOW, "color_orig" : BLUE, "color_inv" : RED, } def construct(self): self.add_backgrounds() self.show_line_to_line_inversion() def add_backgrounds(self): circle_O = Circle(radius = 2.5, color = self.color_circle) remark_circle = TextMobject("反演圆", color = self.color_circle) remark_circle.next_to(circle_O.get_bottom(), UP) dot_O = Dot(circle_O.get_center(), color = self.color_circle) label_O = DotLabel("O", dot_O, color = self.color_circle, position = DOWN) conclusion = TextMobject("经过反演中心的直线", "$\\mapsto$", "经过反演中心的直线") conclusion.scale(0.8) conclusion[0].set_color(self.color_orig) conclusion[2].set_color(self.color_inv) conclusion.to_corner(DR) self.add(circle_O, remark_circle, dot_O, label_O) self.add(conclusion) self.circle_O = circle_O def show_line_to_line_inversion(self): angle_tracker = ValueTracker(-PI/11) position_tracker = ValueTracker(1.4) angle_tracker.save_state() position_tracker.save_state() orig_line = ExtendedLine(LEFT, RIGHT, color = self.color_orig, stroke_width = 8) orig_line.add_updater(lambda m: m.rotate(angle_tracker.get_value() - m.get_angle())) inv_line = ExtendedLine(LEFT, RIGHT, color = self.color_inv, stroke_width = 4) inv_line.add_updater(lambda m: m.rotate(angle_tracker.get_value() - m.get_angle())) dot_P = Dot(color = self.color_orig) dot_P.add_updater( lambda m: m.move_to( position_tracker.get_value() * rotate_vector(RIGHT, angle_tracker.get_value()) ) ) dot_Pi = 
InversedDot(dot_P, self.circle_O, is_hollow = False, color = self.color_inv) label_P = DotLabel("P", dot_P, position = DOWN, color = self.color_orig) label_Pi = DotLabel("P'", dot_Pi, position = DOWN, color = self.color_inv) def get_lb(): return LEFT_SIDE + UP * LEFT_SIDE[0] * np.tan(angle_tracker.get_value()) def get_rb(): return RIGHT_SIDE + UP * RIGHT_SIDE[0] * np.tan(angle_tracker.get_value()) def is_oolb(m): return m.get_right()[0] < LEFT_SIDE[0] def is_oorb(m): return m.get_left()[0] > RIGHT_SIDE[0] oolb_arrow = Arrow(ORIGIN, LEFT, color = self.color_inv).scale(2) oolb_arrow.add_updater(lambda m: m.set_angle(angle_tracker.get_value() + PI)) oolb_arrow.add_updater(lambda m: m.next_to(get_lb(), DOWN, aligned_edge = LEFT, buff = 0.2)) oorb_arrow = Arrow(ORIGIN, RIGHT, color = self.color_inv).scale(2) oorb_arrow.add_updater(lambda m: m.set_angle(angle_tracker.get_value())) oorb_arrow.add_updater(lambda m: m.next_to(get_rb(), DOWN, aligned_edge = RIGHT, buff = 0.2)) oolb_label = TexMobject("P'", color = self.color_inv, background_stroke_width = 0) oolb_label.add_updater(lambda m: m.next_to(oolb_arrow, DOWN, buff = 0.2)) oorb_label = TexMobject("P'", color = self.color_inv, background_stroke_width = 0) oorb_label.add_updater(lambda m: m.next_to(oorb_arrow, DOWN, buff = 0.2)) oolb_group = VGroup(oolb_arrow, oolb_label) oorb_group = VGroup(oorb_arrow, oorb_label) oolb_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_oolb(label_Pi) else 0)) oolb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oolb(label_Pi) else 0)) oorb_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_oorb(label_Pi) else 0)) oorb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oorb(label_Pi) else 0)) self.add(orig_line, inv_line, dot_P, dot_Pi, label_P, label_Pi) self.add(oolb_group, oorb_group) for d_position, d_angle in [(2, 0), (1, PI/10), (-5, 0), (-3, -PI/7), (4, PI/11)]: self.play( ApplyMethod(position_tracker.increment_value, d_position), 
ApplyMethod(angle_tracker.increment_value, d_angle), run_time = 2, ) self.wait() self.play( ApplyMethod(angle_tracker.restore), ApplyMethod(position_tracker.restore), run_time = 2, ) self.wait() class LineToCircleInversion(Scene): CONFIG = { "color_circle" : YELLOW, "color_orig" : BLUE, "color_inv" : RED, "line_config" : { "stroke_width" : 2, "color" : WHITE, }, } def construct(self): self.add_backgrounds() self.add_shapes() self.show_line_to_circle_inversion() def add_backgrounds(self): circle_O = Circle(radius = 3, color = self.color_circle) circle_O.shift(3*LEFT+0.5*UP) remark_circle = TextMobject("反演圆", color = self.color_circle) remark_circle.next_to(circle_O.get_bottom(), UP) dot_O = Dot(circle_O.get_center(), color = self.color_circle) label_O = DotLabel("O", dot_O, color = self.color_circle, position = DOWN) conclusion1 = TextMobject("不经过反演中心的直线", "$\\mapsto$", "经过反演中心的圆") conclusion1[0].set_color(self.color_orig) conclusion1[-1].set_color(self.color_inv) conclusion2 = TextMobject("经过反演中心的圆", "$\\mapsto$", "不经过反演中心的直线") conclusion2[0].set_color(self.color_inv) conclusion2[-1].set_color(self.color_orig) conclusions = VGroup(conclusion1, conclusion2) for c in conclusions: c.scale(0.8) conclusions.arrange_submobjects(DOWN, index_of_submobject_to_align = 1) conclusions.to_corner(DR) bg_rect = BackgroundRectangle(conclusions) self.add(circle_O, remark_circle) self.add_foreground_mobjects(dot_O, label_O, bg_rect, conclusions) self.dot_O = dot_O self.circle_O = circle_O self.conclusions = conclusions self.bg_rect = bg_rect def add_shapes(self): position_tracker = ValueTracker(2) line_angle_tracker = ValueTracker(PI*9/19) circle_angle_tracker = ValueTracker(PI/5) line = ExtendedLine(LEFT, RIGHT, color = self.color_orig) line.add_updater(lambda m: m.move_to(position_tracker.get_value() * RIGHT)) line.add_updater(lambda m: m.rotate(line_angle_tracker.get_value() - m.get_angle())) inv_line = InversedVMobject(line, self.circle_O, use_dashed_vmob = False, color = 
            self.color_inv)
        # NOTE(review): this method's opening lines (defining `line`, `inv_line`
        # and the three ValueTrackers) are above this chunk -- confirm upstream.
        # Small dot tracking the center of the inverted line (a circle through O).
        inv_line_center = SmallDot(color = self.color_inv)
        inv_line_center.add_updater(lambda m: m.move_to(inv_line.get_center()))
        # A': the point of the inverse circle diametrically opposite O.
        dot_Ai = Dot(color = self.color_inv)
        dot_Ai.add_updater(
            lambda m: m.move_to(inv_line.get_center() * 2 - self.circle_O.get_center())
        )
        # P': a point running around the inverse circle, driven by circle_angle_tracker.
        dot_Pi = Dot(color = self.color_inv)
        dot_Pi.add_updater(
            lambda m: m.move_to(
                inv_line.get_center() \
                + rotate_vector(
                    inv_line.get_center() - self.circle_O.get_center(),
                    circle_angle_tracker.get_value()
                )
            )
        )
        # P and A are obtained by inverting P' and A' back through circle O.
        dot_P = InversedDot(dot_Pi, self.circle_O, is_hollow = False, color = self.color_orig)
        dot_A = InversedDot(dot_Ai, self.circle_O, is_hollow = False, color = self.color_orig)
        # Auxiliary segments joining O, A, P and their inverses.
        line_OA, line_OAi, line_OP, line_OPi, line_AP, line_AiPi = aux_lines = VGroup(*[
            TwoDotsSegment(pt_1, pt_2, **self.line_config)
            for pt_1, pt_2 in [
                (self.dot_O, dot_A), (self.dot_O, dot_Ai), (self.dot_O, dot_P),
                (self.dot_O, dot_Pi), (dot_A, dot_P), (dot_Ai, dot_Pi)
            ]
        ])
        # Angle marks: the angle at O and the two right angles of the figure.
        ai_AiOPi = AngleIndicator(dot_Ai, self.dot_O, dot_Pi, color = MAROON_B, radius = 0.8)
        rtai_OAP = RightAngleIndicator(self.dot_O, dot_A, dot_P)
        rtai_OPiAi = RightAngleIndicator(self.dot_O, dot_Pi, dot_Ai)
        label_P = TexMobject("P", color = self.color_orig)
        label_Pi = TexMobject("P'", color = self.color_inv)
        label_A = TexMobject("A", color = self.color_orig)
        label_Ai = TexMobject("A'", color = self.color_inv)
        # Labels float slightly outside their dots, away from O (or the circle center).
        label_A.add_updater(
            lambda m: m.move_to(
                dot_A.get_center() + 0.3 * normalize(dot_A.get_center() - self.dot_O.get_center())
            )
        )
        label_P.add_updater(
            lambda m: m.move_to(
                dot_P.get_center() + 0.3 * normalize(dot_A.get_center() - self.dot_O.get_center())
            )
        )
        label_Ai.add_updater(
            lambda m: m.move_to(
                dot_Ai.get_center() + 0.4 * rotate_vector(
                    normalize(dot_Ai.get_center() - inv_line_center.get_center()),
                    -PI/4
                )
            )
        )
        label_Pi.add_updater(
            lambda m: m.move_to(
                dot_Pi.get_center() + 0.4 * normalize(dot_Pi.get_center() - inv_line_center.get_center())
            )
        )

        # Intersections of the (extended) line with the top/bottom frame edges.
        def get_ub():
            return line.get_center() + TOP + RIGHT * TOP[1] / np.tan(line_angle_tracker.get_value())

        def get_bb():
            return line.get_center() + BOTTOM + RIGHT * BOTTOM[1] / np.tan(line_angle_tracker.get_value())

        # "Out of upper/bottom bound" tests for the P label.
        def is_ooub(m):
            return m.get_bottom()[1] > TOP[1]

        def is_oobb(m):
            return m.get_top()[1] < BOTTOM[1]

        # Off-screen indicators: arrows plus "P" labels, visible only while the
        # real label P has left the frame at the top or the bottom.
        ooub_arrow = Arrow(ORIGIN, LEFT, color = self.color_orig).scale(2)
        ooub_arrow.add_updater(lambda m: m.set_angle(line_angle_tracker.get_value()))
        ooub_arrow.add_updater(lambda m: m.next_to(get_ub(), RIGHT, aligned_edge = TOP, buff = 0.2))
        oobb_arrow = Arrow(ORIGIN, RIGHT, color = self.color_orig).scale(2)
        oobb_arrow.add_updater(lambda m: m.set_angle(line_angle_tracker.get_value() + PI))
        oobb_arrow.add_updater(lambda m: m.next_to(get_bb(), RIGHT, aligned_edge = BOTTOM, buff = 0.2))
        oolb_label = TexMobject("P", color = self.color_orig, background_stroke_width = 0)
        oolb_label.add_updater(lambda m: m.next_to(ooub_arrow, RIGHT, buff = 0.2))
        oorb_label = TexMobject("P", color = self.color_orig, background_stroke_width = 0)
        oorb_label.add_updater(lambda m: m.next_to(oobb_arrow, RIGHT, buff = 0.2))
        ooub_group = VGroup(ooub_arrow, oolb_label)
        oobb_group = VGroup(oobb_arrow, oorb_label)
        ooub_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_ooub(label_P) else 0))
        ooub_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_ooub(label_P) else 0))
        oobb_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_oobb(label_P) else 0))
        oobb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oobb(label_P) else 0))
        self.add(line, inv_line)
        self.add(dot_A, dot_P, dot_Ai, dot_Pi)
        self.add(label_P, label_Pi, label_A, label_Ai)
        self.add(aux_lines)
        self.add(ai_AiOPi, rtai_OAP, rtai_OPiAi)
        self.add(ooub_group, oobb_group)
        # Expose the trackers so the animation method below can drive them.
        self.position_tracker = position_tracker
        self.line_angle_tracker = line_angle_tracker
        self.circle_angle_tracker = circle_angle_tracker

    def show_line_to_circle_inversion(self):
        """Animate a scripted tour of line positions/angles and the point P."""
        # Each entry: [d_center, d_line_angle, d_circle_angle, run_time].
        play_args = [
            [0, PI/12, 0, 2],
            [0, 0, PI*7/5, 4],
            [-2, PI/8, -PI/5, 3],
            [0, 0, PI*19/10, 6],
            [1.5, -PI/7, PI*2/5, 4],
        ]
        # Final step: undo the accumulated increments of each tracker.
        restore_arg = [
            -sum([arg[k] for arg in play_args])
            for
k in range(len(play_args[0])) ] restore_arg[1] = (restore_arg[1] + PI) % (2*PI) - PI restore_arg[2] = (restore_arg[2] + PI) % (2*PI) - PI restore_arg[-1] = 3 play_args.append(restore_arg) for d_center, d_line_angle, d_circle_angle, run_time in play_args: self.play( ApplyMethod(self.position_tracker.increment_value, d_center), ApplyMethod(self.line_angle_tracker.increment_value, d_line_angle), ApplyMethod(self.circle_angle_tracker.increment_value, d_circle_angle), run_time = run_time, ) self.wait() class InversionCreateSimilarTriangles(Scene): CONFIG = { "random_seed" : 5+7-0, "num_of_nudges" : 5, "max_step" : 1, "color_A" : RED, "color_B" : BLUE, "color_combined" : MAROON_B, "color_circle": YELLOW, } def construct(self): self.add_remark() self.show_figure_animation() def add_remark(self): cond_1 = TexMobject("{|OP|", "\\over", "|OQ|}", "=", "{|OQ'|", "\\over", "|OP'|}") cond_2 = TexMobject("\\angle POQ", "=", "\\angle Q'OP'") conds = VGroup(cond_1, cond_2) conds.arrange_submobjects(DOWN, buff = 0.5) conds_rect = SurroundingRectangle(conds, color = WHITE) arrow = TexMobject("\\Downarrow") arrow.next_to(conds_rect, DOWN) concl = TexMobject("\\triangle OPQ", "\\sim", "\\triangle OQ'P'") concl.next_to(arrow, DOWN) for mob in (cond_1[0], cond_1[2], concl[0]): mob.set_color(self.color_A) for mob in (cond_1[-1], cond_1[-3], concl[-1]): mob.set_color(self.color_B) for mob in (cond_2[0], cond_2[-1]): mob.set_color(self.color_combined) remark = VGroup(conds, conds_rect, arrow, concl) remark.to_corner(DR) self.add(remark) def show_figure_animation(self): circle = Circle(radius = 3, color = self.color_circle) circle.move_to(3.5*LEFT) dot_O = Dot(color = self.color_combined) dot_O.add_updater(lambda m: m.move_to(circle.get_center())) dot_P = Dot(point = 1.2*UP+LEFT, color = self.color_A) dot_Q = Dot(point = 0.5*DOWN+1.9*LEFT, color = self.color_A) dot_Pi = InversedDot(dot_P, circle, is_hollow = False, color = self.color_B) dot_Qi = InversedDot(dot_Q, circle, is_hollow = False, 
color = self.color_B) triangle_OPQ = ManyDotsPolygon( dot_O, dot_P, dot_Q, color = self.color_A, stroke_width = 5, fill_opacity = 0.4 ) triangle_OPiQi = ManyDotsPolygon( dot_O, dot_Pi, dot_Qi, color = self.color_B, stroke_width = 2, fill_opacity = 0.3 ) label_O, label_P, label_Pi, label_Q, label_Qi = ( DotLabel( text, dot, color = color, position = position, background_stroke_width = 5, ).scale(0.8) for text, dot, color, position in zip( ["O", "P", "P'", "Q", "Q'"], [dot_O, dot_P, dot_Pi, dot_Q, dot_Qi], [self.color_combined, self.color_A, self.color_B, self.color_A, self.color_B], [LEFT, UP, UP, DOWN, DOWN] ) ) self.add(dot_O, dot_P, dot_Q, dot_Pi, dot_Qi) self.add(circle, triangle_OPQ, triangle_OPiQi) self.add(label_O, label_P, label_Pi, label_Q, label_Qi) dot_P.save_state() dot_Q.save_state() for k in range(self.num_of_nudges): nudge_P = get_random_vector(self.max_step) nudge_Q = get_random_vector(self.max_step) self.play( ApplyMethod(dot_P.shift, nudge_P), ApplyMethod(dot_Q.shift, nudge_Q), run_time = 2 ) self.wait() self.play(dot_P.restore, dot_Q.restore, run_time = 2) self.wait() class CircleToCircleInversionProof(Scene): CONFIG = { "color_O" : YELLOW, "color_A" : RED, "color_B" : BLUE, "color_combined" : MAROON_B, "label_buff" : 0.1, "label_scaling_factor" : 0.75, "line_config" : { "stroke_width" : 2, "color" : WHITE, }, } def construct(self): self.add_backgrounds() self.show_left_and_right_points() self.show_random_point() self.show_similar_triangles() self.show_complementary_property() self.show_inversion_result() def add_backgrounds(self): circle_O = Circle(radius = 3.2, color = self.color_O) circle_O.shift(3.5*LEFT) dot_O = Dot(circle_O.get_center(), color = self.color_O) remark_O = TextMobject("反演圆", color = YELLOW) remark_O.next_to(circle_O.get_bottom(), UP, buff = 0.4) circle_C = Circle(radius = 0.8, stroke_width = 2) circle_C.next_to(circle_O.get_right(), LEFT, buff = 0.5) dot_C = Dot(circle_C.get_center()) label_O, label_C = ( DotLabel( text, dot, 
                color = color, position = DOWN, label_buff = self.label_buff
            ).scale(self.label_scaling_factor)
            for text, dot, color in zip(["O", "C"], [dot_O, dot_C], [self.color_O, WHITE])
        )
        # Give circle C and its center a left-to-right A->B color gradient.
        for orig_mob in (circle_C, dot_C, label_C):
            orig_mob.set_sheen_direction(RIGHT)
            orig_mob.set_color([self.color_A, self.color_B])
        # Use the inverse of C only to size/position a fresh Circle that carries
        # a mirrored gradient (inversion swaps the near/far sides).
        inv_circle_template = InversedVMobject(circle_C, circle_O, use_dashed_vmob = False)
        inv_circle = Circle(radius = inv_circle_template.get_width()/2)
        inv_circle.move_to(inv_circle_template.get_center())
        inv_circle.set_sheen_direction(LEFT)
        inv_circle.set_color([self.color_A, self.color_B])
        self.add(circle_O, dot_O, circle_C, dot_C)
        self.add(label_O, label_C)
        self.add(remark_O)
        self.wait()
        self.circle_O = circle_O
        self.dot_O = dot_O
        self.remark_O = remark_O
        self.circle_C = circle_C
        self.dot_C = dot_C
        self.inv_circle = inv_circle

    def show_left_and_right_points(self):
        """Introduce A, B (ends of C's diameter along OC) and their inverses A', B'."""
        dot_A = Dot(color = self.color_A)
        dot_A.move_to(self.circle_C.get_left())
        dot_B = Dot(color = self.color_B)
        dot_B.move_to(self.circle_C.get_right())
        dot_Ai = InversedDot(dot_A, self.circle_O, is_hollow = False, color = self.color_A)
        dot_Bi = InversedDot(dot_B, self.circle_O, is_hollow = False, color = self.color_B)
        # Q: midpoint of A'B', i.e. the center of the inverse circle.
        dot_Q = Dot((dot_Ai.get_center() + dot_Bi.get_center()) / 2)
        line_OB = Line(self.dot_O.get_center(), dot_B.get_center(), **self.line_config)
        line_OAi = Line(self.dot_O.get_center(), dot_Ai.get_center(), **self.line_config)
        label_A, label_Ai, label_B, label_Bi = (
            DotLabel(
                text, dot, color = color, position = position, label_buff = self.label_buff
            ).scale(self.label_scaling_factor)
            for text, dot, color, position in zip(
                ["A", "A'", "B", "B'"],
                [dot_A, dot_Ai, dot_B, dot_Bi],
                [self.color_A, self.color_A, self.color_B, self.color_B],
                [DL, DR, DR, DL]
            )
        )
        remark_AB = TextMobject("圆心连线 \\\\ 的交点...").scale(0.6)
        remark_AB.next_to(VGroup(dot_A, dot_B), DOWN, buff = 1)
        arrows_AB = VGroup(*[
            Arrow(remark_AB.get_critical_point(direction), dot, buff = 0.1)
            for direction, dot in zip([UL, UR], [dot_A, dot_B])
        ])
        remark_AiBi = TextMobject("...以及它们的反点").scale(0.8)
        remark_AiBi.next_to(VGroup(dot_Ai, dot_Bi), DOWN, buff = 1)
        arrows_AiBi = VGroup(*[
            Arrow(remark_AiBi.get_critical_point(direction), dot, buff = 0.1)
            for direction, dot in zip([UR, UL], [dot_Ai, dot_Bi])
        ])
        self.play(ShowCreation(line_OB))
        self.play(Write(dot_A), Write(dot_B), Write(label_A), Write(label_B))
        self.wait()
        self.play(Write(remark_AB), ShowCreation(arrows_AB))
        self.wait()
        self.play(
            ReplacementTransform(dot_A.deepcopy(), dot_Ai),
            ReplacementTransform(dot_B.deepcopy(), dot_Bi),
        )
        self.play(Write(label_Ai), Write(label_Bi))
        self.wait()
        self.play(
            ReplacementTransform(remark_AB, remark_AiBi),
            ReplacementTransform(arrows_AB, arrows_AiBi)
        )
        self.play(ReplacementTransform(line_OB, line_OAi))
        self.play(FadeOut(VGroup(remark_AiBi, arrows_AiBi)))
        self.wait()
        self.dot_A = dot_A
        self.dot_Ai = dot_Ai
        self.dot_B = dot_B
        self.dot_Bi = dot_Bi
        self.dot_Q = dot_Q
        self.line_OAi = line_OAi
        self.dots_AB = VGroup(dot_A, dot_Ai, dot_B, dot_Bi)
        self.labels_AB = VGroup(label_A, label_Ai, label_B, label_Bi)

    def show_random_point(self):
        """Introduce P, a point moving on circle C, and its inverse P'."""
        angle_tracker = ValueTracker(PI/3)
        dot_P = Dot()
        dot_P.add_updater(
            lambda m: m.move_to(
                self.circle_C.point_at_angle(angle_tracker.get_value() % TAU)
            )
        )
        # Color P by its horizontal position between A (red) and B (blue).
        dot_P.add_updater(
            lambda m: m.set_color(
                interpolate_color(
                    self.color_A, self.color_B,
                    (dot_P.get_center()[0] - self.dot_A.get_center()[0]) /
                    (self.dot_B.get_center()[0] - self.dot_A.get_center()[0])
                )
            )
        )
        label_P = DotLabel("P", dot_P, position = None)
        label_P.scale(0.8)
        label_P.add_updater(lambda m: m.set_color(dot_P.get_color()))
        label_P.add_updater(
            lambda m: m.move_to(dot_P.get_center() * 1.4 - self.dot_C.get_center() * 0.4)
        )
        arrow_P = Vector(DR, buff = 0, color = WHITE).scale(0.5)
        arrow_P.add_updater(lambda m: m.next_to(dot_P, UL, buff = 0.1))
        remark_P = TextMobject("圆上任意一点...").scale(0.75)
        remark_P.add_updater(lambda m: m.next_to(arrow_P, UL, buff = 0.1))
        dot_Pi = InversedDot(dot_P, self.circle_O, is_hollow = False)
        dot_Pi.add_updater(lambda m: m.set_color(dot_P.get_color()))
        label_Pi = DotLabel("P'", dot_Pi, position = None)
        label_Pi.scale(0.8)
        label_Pi.add_updater(lambda m: m.set_color(dot_Pi.get_color()))
        label_Pi.add_updater(
            lambda m: m.move_to(dot_Pi.get_center() * 1.1 - self.inv_circle.get_center() * 0.1)
        )
        arrow_Pi = Vector(DL, buff = 0, color = WHITE).scale(0.5)
        arrow_Pi.add_updater(lambda m: m.next_to(dot_Pi, UR, buff = 0.1))
        remark_Pi = TextMobject("...以及它的反点").scale(0.75)
        remark_Pi.add_updater(lambda m: m.next_to(arrow_Pi, UR, buff = 0.1))
        # Segments OP, OP', AP, A'P', BP, B'P' used in the similarity argument.
        line_OP, line_OPi, line_AP, line_AiPi, line_BP, line_BiPi = aux_lines = VGroup(*[
            TwoDotsSegment(pt_1, pt_2, **self.line_config)
            for pt_1, pt_2 in [
                (self.dot_O, dot_P), (self.dot_O, dot_Pi), (self.dot_A, dot_P),
                (self.dot_Ai, dot_Pi), (self.dot_B, dot_P), (self.dot_Bi, dot_Pi)
            ]
        ])
        # APB subtends a diameter of C, so it is a right angle (Thales).
        rtai_APB = RightAngleIndicator(self.dot_A, dot_P, self.dot_B)
        rtai_BiPiAi = RightAngleIndicator(self.dot_Bi, dot_Pi, self.dot_Ai, side_length = 0.5)
        self.play(Write(dot_P), Write(label_P))
        self.play(ShowCreation(arrow_P), Write(remark_P))
        self.play(Write(line_AP), Write(line_BP))
        self.play(ShowCreation(rtai_APB))
        self.wait()
        self.play(ReplacementTransform(dot_P.deepcopy(), dot_Pi))
        self.play(Write(label_Pi))
        self.play(
            ReplacementTransform(arrow_P.deepcopy(), arrow_Pi),
            ReplacementTransform(remark_P.deepcopy(), remark_Pi),
        )
        self.play(angle_tracker.increment_value, PI/6, run_time = 2)
        self.play(FadeOut(VGroup(arrow_P, remark_P, arrow_Pi, remark_Pi)))
        self.wait()
        self.play(Write(VGroup(line_OP, line_OPi, line_AiPi, line_BiPi)))
        self.wait()
        self.dot_P = dot_P
        self.dot_Pi = dot_Pi
        self.rtai_APB = rtai_APB
        self.rtai_BiPiAi = rtai_BiPiAi
        self.angle_tracker = angle_tracker
        self.aux_lines = aux_lines
        self.dots_P = VGroup(dot_P, dot_Pi)
        self.labels_P = VGroup(label_P, label_Pi)
        self.rtais = VGroup(self.rtai_APB, self.rtai_BiPiAi)

    def show_similar_triangles(self):
        """Show OAP ~ OP'A' and OBP ~ OP'B' with matching angle marks."""
        ai_OAP = AngleIndicator(self.dot_O, self.dot_A, self.dot_P, radius = 0.3, color = self.color_A)
        ai_OBP = AngleIndicator(self.dot_O, self.dot_B, self.dot_P, radius = 0.4, color = self.color_B)
        ai_OPiAi = AngleIndicator(self.dot_O, self.dot_Pi, self.dot_Ai, radius = 0.3, color = self.color_A)
        ai_OPiBi = AngleIndicator(self.dot_O, self.dot_Pi, self.dot_Bi, radius = 0.4, color = self.color_B)
        triangle_OAP, triangle_OPiAi, triangle_OBP, triangle_OPiBi = [
            ManyDotsPolygon(
                pt_1, pt_2, pt_3,
                color = self.color_combined, stroke_width = 0, fill_opacity = 0.4
            )
            for pt_1, pt_2, pt_3 in (
                (self.dot_O, self.dot_A, self.dot_P), (self.dot_O, self.dot_Pi, self.dot_Ai),
                (self.dot_O, self.dot_B, self.dot_P), (self.dot_O, self.dot_Pi, self.dot_Bi),
            )
        ]
        # Textual remarks: similarity implies the corresponding angle equality.
        remark_sim_A = TexMobject("\\triangle OAP", "\\sim", "\\triangle OP'A'")
        remark_sim_B = TexMobject("\\triangle OBP", "\\sim", "\\triangle OP'B'")
        remark_arrow = TexMobject("\\Downarrow")
        remark_angle_A = TexMobject("\\angle OAP", "=", "\\angle OP'A'")
        remark_angle_B = TexMobject("\\angle OBP", "=", "\\angle OP'B'")
        remarks_A = VGroup(remark_sim_A, remark_arrow, remark_angle_A)
        remarks_B = VGroup(remark_sim_B, remark_arrow, remark_angle_B)
        remarks_A.arrange_submobjects(DOWN)
        remarks_A.next_to(self.dot_Q, DOWN, buff = 1)
        remark_sim_B.move_to(remark_sim_A.get_center())
        remark_angle_B.move_to(remark_angle_A.get_center())
        for remark, color in ([remark_sim_A, self.color_combined], [remark_sim_B, self.color_combined], \
            [remark_angle_A, self.color_A], [remark_angle_B, self.color_B]):
            remark[0].set_color(color)
            remark[-1].set_color(color)
        self.play(Write(remark_sim_A))
        self.play(FadeInFromDown(VGroup(remark_arrow, remark_angle_A)))
        self.wait()
        self.play(ShowCreation(triangle_OAP), ShowCreation(ai_OAP))
        self.wait()
        self.play(
            ReplacementTransform(triangle_OAP, triangle_OPiAi),
            ReplacementTransform(ai_OAP.deepcopy(), ai_OPiAi),
        )
        self.play(FadeOut(triangle_OPiAi))
        self.wait()
        self.play(ReplacementTransform(remarks_A, remarks_B))
        self.wait()
        self.play(ShowCreation(triangle_OBP), ShowCreation(ai_OBP))
        self.wait()
        self.play(
            ReplacementTransform(triangle_OBP, triangle_OPiBi),
            ReplacementTransform(ai_OBP.deepcopy(), ai_OPiBi),
        )
        self.play(FadeOut(remarks_B), FadeOut(triangle_OPiBi))
        self.wait()
        self.ai_OAP = ai_OAP
        self.ai_OBP = ai_OBP
        self.ai_OPiAi = ai_OPiAi
        self.ai_OPiBi = ai_OPiBi
        self.ais = VGroup(ai_OAP, ai_OBP, ai_OPiAi, ai_OPiBi)

    def show_complementary_property(self):
        """Arrange angle-mark copies into the identity: angle OAP = angle OBP + right angle."""
        ai_OAP_copy = self.ai_OAP.deepcopy()
        ai_OBP_copy = self.ai_OBP.deepcopy()
        rtai_APB_copy = self.rtai_APB.deepcopy()
        for ai_copy in (ai_OAP_copy, ai_OBP_copy, rtai_APB_copy):
            ai_copy.clear_updaters()
        comp_prop = VGroup(ai_OAP_copy, TexMobject("="), ai_OBP_copy, TexMobject("+"), rtai_APB_copy)
        comp_prop.arrange_submobjects(RIGHT)
        comp_prop.scale(1.2)
        comp_prop.next_to(self.circle_O.get_top(), DOWN, buff = 1)
        self.play(
            ReplacementTransform(self.ai_OAP.deepcopy(), ai_OAP_copy),
            ReplacementTransform(self.ai_OBP.deepcopy(), ai_OBP_copy),
            ReplacementTransform(self.rtai_APB.deepcopy(), rtai_APB_copy),
        )
        self.play(Write(comp_prop[1]), Write(comp_prop[3]))
        self.wait()
        self.play(ReplacementTransform(rtai_APB_copy.deepcopy(), self.rtai_BiPiAi))
        self.wait()
        for ai in self.ais:
            ai.clear_updaters()
        self.play(
            FadeOut(comp_prop), FadeOut(self.ais),
            FadeOut(self.labels_AB), FadeOut(self.labels_P),
        )
        self.wait()

    def show_inversion_result(self):
        """Trace out the inverse circle as P' travels once around it."""
        inv_circle_copy = self.inv_circle.deepcopy()
        self.play(self.angle_tracker.set_value, PI, run_time = 2)
        self.wait()

        # Draw exactly the arc that P' has swept so far (measured at the
        # center Q of the inverse circle, relative to A').
        def update_inv_circle(inv_circle):
            angle = self.angle_tracker.get_value()
            if (angle <= -PI) or (angle > PI):
                alpha = 1
            else:
                QPi = self.dot_Pi.get_center() - self.dot_Q.get_center()
                QAi = self.dot_Ai.get_center() - self.dot_Q.get_center()
                theta = angle_between(QPi, QAi)
                if self.dot_Pi.get_center()[1] < self.dot_Q.get_center()[1]:
                    theta = 2*PI - theta
                alpha = theta / (2*PI)
            inv_circle.become(inv_circle_copy.get_subcurve(0, alpha))

        self.inv_circle.add_updater(update_inv_circle)
        self.add(self.inv_circle)
        self.play(
            ApplyMethod(self.angle_tracker.increment_value, -2*PI),
            run_time =
            5,
        )
        self.inv_circle.clear_updaters()
        for line in self.aux_lines:
            line.clear_updaters()
        self.play(
            FadeOut(self.dots_AB), FadeOut(self.dots_P), FadeOut(self.rtais),
            FadeOut(self.line_OAi), FadeOut(self.aux_lines)
        )
        self.wait()
        # NOTE(review): `color_template` is built but never used below --
        # possibly a leftover; confirm before removing.
        color_template = Square(
            stroke_width = 0, fill_opacity = 1,
            fill_color = [self.color_A, self.color_B]
        )
        conclusion = TextMobject("不经过反演中心的圆", "$\\mapsto$", "不经过反演中心的圆")
        conclusion.scale(0.8)
        conclusion[0].set_color_by_gradient(self.color_A, self.color_B)
        conclusion[2].set_color_by_gradient(self.color_B, self.color_A)
        conclusion.to_corner(DR)
        self.play(Write(conclusion))
        self.wait(3)
        self.play(FadeOut(conclusion), FadeOut(self.inv_circle))
        self.wait()


class ConcentricPropertyDoesNotHold(Scene):
    """Static figure: concentric circles generally invert to non-concentric ones."""
    def setup(self):
        # Radii, dot sizes and a blue->green->red gradient for N nested circles.
        N = 8
        self.circle_radii = [0.9-0.1*k for k in range(N)]
        self.dot_radii = [0.08-0.005*k for k in range(N)]
        self.circle_colors = color_gradient([BLUE, GREEN, RED], N)

    def construct(self):
        orig_circles = VGroup(*[
            Circle(radius = radius, stroke_width = 1.5,color = color)
            for radius, color in zip(self.circle_radii, self.circle_colors)]
        )
        orig_circles.shift(2*LEFT+0.5*DOWN)
        orig_circles_centers = VGroup(*[
            Dot(circle.get_center(), radius = radius, color = color)
            for circle, radius, color in zip(orig_circles, self.dot_radii, self.circle_colors)
        ])
        # Dot(orig_circles.get_center())
        circle = Circle(radius = 3, color = YELLOW)
        circle.shift(3.8*LEFT+0.5*DOWN)
        circle_center = Dot(circle.get_center(), color = YELLOW)
        # Inverses are static copies (updaters cleared), one color per circle.
        inv_circles = VGroup(*[
            InversedVMobject(orig_circle, circle).clear_updaters().set_color(color)
            for orig_circle, color in zip(orig_circles, self.circle_colors)
        ])
        inv_circles_centers = VGroup(*[
            Dot(inv_circle.get_center(), color = color)
            for inv_circle, color in zip(inv_circles, self.circle_colors)
        ])
        circle_text = TextMobject("反演圆", color = YELLOW)
        circle_text.next_to(circle.get_bottom(), UP, buff = 0.4)
        orig_circles_text = TextMobject("同心的圆", color = WHITE)
        orig_circles_text.next_to(orig_circles, UP)
        orig_circles_text.to_edge(UP, buff = 0.4)
        inv_circles_text = TextMobject("不同心的像", color = WHITE)
        inv_circles_text.next_to(inv_circles, UP)
        inv_circles_text.to_edge(UP, buff = 0.4)
        arrow = Arrow(orig_circles_text.get_right(), inv_circles_text.get_left())
        self.add(circle, circle_center)
        self.add(orig_circles, orig_circles_centers)
        self.add(inv_circles, inv_circles_centers)
        self.add(circle_text, orig_circles_text, inv_circles_text, arrow)
        self.wait()


class DemonstratePtolemyInequality(Scene):
    """Drag C around: Ptolemy's relation is an equality iff ABCD is concyclic."""
    CONFIG = {
        "R" : 2.7,
        "angle_A" : -PI*2/3,
        "angle_B" : PI*4/5,
        "angle_D" : -PI/5,
        "radius_C" : 3.2,
        "angle_C" : PI/5,
    }

    def construct(self):
        # C is free: its polar coordinates are driven by these two trackers.
        radius_tracker = ValueTracker(self.radius_C)
        angle_tracker = ValueTracker(self.angle_C)
        circle = Circle(radius = self.R, color = WHITE, stroke_width = 1)
        circle.shift(DOWN)
        dashed_circle = DashedVMobject(circle, num_dashes = 100, positive_space_ratio = 0.5)
        dot_A, dot_B, dot_C, dot_D = dots = VGroup(*[
            Dot(circle.point_at_angle(angle % TAU), color = WHITE)
            for angle in (self.angle_A, self.angle_B, self.angle_C, self.angle_D)
        ])
        dot_C.add_updater(
            lambda m: m.move_to(
                circle.get_center() + radius_tracker.get_value() * \
                rotate_vector(RIGHT, angle_tracker.get_value())
            )
        )
        dot_labels = VGroup(*[
            DotLabel(text, dot, position = position, label_buff = 0.1)
            for text, dot, position in zip(
                ["A", "B", "C", "D"], dots, [DL, UL, UR, DR]
            )
        ])
        # The six segments of the complete quadrilateral ABCD.
        lines = VGroup(*[
            TwoDotsSegment(dot_1, dot_2)
            for dot_1, dot_2 in (
                [dot_B, dot_A], [dot_A, dot_C], [dot_A, dot_D],
                [dot_B, dot_C], [dot_B, dot_D], [dot_C, dot_D],
            )
        ])
        length_labels = VGroup(*[LengthLabel(line) for line in lines])
        length_labels[0].switch_side()
        length_labels[2].switch_side()
        length_labels[1].set_offset(-0.4)
        length_labels[-2].set_offset(-0.4)

        # Left/right sides of Ptolemy: AB*CD + AD*BC  vs  AC*BD.
        def get_sums():
            AB, AC, AD, BC, BD, CD = [line.get_length() for line in lines]
            sum_lhs = AB * CD + AD * BC
            sum_rhs = AC * BD
            return sum_lhs, sum_rhs

        relation_eq = TexMobject(
            "|AB| \\cdot |CD| + |AD| \\cdot |BC|", "=", "|AC| \\cdot |BD|",
            background_stroke_width = 0,
        )
        relation_neq = TexMobject(
            "|AB| \\cdot |CD| + |AD| \\cdot |BC|", ">", "|AC| \\cdot |BD|",
            background_stroke_width = 0,
        )
        relation_eq[1].set_color(GREEN)
        relation_neq[1].set_color(RED)
        relation_eq.to_edge(UP, buff = 1.2)
        for eq_mob, neq_mob in zip(relation_eq, relation_neq):
            neq_mob.move_to(eq_mob.get_center())
        lhs, eq_sign, rhs = relation_eq
        neq_sign = relation_neq[1]
        label_lhs = DecimalNumber(num_decimal_places = 4, show_ellipsis = True)
        label_rhs = DecimalNumber(num_decimal_places = 4, show_ellipsis = True)
        label_lhs.add_updater(lambda m: m.set_value(get_sums()[0]))
        label_rhs.add_updater(lambda m: m.set_value(get_sums()[1]))
        brace_lhs = Brace(lhs, UP, buff = 0.1)
        brace_rhs = Brace(rhs, UP, buff = 0.1)
        brace_lhs.put_at_tip(label_lhs)
        brace_rhs.put_at_tip(label_rhs)

        # Green/equality when C lies on the circle (|OC| == R), red otherwise.
        def get_indication_color(thres = 1e-2):
            return GREEN if is_close(radius_tracker.get_value(), self.R, thres = thres) else RED

        def get_indication_opacity(thres = 1e-2):
            return 0 if is_close(radius_tracker.get_value(), self.R, thres = thres) else 1

        figure_group = VGroup(dashed_circle, dots, lines, length_labels, dot_labels)
        figure_group.add_updater(lambda m: m.set_color(get_indication_color()))
        relation_group = VGroup(lhs, eq_sign, rhs, neq_sign, brace_lhs, brace_rhs, label_lhs, label_rhs)
        label_lhs.add_updater(lambda m: m.set_color(get_indication_color()))
        label_rhs.add_updater(lambda m: m.set_color(get_indication_color()))
        eq_sign.add_updater(lambda m: m.set_opacity(1 - get_indication_opacity()))
        neq_sign.add_updater(lambda m: m.set_opacity(get_indication_opacity()))
        self.add(figure_group)
        self.add(relation_group)
        # Scripted nudges of C in polar coordinates, then restore.
        deltas = [
            (0.5, -0.1), (0, -0.4), (-1, 0.3), (0, 0.4),
            (-1, 0), (0.3, -0.2), (0.7, -0.3),
        ]
        radius_tracker.save_state()
        angle_tracker.save_state()
        for d_radius, d_angle in deltas:
            self.play(
                ApplyMethod(radius_tracker.increment_value, d_radius),
                ApplyMethod(angle_tracker.increment_value, d_angle),
                run_time = 2,
            )
            self.wait()
        self.play(
            ApplyMethod(radius_tracker.restore),
            ApplyMethod(angle_tracker.restore),
            run_time = 2,
        )
        self.wait()


class PtolemyInversionFigure(Scene):
    """Static figure for the inversion proof of Ptolemy's theorem (center at A)."""
    CONFIG = {
        "R" : 3.8,
        "r" : 1.3,
        "angle_A" : PI,
        "angle_B" : PI/3,
        "angle_C" : -PI/9,
        "angle_D" : -PI*2/7,
        "color_circle" : YELLOW,
        "color_ABD" : BLUE,
    }

    def construct(self):
        circle_ABD = Circle(radius = self.r, color = self.color_ABD, stroke_width = 3)
        circle_ABD.shift(0.2*LEFT)
        dot_A, dot_B, dot_C, dot_D = dots = VGroup(*[
            Dot(circle_ABD.point_at_angle(angle % TAU), color = WHITE)
            for angle in (self.angle_A, self.angle_B, self.angle_C, self.angle_D)
        ])
        dot_A.set_color(self.color_circle)
        # C is nudged off circle_ABD so only A, B, D remain concyclic.
        dot_C.shift(0.4*RIGHT)
        # The inversion circle is centered at A.
        circle = Circle(radius = self.R, color = self.color_circle, stroke_width = 5)
        circle.move_to(dot_A.get_center())
        remark_circle = TextMobject("反演圆", color = self.color_circle)
        remark_circle.next_to(circle.get_bottom(), UP)
        label_A, label_B, label_C, label_D = dot_labels = VGroup(*[
            DotLabel(text, dot, position = position, label_buff = 0.2)
            for text, dot, position in zip(
                ["A", "B", "C", "D"], dots, [DL, UP, DOWN, DOWN]
            )
        ])
        label_A.set_color(self.color_circle)
        dot_Bi, dot_Ci, dot_Di = inv_dots = VGroup(*[
            InversedDot(dot, circle, is_hollow = False, color = WHITE)
            for dot in (dot_B, dot_C, dot_D)
        ])
        label_Bi, label_Ci, label_Di = inv_dot_labels = VGroup(*[
            DotLabel(text, dot, position = RIGHT, label_buff = 0.2)
            for text, dot in zip(["B'", "C'", "D'"], [dot_Bi, dot_Ci, dot_Di])
        ])
        # All segments of ABCD plus those joining A and the inverted points.
        lines = VGroup(*[
            TwoDotsSegment(dot_1, dot_2, stroke_width = 1)
            for dot_1, dot_2 in (
                [dot_A, dot_B], [dot_A, dot_C], [dot_A, dot_D],
                [dot_B, dot_C], [dot_B, dot_D], [dot_C, dot_D],
                [dot_A, dot_Bi], [dot_A, dot_Ci], [dot_A, dot_Di],
                [dot_Bi, dot_Ci], [dot_Bi, dot_Di], [dot_Ci, dot_Di],
            )
        ])
        # circle_ABD passes through the inversion center A, so its image is a line.
        inv_circle_ABD = InversedVMobject(circle_ABD, circle, use_dashed_vmob = False)
        inv_circle_ABD.add_updater(lambda m: m.set_color(self.color_ABD))
        inv_circle_ABD.add_updater(lambda m: m.set_stroke(width = 2))
        self.add(circle, remark_circle, circle_ABD, inv_circle_ABD)
        self.add(dots, dot_labels, inv_dots, inv_dot_labels, lines)
        # NOTE(review): the empty self.add() below is a no-op -- likely leftover.
        self.add()
        self.wait()

#####
## Inversion Advanced P1 Scenes

class KissingCirclesPuzzle(Scene):
    """Pose the puzzle: add a fourth circle tangent to three mutually tangent ones."""
    def construct(self):
        self.show_figure()
        self.show_question()

    def show_figure(self):
        # Two tangency configurations, labeled below each figure.
        type_text_1 = TextMobject("外切-外切-外切")
        type_text_2 = TextMobject("内切-内切-外切")
        type_text_1.move_to(LEFT_SIDE/2)
        type_text_2.move_to(RIGHT_SIDE/2)
        type_text_1.to_edge(DOWN)
        type_text_2.to_edge(DOWN)
        dot_l1, dot_l2, dot_l3 = dots_l = VGroup(*[
            VectorizedPoint(np.array([coords[0], coords[1], 0]), color = BLUE)
            for coords in [(-3.9, 1.5), (-4.9, 0.0), (-2.8, -1.0)]
        ])
        dot_r1, dot_r2, dot_r3 = dots_r = VGroup(*[
            VectorizedPoint(np.array([coords[0], coords[1], 0]), color = BLUE)
            for coords in [(4.6, 0.3), (3.9, 0.6), (3.5, 1.6)]
        ])
        dfc_l = DescartesFourCircles(*dots_l, show_new_circles = False)
        dfc_r = DescartesFourCircles(*dots_r, show_new_circles = False, outer_circle_index = 2)
        for dfc in [dfc_l, dfc_r]:
            for mob in dfc.get_orig_circles():
                mob.set_stroke(width = 2, color = BLUE)
        self.add(type_text_1, type_text_2)
        self.add(dfc_l, dfc_r)
        self.dfc_l = dfc_l
        self.dfc_r = dfc_r
        self.dots_l = dots_l
        self.dots_r = dots_r

    def show_question(self):
        question = TextMobject("能否添加第四个圆,使之与其他三个圆都相切?")
        question.to_edge(UP, buff = 0.2)
        self.add(question)
        self.wait()


class KissingCirclesSimplified(Scene):
    """Simplified setup: two parallel lines ("circles") and one circle between them."""
    def construct(self):
        line1 = ExtendedLine(UL, UR)
        line2 = ExtendedLine(DL, DR)
        center_circle = Circle(radius = 1)
        figure_group = VGroup(line1, line2, center_circle)
        for mob in figure_group:
            mob.set_stroke(width = 2, color = BLUE)
        question = TextMobject("能否添加第四个“圆”,使之与其他三个“圆”都相切?")
        question.next_to(figure_group, UP, buff = 0.5)
        group = VGroup(question, figure_group)
        group.move_to(ORIGIN)
        self.add(group)
        self.wait()


class KissingCirclesSimplifiedAnswer(Scene):
    """Answer to the simplified puzzle: the two unit circles on either side."""
    def construct(self):
        line1 = ExtendedLine(UL, UR, stroke_width = 2, color = BLUE)
        line2 = ExtendedLine(DL, DR, stroke_width = 2, color = BLUE)
        center_circle = Circle(radius = 1, stroke_width = 2, color = BLUE)
        new_circles = VGroup(*[
            Circle(radius = 1, color
            = color, fill_opacity = 0.1, stroke_width = 5) \
                .next_to(center_circle, direction, buff = 0)
            for direction, color in zip([LEFT, RIGHT], [RED, ORANGE])
        ])
        numbers = VGroup(*[
            TexMobject(f"{num}", color = circle.get_color()).move_to(circle.get_center())
            for num, circle in zip(["1", "2"], new_circles)
        ])
        group = VGroup(line1, line2, center_circle, new_circles, numbers)
        group.move_to(ORIGIN)
        self.add(group)
        self.wait()


class KissingCirclesSimplifiedExplanation(Scene):
    """Invert the two-lines-plus-circle picture to explain the tangent circles."""
    CONFIG = {
        "dashed_vmob_config" : {
            "num_dashes" : 30,
            "positive_space_ratio" : 0.6,
        },
        "line_colors" : [GREEN, BLUE],
        "center_color" : MAROON_B,
        "circle_colors" : [RED, ORANGE],
    }

    def construct(self):
        self.add_backgrounds()
        self.show_process()

    def add_backgrounds(self):
        """Build the simple ("inverted") picture and its inverse through a circle."""
        N = 5
        line1 = Line(UP + N*LEFT, UP + N*RIGHT, stroke_width = 2, color = self.line_colors[0])
        line2 = Line(DOWN + N*LEFT, DOWN + N*RIGHT, stroke_width = 2, color = self.line_colors[1])
        center_circle = FineCircle(radius = 1, stroke_width = 2, color = self.center_color)
        new_circle1 = FineCircle(radius = 1, stroke_width = 5, color = self.circle_colors[0])
        new_circle1.next_to(center_circle, LEFT, buff = 0)
        new_circle2 = FineCircle(radius = 1, stroke_width = 5, color = self.circle_colors[1])
        new_circle2.next_to(center_circle, RIGHT, buff = 0)
        inv_old_group = VGroup(line1, line2, center_circle)
        inv_new_group = VGroup(new_circle1, new_circle2)
        inv_group = VGroup(inv_old_group, inv_new_group)
        inv_group.rotate(-PI*2/5)
        inv_group.shift(3*RIGHT)
        circle = FineCircle(radius = 3.5, color = YELLOW)
        circle.shift(2*LEFT)
        circle_center = Dot(circle.get_center(), color = YELLOW)
        remark_circle = TextMobject("反演圆", color = YELLOW)
        remark_circle.next_to(circle.get_bottom(), UP)
        remark_center = VGroup(*[
            Arrow(DL, UR, color = YELLOW, buff = 0).scale(0.3),
            TextMobject("反演中心", color = YELLOW).scale(0.8),
        ])
        remark_center.arrange_submobjects(DL, buff = 0)
        remark_center.next_to(circle_center, DL, buff = 0.1)
        # Invert the simple picture back to recover the kissing-circles figure.
        orig_old_group = VGroup(*[
            InversedVMobject(mob, circle, use_dashed_vmob = False, match_original_style = True)
            for mob in inv_old_group
        ])
        orig_new_group = VGroup(*[
            InversedVMobject(mob, circle, use_dashed_vmob = False, match_original_style = True)
            for mob in inv_new_group
        ])
        for mob in orig_old_group:
            mob.clear_updaters()
            mob.set_stroke(width = 2)
        for mob in orig_new_group:
            mob.clear_updaters()
            mob.set_stroke(width = 5)
            mob.set_fill(opacity = 0.1)
        self.add(orig_old_group)
        self.add(circle, circle_center, remark_circle, remark_center)
        self.circle = circle
        self.inv_old_group = inv_old_group
        self.inv_new_group = inv_new_group
        self.orig_old_group = orig_old_group
        self.orig_new_group = orig_new_group

    def show_process(self):
        """Animate: draw the simple picture, slide in the two tangent circles, invert."""
        dashed_inv_old_group = VGroup(*[
            DashedVMobject(mob, **self.dashed_vmob_config)
            for mob in self.inv_old_group
        ])
        dashed_inv_new_group = VGroup(*[
            DashedVMobject(mob, **self.dashed_vmob_config)
            for mob in self.inv_new_group
        ])
        self.play(ShowCreation(dashed_inv_old_group, lag_ratio = 0.05), run_time = 3)
        self.wait()
        # Slide two copies of the center circle into the tangent positions.
        dashed_copys = VGroup(*[dashed_inv_old_group[-1].deepcopy() for k in range(2)])
        dashed_copys.generate_target()
        for mob_copy, mob_template in zip(dashed_copys.target, dashed_inv_new_group):
            mob_copy.match_style(mob_template)
            mob_copy.move_to(mob_template.get_center())
        self.play(MoveToTarget(dashed_copys), run_time = 3)
        self.remove(dashed_copys)
        self.add(dashed_inv_new_group)
        self.wait()
        self.play(DrawBorderThenFill(self.orig_new_group), run_time = 3)
        self.wait(2)
        self.play(
            FadeOut(dashed_inv_new_group),
            FadeOut(dashed_inv_old_group),
            FadeOut(self.orig_new_group),
        )
        self.wait()


class DifferentTangentTypesWithSameConclusion(KissingCirclesPuzzle):
    """Nudge the generating points: a fourth tangent circle always exists."""
    CONFIG = {
        "random_seed" : 570,
        "num_of_nudges" : 5,
        "max_step" : 0.5,
        "color_1" : ORANGE,
        "color_2" : RED,
    }

    def construct(self):
        super().show_figure()
        self.dots_l.save_state()
        self.dots_r.save_state()
        # Reveal both solution circles for each configuration.
        for dfc in [self.dfc_l, self.dfc_r]:
            dfc.add_new_circles()
            dfc.get_orig_circles().set_stroke(width = 2)
            c4_1, c4_2 = dfc.get_new_circles()
            c4_1.set_color(self.color_1)
            c4_2.set_color(self.color_2)
        self.add(self.dfc_l, self.dfc_r)
        for k in range(self.num_of_nudges):
            for dot in it.chain(self.dots_l, self.dots_r):
                dot.generate_target()
                dot.target.shift(get_random_vector(self.max_step))
            anims = AnimationGroup(*[
                MoveToTarget(dot, path_arc = PI/3., run_time = 1.5)
                for dot in it.chain(self.dots_l, self.dots_r)
            ], run_time = 2)
            self.play(anims)
            self.wait()
        self.play(self.dots_l.restore, self.dots_r.restore, run_time = 1.5)


class LineToCircleInversionRevisited(LineToCircleInversion):
    """Re-show the line-to-circle inversion, annotated with the length formulas."""
    def construct(self):
        super().construct()
        self.remove_conclusions()
        self.add_explanation()

    def remove_conclusions(self):
        # Drop the parent scene's textual conclusion, keeping only the figure.
        self.remove(self.bg_rect)
        self.remove(self.conclusions)

    def add_explanation(self):
        # Radius label R on the inversion circle.
        radius = Line(
            self.circle_O.get_left(), self.circle_O.get_center(),
            color = self.color_circle, stroke_width = 1,
        )
        radius_text = TexMobject("R", color = self.color_circle)
        radius_text.next_to(radius, UP, buff = 0.1)
        radius_group = VGroup(radius, radius_text)
        radius_group.rotate(-PI/12, about_point = self.circle_O.get_center())
        # |OA| = d implies |OA'| = R^2 / d.
        remark_length = TexMobject("|OA| = d", "\\Downarrow", "|OA'| = \dfrac{R^2}{d}")
        remark_length.arrange_submobjects(DOWN)
        remark_length.scale(1.2)
        remark_length[0].set_color(self.color_orig)
        remark_length[-1].set_color(self.color_inv)
        remark_length.to_edge(RIGHT)
        self.add(radius_group, remark_length)
        self.wait()


class CircleToCircleInversionRevisited(CircleToCircleInversionProof):
    """Re-show the circle-to-circle proof figure with the length formulas."""
    def construct(self):
        super().add_backgrounds()
        super().show_left_and_right_points()
        super().show_random_point()
        super().show_similar_triangles()
        self.arrange_elements()
        self.add_explanation()

    def arrange_elements(self):
        """Freeze the parent scene's figure in a convenient static layout."""
        self.angle_tracker.set_value(PI/3)
        self.remove(self.remark_O)
        self.remove(self.ai_OAP, self.ai_OBP, self.ai_OPiAi, self.ai_OPiBi)
        self.add(self.inv_circle)
        self.add(self.dots_P, self.labels_P)
        self.add(self.dots_AB, self.labels_AB)
        self.add(self.aux_lines, self.rtais)
        # I: the center of the image circle (not the inverse of C!).
        dot_I = Dot(self.inv_circle.get_center())
        label_I \
        = DotLabel("I", dot_I, position = DOWN, label_buff = 0.15).scale(0.8)
        for mob in (dot_I, label_I):
            mob.set_sheen_direction(RIGHT)
            mob.set_color([self.color_B, self.color_A])
        remark_I = TextMobject("反形的圆心(并非$C$的反点!)")
        remark_I.scale(0.5)
        remark_I.next_to(label_I, DOWN, buff = 0.1)
        self.add(dot_I, label_I, remark_I)

    def add_explanation(self):
        """Annotate the figure with the radii and the derived length formulas."""
        # Radius labels: R for the inversion circle, r for circle C.
        for circle, color, text, angle in zip(
            [self.circle_O, self.circle_C], [self.color_O, MAROON_B],
            ["R", "r"], [-PI/12, PI/3]
        ):
            radius = Line(
                circle.get_left(), circle.get_center(),
                color = color, stroke_width = 1,
            )
            radius_text = TexMobject(text, color = color)
            radius_text.next_to(radius, UP, buff = 0.1)
            radius_group = VGroup(radius, radius_text)
            radius_group.rotate(angle, about_point = circle.get_center())
            self.add(radius_group)
        remark_length_A = TexMobject("|OA| = d-r", "\\Rightarrow", "|OA'| = \dfrac{R^2}{d-r}")
        remark_length_B = TexMobject("|OB| = d+r", "\\Rightarrow", "|OB'| = \dfrac{R^2}{d+r}")
        remark_length_A[0].set_color(self.color_A)
        remark_length_A[-1].set_color(self.color_A)
        remark_length_B[0].set_color(self.color_B)
        remark_length_B[-1].set_color(self.color_B)
        length_group = VGroup(remark_length_A, remark_length_B)
        length_group.arrange_submobjects(DOWN, buff = 0.4)
        brace = Brace(length_group, RIGHT)
        arrow = TexMobject("\\Rightarrow")
        # Combined results: the image circle's diameter and its center distance.
        remarks = VGroup(
            TexMobject("|A'B'| = \\dfrac{2 R^2 r}{|d^2-r^2|}"),
            TexMobject("|OI| = \\dfrac{R^2 d}{|d^2-r^2|}")
        )
        remarks.arrange_submobjects(DOWN, aligned_edge = LEFT)
        remarks.set_color(MAROON_B)
        result_group = VGroup(brace, arrow, remarks)
        result_group.arrange_submobjects(RIGHT)
        result_group.next_to(length_group, RIGHT)
        remark_group = VGroup(length_group, result_group)
        remark_group.center().to_edge(DOWN, buff = 0.2)
        bg_rect = BackgroundRectangle(remark_group, fill_opacity = 0.9)
        self.add(bg_rect, remark_group)
        self.wait()


class DescartesTheoremExamples(Scene):
    """Two worked examples of Descartes' circle theorem (outer and inner cases)."""
    CONFIG = {
        "circle_colors" : [MAROON_B, RED, GREEN, BLUE],
        "curvs_outer" : [3, 6, 7, 34],
        "curvs_inner" : [10, 15, 19, -6],
    }

    def setup(self):
        # Map each curvature symbol k_i to its display color.
        self.text_color_map = dict(
            zip(["{k_1}", "{k_2}", "{k_3}", "{k_4}"], self.circle_colors)
        )

    def construct(self):
        self.add_title()
        self.add_outer_dfc()
        self.add_inner_dfc()

    def add_title(self):
        # Descartes' relation: (k1+k2+k3+k4)^2 = 2(k1^2+k2^2+k3^2+k4^2).
        title = TexMobject(
            "\\left(", "{k_1}", "+", "{k_2}", "+", "{k_3}", "+", "{k_4}", "\\right) ^2",
            "= 2 \\left(",
            "{k_1}","^2 +","{k_2}","^2 +","{k_3}","^2 +","{k_4}","^2",
            "\\right)"
        )
        title.set_color_by_tex_to_color_map(self.text_color_map)
        title.scale(1.2)
        title.to_edge(UP, buff = 0.2)
        self.add(title)

    def add_outer_dfc(self):
        """Example with all-external tangency: curvatures 3, 6, 7, 34."""
        r1, r2, r3, r4 = [1./curv for curv in self.curvs_outer]
        p1, p2, p3 = [
            VectorizedPoint(center)
            for center in calc_centers_by_radii(r1, r2, r3, init_angle = PI*2/3)
        ]
        outer_dfc = DescartesFourCircles(p1, p2, p3, show_new_circles = False)
        c1, c2, c3 = outer_dfc.get_orig_circles()
        c4 = outer_dfc.get_new_circles()[0]
        outer_circles = VGroup(c1, c2, c3, c4)
        outer_circles.clear_updaters()
        outer_circles.set_height(5.5)
        outer_circles.to_corner(DL)
        texts = VGroup(*[
            TexMobject(f"k_{num}", "=", f"{curv}") \
                .scale(0.8) \
                .move_to(circle.get_center())
            for num, curv, circle in zip(range(1, 5), self.curvs_outer, outer_circles)
        ])
        for circle, text, color in zip(outer_circles, texts, self.circle_colors):
            circle.set_color(color)
            text.set_color(color)
        # The fourth label does not fit inside its tiny circle; point at it.
        texts[-1].shift(2.5*RIGHT+1.2*UP)
        arrow = Arrow(
            texts[-1].get_bottom(), outer_circles[-1].get_right(),
            path_arc = -PI*2/3, buff = 0.1,
        ).set_color(self.circle_colors[-1])
        outer_group = VGroup(outer_circles, texts, arrow)
        self.add(outer_group)

    def add_inner_dfc(self):
        """Example with an enclosing circle: curvatures 10, 15, 19, -6
        (negative curvature = internally tangent outer circle)."""
        r1, r2, r3, r4 = [1./curv for curv in self.curvs_inner]
        p1, p2, p3 = [
            VectorizedPoint(center)
            for center in calc_centers_by_radii(r1, r2, r3, init_angle = -PI/7)
        ]
        inner_dfc = DescartesFourCircles(p1, p2, p3, show_new_circles = False)
        c1, c2, c3 = inner_dfc.get_orig_circles()
        c4 = inner_dfc.get_new_circles()[1]
        inner_circles = VGroup(c1, c2, c3, c4)
        inner_circles.clear_updaters()
        inner_circles.set_height(5.5)
        inner_circles.to_corner(DR)
        inner_texts = VGroup(*[
            TexMobject(f"k_{num}", "=", f"{curv}") \
                .scale(0.8) \
                .move_to(circle.get_center())
            for num, curv, circle in zip(range(1, 5), self.curvs_inner, inner_circles)
        ])
        for circle, text, color in zip(inner_circles, inner_texts, self.circle_colors):
            circle.set_color(color)
            text.set_color(color)
        inner_texts[-1].shift(2.8*LEFT+2.7*UP)
        inner_arrow = Arrow(
            inner_texts[-1].get_critical_point(DOWN),
            inner_texts[-1].get_critical_point(DOWN)+0.7*DR,
            buff = 0.1,
        ).set_color(self.circle_colors[-1])
        inner_group = VGroup(inner_circles, inner_texts, inner_arrow)
        self.add(inner_group)
        self.wait()
        self.inner_circles = inner_circles
        self.inner_texts = inner_texts
        self.inner_arrow = inner_arrow


class DFCInversionProofP1(DescartesTheoremExamples):
    """First part of the inversion proof of Descartes' theorem (schematic figure)."""
    CONFIG = {
        "remark_scale_text" : "示意图,图像并非真实比例",
        "orig_label_texts" : ["C_1", "C_2", "C_3", "C_4"],
        "inv_label_texts" : ["C_1'", "C_2'", "C_3'", "C_4'"],
    }

    def construct(self):
        super().add_inner_dfc()
        self.arrange_elements()
        self.add_labels()
        self.add_inversion_center()
        self.add_mapsto_symbol()
        self.add_not_to_scale_remark()
        self.wait()

    def arrange_elements(self):
        """Stack the example figure above its inverted normal form."""
        self.remove(self.inner_texts, self.inner_arrow)
        self.inner_circles.center().shift(4*UP)
        normal_form = FourCirclesNormalForm()
        normal_form.shift(4*DOWN)
        self.add(normal_form)
        self.normal_form = normal_form

    def add_labels(self):
        """Label the original circles C_i and their images C_i'."""
        orig_labels = VGroup()
        for n, (circle, text) in enumerate(zip(self.inner_circles, self.orig_label_texts)):
            label = TexMobject(text).scale(1.2)
            label.set_color(circle.get_color())
            label.move_to(circle.get_center())
            orig_labels.add(label)
        inv_labels = VGroup()
        for n, (circle, text) in enumerate(zip(self.normal_form, self.inv_label_texts)):
            label = TexMobject(text).scale(1.2)
            label.set_color(circle.get_color())
            label.move_to(circle.get_center())
            inv_labels.add(label)
        c1, c2, c3, c4 = self.inner_circles
        l1, l2, l3, l4 = orig_labels
        c1i, c2i, c3i, c4i = self.normal_form
        l1i, l2i, l3i, l4i = inv_labels
l4.next_to(c4.get_bottom(), UP, buff = 0.3) l3i.next_to(c3i, DOWN).to_edge(RIGHT) l4i.next_to(c4i, UP).to_edge(RIGHT) self.add(orig_labels, inv_labels) self.orig_labels = orig_labels self.inv_labels = inv_labels def add_inversion_center(self): c1, c2, c3, c4 = self.inner_circles inv_center = get_tangent_point(c3, c4) dot_O = Dot(inv_center, color = YELLOW) label_O = TexMobject("O", color = YELLOW).next_to(dot_O, UP) remark_O = TextMobject("反演中心", color = YELLOW) remark_O.next_to(dot_O, RIGHT, buff = 1.5) arrow_O = Arrow(remark_O.get_left(), dot_O.get_right(), color = YELLOW, buff = 0.2) orig_center_group = VGroup(dot_O, label_O, remark_O, arrow_O) inv_dot_O = VectorizedPoint() inv_dot_O.next_to(self.normal_form[-1], UP, buff = 1.4) inv_dot_O.shift(2*RIGHT) inv_center_group = orig_center_group.deepcopy() inv_center_group.shift(inv_dot_O.get_center() - dot_O.get_center()) self.add(orig_center_group, inv_center_group) self.orig_center_group = orig_center_group self.inv_center_group = inv_center_group def add_mapsto_symbol(self): mapsto = TexMobject("\\mapsto") mapsto.rotate(-PI/2) mapsto.scale(2.5) mapsto.next_to(self.inner_circles, DOWN) remark_mapsto = TextMobject("反演变换") remark_mapsto.next_to(mapsto, LEFT) self.add(mapsto, remark_mapsto) def add_not_to_scale_remark(self): remark_scale = TextMobject("(" + self.remark_scale_text + ")") remark_scale.scale(0.75) remark_scale.next_to(6.5*DL, RIGHT, buff = 0) self.add(remark_scale) class DFCInversionProofP2(DFCInversionProofP1): CONFIG = { "remark_scale_text" : "示意图,反演圆未标出,且图像并非真实比例", "inv_label_texts" : ["C_1'", "C_2'", "C_3':y=-1", "C_4':y=1"], "inv_center_coord_text" : "(x_0, y_0) \\, (y_0>1)", "circle_center_coord_texts" : ["(-1,0)", "(1,0)"], } def construct(self): super().construct() self.change_center_remarks() self.add_coord_system() self.change_inv_labels() self.wait() def change_center_remarks(self): for center_group in (self.orig_center_group, self.inv_center_group): dot, label, remark, arrow = center_group 
self.remove(remark, arrow) if center_group is self.inv_center_group: coord = TexMobject(self.inv_center_coord_text) coord.next_to(dot, RIGHT) coord.set_color(dot.get_color()) self.add(coord) def add_coord_system(self): c1, c2, c3, c4 = self.normal_form center_point = (c1.get_center() + c2.get_center()) / 2 unit_size = c1.get_height()/2 coord_system = Axes( center_point = center_point, number_line_config = {"unit_size" : unit_size}, y_min = -1.8, y_max = 2.8, ) self.add(coord_system) self.coord_system = coord_system def change_inv_labels(self): l1i, l2i, l3i, l4i = self.inv_labels for label, x_coord, coord_text in zip([l1i, l2i], [-1, 1], self.circle_center_coord_texts): center = self.coord_system.c2p(x_coord, 0) label.next_to(center, UP) dot_i = Dot(center, radius = 0.1).set_color(label.get_color()) coord_i = TexMobject(coord_text).set_color(label.get_color()).next_to(center, DOWN) self.add(dot_i, coord_i) ##### ## Inversion Advanced P2 Scenes class ApollonianGasketConstruction(ApollonianGasketScene): CONFIG = { "max_iter" : 8, "curvatures" : [2, 2, 3], "init_angle" : 0, "curv_thres" : 30000, "ag_config": { "agc_config" : { "radius_thres" : 1e-3, "circle_color" : BLUE, "label_color" : WHITE, }, }, "color_curr" : YELLOW, "wait_time" : 2, } def construct(self): r1, r2, r3 = [1./curv for curv in self.curvatures] p1, p2, p3 = calc_centers_by_radii(r1, r2, r3, init_angle = self.init_angle) agc1 = AGCircle(p1, r1, parents = None, **self.ag_config["agc_config"]) agc2 = AGCircle(p2, r2, parents = None, **self.ag_config["agc_config"]) agc3 = AGCircle(p3, r3, parents = None, **self.ag_config["agc_config"]) remark = TextMobject("(圆内数字为该圆的曲率)") remark.scale(0.75).to_corner(DL) self.add(remark) for k in range(self.max_iter): agcs_copy = [agc.deepcopy() for agc in (agc1, agc2, agc3)] ag = ApollonianGasket( *agcs_copy, num_iter = k, curv_thres = self.curv_thres, **self.ag_config ) iter_num = VGroup( TextMobject("迭代次数:"), TexMobject(f"{k}") ).arrange_submobjects(RIGHT).scale(1.5) 
iter_num.to_edge(LEFT, buff = 1) ag.scale(3.8) ag.shift(np.array([0, 3.8, 0]) - ag.get_top() + 3*RIGHT) VGroup(*ag.agc_list[-1]).set_color(self.color_curr) self.add(ag, iter_num) self.wait(self.wait_time) if k != self.max_iter-1: self.remove(ag, iter_num) class ApollonianGasketExample1(Scene): CONFIG = { "max_iter" : 20, "curvatures" : [3, 6, 7], "curvature_texts" : [-2, 3, 6, 7], "init_angle" : 0, "curv_thres" : 4000, "ag_config": { "agc_config" : { "radius_thres" : 1e-3, "circle_color" : BLUE, "label_color" : WHITE, }, }, "ag_scaling_factor" : 5.2, } def construct(self): r1, r2, r3 = [1./curv for curv in self.curvatures] p1, p2, p3 = calc_centers_by_radii(r1, r2, r3, init_angle = self.init_angle) agc1 = AGCircle(p1, r1, parents = None, **self.ag_config["agc_config"]) agc2 = AGCircle(p2, r2, parents = None, **self.ag_config["agc_config"]) agc3 = AGCircle(p3, r3, parents = None, **self.ag_config["agc_config"]) ag_seed = ApollonianGasket( *[agc.deepcopy() for agc in (agc1, agc2, agc3)], num_iter = 0, curv_thres = self.curv_thres, **self.ag_config ) ag_result = ApollonianGasket( *[agc.deepcopy() for agc in (agc1, agc2, agc3)], num_iter = self.max_iter, curv_thres = self.curv_thres, **self.ag_config ) ag_seed_center = ag_seed[0][0].get_right() ag_result_center = ag_result[0][0].get_right() arrow = Arrow(LEFT, RIGHT) figure_group = VGroup(ag_seed, ag_result, arrow) for ag, center, direction in zip( [ag_seed, ag_result], [ag_seed_center, ag_result_center], [4*LEFT, 4*RIGHT]): ag.scale(self.ag_scaling_factor) ag.shift(direction - center) figure_group.shift(DOWN) k1, k2, k3, k4 = list(map(str, self.curvature_texts)) title = TexMobject( f"({k1}+{k2}+{k3}+{k4})^2 = 2\\left[({k1})^2+{k2}^2+{k3}^2+{k4}^2 \\right]" ) title.set_width(13) title.set_color(YELLOW) title.to_edge(UP) self.add(figure_group, title) self.wait() class ApollonianGasketExample2(ApollonianGasketExample1): CONFIG = { "max_iter" : 20, "curvatures" : [5, 8, 12], "curvature_texts" : [-3, 5, 8, 12], 
        "curv_thres" : 5000,
        "ag_config": {
            "agc_config" : {
                "radius_thres" : 5e-4,
                "circle_color" : BLUE,
                "label_color" : WHITE,
            },
        },
        "ag_scaling_factor" : 8,
    }


# A doubly-infinite chain of mutually tangent circles (a Steiner-like chain
# shrinking toward two limit points) with a loxodromic-spiral curve threading
# the tangency points, followed by a long zoom-in / zoom-out camera move.
class LoxodromicSpiralInTangentCircles(Scene):
    CONFIG = {
        "max_iter" : 20,
        "agc_config" : {
            "radius_thres" : 1,
            "circle_color" : BLUE,
            "label_color" : WHITE,
        },
        "curve_config" : {
            "color" : YELLOW,
            "stroke_width" : 2,
        },
        "alpha" : 0.6,
        "dashed_line_config" : {
            "color" : GREY,
            "stroke_width" : 0.5,
            "num_dashes" : 200,
            "positive_space_ratio" : 0.6,
        }
    }

    def construct(self):
        self.generate_circles()
        self.generate_curves()
        self.generate_labels()
        self.generate_lines()
        self.add_elements()
        self.zooming_in()

    def generate_circles(self):
        # Five seed circles (the middle one has negative curvature, i.e. it
        # encloses the others); then extend the chain max_iter times in both
        # directions using the Descartes relation.
        agcm2 = AGCircle(2/3.*UP, 1/3., **self.agc_config)
        agcm1 = AGCircle(RIGHT/2, 1/2., **self.agc_config)
        agczr = AGCircle(ORIGIN, -1, **self.agc_config)
        agcp1 = AGCircle(LEFT/2, 1/2., **self.agc_config)
        agcp2 = AGCircle(2/3.*DOWN, 1/3., **self.agc_config)
        agc_list = [agcm2, agcm1, agczr, agcp1, agcp2]
        for n in range(self.max_iter):
            # New circle on the "minus" end of the chain...
            A, B, C, known_agc = agc_list[:4]
            agc_m_k, agc_m_c = calc_new_agc_info(A, B, C, known_agc = known_agc)
            agc_m = AGCircle(agc_m_c, 1./agc_m_k, parents = (A, B, C), **self.agc_config)
            # ...and on the "plus" end.
            known_agc, C, B, A = agc_list[-4:]
            agc_p_k, agc_p_c = calc_new_agc_info(C, B, A, known_agc = known_agc)
            agc_p = AGCircle(agc_p_c, 1./agc_p_k, parents = (C, B, A), **self.agc_config)
            agc_list.insert(0, agc_m)
            agc_list.append(agc_p)
        agc_group = VGroup(*agc_list)
        agc_group.set_height(7.8)
        self.agc_list = agc_list
        self.agc_group = agc_group

    def generate_curves(self):
        # Smooth cubic segments through consecutive tangency points; alpha
        # controls how far the control points pull toward each circle center.
        agc_ps = self.agc_list[-self.max_iter-4:]
        agc_ps_points = []
        loxo_curve_p_solid = VMobject(**self.curve_config)
        for k in range(len(agc_ps)-2):
            if k != 0:
                c1, c2, c3 = agc_ps[k], agc_ps[k+1], agc_ps[k+2]
                pt1 = get_tangent_point(c1, c2)
                pt2 = get_tangent_point(c2, c3)
                p = c2.get_center()
                if k != 1:
                    agc_ps_points.extend(
                        [pt1,
                         p*(1-self.alpha)+pt1*self.alpha,
                         p*(1-self.alpha)+pt2*self.alpha,
                         pt2]
                    )
                else:
                    # The segment through the huge enclosing circle needs
                    # hand-tuned control points.
                    agc_ps_points.extend(
                        [pt1,
                         p*0.7+pt1*0.3,
                         p*0.6+pt2*0.4,
                         pt2]
                    )
            else:
                # Lead-in segment coming from far off-screen on the left.
                c1, c2 = agc_ps[1], agc_ps[2]
                pt = get_tangent_point(c1, c2)
                agc_ps_points.extend([8*LEFT, 7*LEFT, 6*LEFT, pt])
        loxo_curve_p_solid.append_points(agc_ps_points)
        # The other half of the spiral is the 180° rotation of this one.
        loxo_curve_m_solid = loxo_curve_p_solid.deepcopy()
        loxo_curve_m_solid.rotate(PI, about_point = self.agc_group.get_center())
        self.loxo_curve_p_solid = loxo_curve_p_solid
        self.loxo_curve_m_solid = loxo_curve_m_solid

    def generate_labels(self):
        # Label each circle C_{-max_iter-2} .. C_{max_iter+2}, sized to fit.
        labels = VGroup(*[
            TexMobject("C_{%d}" % num, background_stroke_width = 0)
            for num in range(-self.max_iter-2, self.max_iter+3)
        ])
        for label, circle in zip(labels, self.agc_group):
            label.set_height(circle.get_height()*0.15)
            label.move_to(circle.get_center())
        # C_0 is the enclosing circle; put its label near the upper-left rim.
        label_c0 = labels[self.max_iter+2]
        label_c0.set_height(0.8)
        label_c0.next_to(self.agc_group.get_critical_point(UL), DR, buff = 0.1)
        self.labels = labels

    def generate_lines(self):
        # Dashed polyline through the circle centers (one per half-chain).
        agc_ps = self.agc_list[-self.max_iter-2:]
        line_p_solid = VMobject(**self.dashed_line_config)
        line_p_solid_corners = [8*LEFT]
        for circle in agc_ps:
            line_p_solid_corners.append(circle.get_center())
        line_p_solid.set_points_as_corners(line_p_solid_corners)
        line_m_solid = line_p_solid.deepcopy()
        line_m_solid.rotate(PI, about_point = self.agc_group.get_center())
        self.line_p_solid = line_p_solid
        self.line_m_solid = line_m_solid

    def add_elements(self):
        figure = VGroup(
            self.agc_group,
            self.loxo_curve_p_solid, self.loxo_curve_m_solid,
            self.line_p_solid, self.line_m_solid,
            self.labels,
        )
        self.add(figure)
        self.figure = figure

    def zooming_in(self):
        # Center on the last circle, zoom in by 2.5x ten times, then restore.
        self.figure.save_state()
        self.wait(0.5)
        self.play(
            ApplyMethod(self.figure.shift, -self.agc_group[-1].get_center()),
            run_time = 2,
        )
        self.wait()
        for k in range(10):
            self.play(
                ApplyMethod(self.figure.scale, 2.5, {"about_point" : self.agc_group[-1].get_center()}),
                run_time = 2,
            )
            self.wait()
        self.play(self.figure.restore, run_time = 15)
        self.wait(2)


# Ford circles over the number line for denominators up to q_max, with a
# single zoom-in at x = 1/2.
class ShowFordCircles(ZoomInOnFordCircles):
    CONFIG = {
        "q_max" : 30,
    }

    def construct(self):
        self.setup_axes()
self.setup_circles_and_labels() self.add_remarks() self.first_zoom_in() self.wait() def first_zoom_in(self): self.zoom_in_on(1/2., 6) def add_remarks(self): nl_text = TextMobject("数轴") nl_arrow = Arrow(ORIGIN, UP).match_height(nl_text) nl_remark = VGroup(nl_arrow, nl_text) nl_remark.scale(0.8) nl_remark.set_color(LIGHT_GREY) nl_remark.arrange_submobjects(RIGHT, buff = 0.1) nl_remark.next_to(self.axes.coords_to_point(0, 0), DOWN, buff = 0.1) nl_remark.to_edge(LEFT, buff = 0.15) frac_remark = TextMobject("圆内分数为圆心横坐标") frac_remark.scale(0.6) frac_remark.to_corner(DL, buff = 0.15) self.add(nl_remark, frac_remark) class ShowFordCirclesDetails(ShowFordCircles): CONFIG = { "q_max" : 100, } def construct(self): super().construct() self.further_zoom_in() def setup_circles_and_labels(self): circles = VGroup() labels = VGroup() for q in range(1, self.q_max+1): for p in get_coprime_numers_by_denom(q): if (q <= 40) or (0.6 <= p/q <= 0.8): circle = self.generate_circle_by_fraction(p, q) circle.add_updater( lambda m: m.set_stroke(width = get_stroke_width_by_height(m.get_height())) ) label = AssembledFraction(p, q) label.set_height(circle.get_height() * self.label_height_factor) label.move_to(circle.get_center()) circles.add(circle) labels.add(label) self.add(circles, labels) self.circles = circles self.labels = labels def further_zoom_in(self): self.acl = VGroup(self.axes, self.circles, self.labels) self.acl.save_state() self.wait(0.5) self.play_zooming_animation(1/np.sqrt(2), 9, run_time = 5) self.wait() self.play_zooming_animation(0.73, 5, run_time = 4) self.wait() self.play_zooming_animation(0.74, 5, run_time = 4) self.wait() self.play(self.acl.restore, run_time = 5) self.wait(2) class ProveFordCirclesPropertiesP1(Scene): CONFIG = { "c1_frac" : [2, 3], "c2_frac" : [3, 4], "c3_frac" : [5, 7], "circle_config" : {"stroke_color" : BLUE, "stroke_width" : 2,}, "line_config" : {"stroke_color" : GREY, "stroke_width" : 2,}, "aux_line_config" : {"stroke_color" : GREY, "stroke_width" : 
0.8,}, "polygon_config" : {"fill_color" : GREY, "fill_opacity" : 0.4, "stroke_width" : 0,}, } def setup(self): a, b = self.c1_frac c, d = self.c2_frac p, q = self.c3_frac r1 = 1/(2*b**2) r2 = 1/(2*d**2) r3 = 1/(2*q**2) c1_center = a/b*RIGHT + r1*UP c2_center = c/d*RIGHT + r2*UP c3_center = p/q*RIGHT + r3*UP c1 = Circle(arc_center = c1_center, radius = r1, **self.circle_config) c2 = Circle(arc_center = c2_center, radius = r2, **self.circle_config) c3 = Circle(arc_center = c3_center, radius = r3, **self.circle_config) c1_dot = SmallDot(color = GREY) c1_dot.add_updater(lambda m: m.move_to(c1.get_center())) c2_dot = SmallDot(color = GREY) c2_dot.add_updater(lambda m: m.move_to(c2.get_center())) c3_dot = SmallDot(color = GREY) c3_dot.add_updater(lambda m: m.move_to(c3.get_center())) line = Line( 2*c1.get_bottom()-c2.get_bottom(), 2*c2.get_bottom()-c1.get_bottom(), **self.line_config ) VGroup(c1, c2, c3, line).set_height(6).center().to_edge(UP) aux_line_1 = Line(c1.get_center(), c1.get_bottom(), **self.aux_line_config) aux_line_2 = Line(c2.get_center(), c2.get_bottom(), **self.aux_line_config) aux_line_3 = Line(c1.get_center(), c2.get_center(), **self.aux_line_config) aux_line_4 = Line(c1.get_bottom(), c2.get_bottom(), **self.aux_line_config) \ .shift(c2.get_height()/2*UP) polygon = Polygon( c1.get_center(), c2.get_center(), aux_line_4.get_start_and_end()[0], **self.polygon_config, ) l1 = TexMobject("\\dfrac{a}{b}").next_to(c1, DOWN) l2 = TexMobject("\\dfrac{c}{d}").next_to(c2, DOWN) l3 = TexMobject("\\dfrac{a+c}{b+d}").next_to(c3, DOWN) self.orig_group = VGroup(c1, c2, line, c1_dot, c2_dot, l1, l2) self.aux_group = VGroup(aux_line_1, aux_line_2, aux_line_3, aux_line_4, polygon) self.new_group = VGroup(c3, c3_dot, l3) def construct(self): self.add(self.orig_group, self.aux_group) self.wait() class ProveFordCirclesPropertiesP2(ProveFordCirclesPropertiesP1): def construct(self): self.add(self.orig_group, self.new_group) self.wait() class 
ShowFordCirclesFareySum(ZoomInOnFordCircles): pass # A rename, that's it. class DFCInversionProofP3(DFCInversionProofP2): CONFIG = { "remark_scale_text" : "示意图,反演圆未标出,且图像并非真实比例", "inv_label_texts" : ["C_1'", "C_2'", "C_3':\\mathrm{Im}(z)=-1", "C_4':\\mathrm{Im}(z)=1"], "inv_center_coord_text" : "z_0 = x_0+iy_0\\, (y_0>1)", "circle_center_coord_texts" : ["-1", "1"], } def construct(self): super().construct() self.wait() def add_coord_system(self): c1, c2, c3, c4 = self.normal_form center_point = (c1.get_center() + c2.get_center()) / 2 unit_size = c1.get_height()/2 coord_system = NumberPlane( center_point = center_point, number_line_config = {"unit_size" : unit_size}, y_min = -3, y_max = 3, background_line_style = { "stroke_color" : GREY, "stroke_width" : 1.5, "stroke_opacity" : 0.8, }, ) aux_coord_system = Axes( center_point = center_point, number_line_config = {"unit_size" : unit_size}, y_min = -3, y_max = 3, stroke_opacity = 0.8, ) self.add(coord_system, aux_coord_system) self.coord_system = coord_system class NormalFormIn3D(ThreeDScene): CONFIG = { "axis_unit_size" : 1.5, "axis_min" : -1.5, "axis_max" : 2.8, "resolution" : (60, 120), "plane_colors" : [GREEN, BLUE], "sphere_colors" : [MAROON_B, RED, PINK], } def construct(self): self.add_3d_stuff() self.add_2d_stuff() def add_3d_stuff(self): self.set_camera_orientation(theta = 70 * DEGREES, phi = 50 * DEGREES) axes = ThreeDAxes( x_min = self.axis_min, x_max = self.axis_max, y_min = self.axis_min, y_max = self.axis_max, z_min = self.axis_min, z_max = self.axis_max, number_line_config = {"unit_size" : self.axis_unit_size}, ) sphere_centers = [ axis.number_to_point(1) for axis in [axes.x_axis, axes.y_axis, axes.z_axis] ] radius = 1/np.sqrt(2) * self.axis_unit_size sphere_dots = VGroup(*[ Sphere( radius = 0.08, resolution = self.resolution, fill_opacity = 1, stroke_width = 0, ).move_to(sphere_center).set_color(color) for sphere_center, color in zip(sphere_centers, self.sphere_colors) ]) spheres = VGroup(*[ Sphere( 
radius = radius, resolution = self.resolution, fill_opacity = 0.6, stroke_width = 0.5, ).move_to(sphere_center).set_color(color) for sphere_center, color in zip(sphere_centers, self.sphere_colors) ]) planes = VGroup(*[ VGroup(*[ Square( side_length = 1, fill_opacity = fill_opacity, stroke_color = GREY, stroke_width = 0.3, stroke_opacity = 0.2, ) for k in range(n**2) ]).arrange_in_grid(n, n, buff = 0) \ .apply_matrix(z_to_vector([1, 1, 1])) \ .move_to(np.average(sphere_centers)) \ .shift(radius * normalize(direction)) \ .set_color(color) for n, fill_opacity, direction, color in zip( [7, 8], [0.2, 0.3], [np.ones(3), -np.ones(3)], self.plane_colors, ) ]) figure_group = VGroup(axes, planes, sphere_dots, spheres) figure_group.shift(RIGHT*2+0.5*OUT) self.add(figure_group) self.add(axes) self.add(planes) self.add(sphere_dots, spheres) def add_2d_stuff(self): sphere_remarks = VGroup(*[ TextMobject( "球:圆心为" + f"$({int(x)},{int(y)},{int(z)})$" + \ ",半径为" + "$\\dfrac{1}{\\sqrt{2}}$" ).set_color(color) for (x, y, z), color in zip([RIGHT, UP, OUT], self.sphere_colors) ]).arrange_submobjects(DOWN) plane_remarks = VGroup(*[ TexMobject( "\\text{平面:}" + "x+y+z=1" + sign + "\\dfrac{\\sqrt{3}}{\\sqrt{2}" ).set_color(color) for sign, color in zip(["+", "-"], self.plane_colors) ]).arrange_submobjects(DOWN) remarks = VGroup(sphere_remarks, plane_remarks) remarks.arrange_submobjects(DOWN, aligned_edge = LEFT) remarks.scale(0.8) remarks.to_corner(DR) self.add_fixed_in_frame_mobjects(remarks) self.wait() ##### ## Banner class Banner_Intro(Scene): CONFIG = { "circle_color" : YELLOW, "text_color" : BLUE, "inv_text_color" : BLUE, "circle_center" : 0.8*UP, "circle_radius" : 3, "grid_side_length" : 0.5, "x_range" : 300, "y_range" : 300, "dist_thres" : 300, } def construct(self): circle = Circle(color = self.circle_color, radius = self.circle_radius, stroke_width = 5) circle.move_to(self.circle_center) dot = SmallDot(self.circle_center, color = self.circle_color) text = TextMobject("Inversion", 
color = self.text_color, background_stroke_width = 3) text.rotate(PI/2.) text.move_to(0.4*RIGHT) text.apply_complex_function(np.exp) text.rotate(-PI/2.) text.scale(1.5) text.move_to(0.9*DOWN) inv_text = InversedVMobject(text, circle, use_dashed_vmob = False) inv_text.suspend_updating() inv_text.set_background_stroke(color = "#303030", width = 3) inv_text.set_stroke(width = 0) inv_text.set_fill(color = self.inv_text_color, opacity = 0.5) grid = VGroup(*[ Square( side_length = self.grid_side_length, stroke_width = 0, fill_opacity = 0.3, fill_color = CB_DARK if (i+j)%2==0 else CB_LIGHT ).move_to(self.circle_center + (i*RIGHT+j*UP)*self.grid_side_length) for i in range(-self.x_range, self.x_range+1, 1) for j in range(-self.y_range, self.y_range+1, 1) if np.sqrt(i**2+j**2) * self.grid_side_length < self.dist_thres ]) for square in grid: if is_close_in_R3(square.get_center(), self.circle_center): grid.remove(square) inv_grid = InversedVMobject(grid, circle, use_dashed_vmob = False) self.add(inv_grid, circle, dot, text, inv_text) self.wait() class Banner_AdvancedP1(ApollonianGasketScene): CONFIG = { "curvatures" : [570, 968, 1112], "init_angle" : PI/7, "num_iter" : 20, "curv_thres" : 1e6, "ag_config" : { "agc_config" : { "radius_thres" : 5e-6, "circle_color" : YELLOW, "label_color" : WHITE, }, }, "part_text" : "上篇", } def construct(self): super().construct() ag = self.ag ag.set_height(7) circle_myst = ag.agc_list[0][0] label_myst = circle_myst.label label_question = TexMobject("???") label_question.match_height(label_myst) label_question.move_to(label_myst) self.remove(label_myst) self.add(label_question) part = TextMobject(self.part_text) part.to_corner(DR) self.add(part) class Banner_AdvancedP2(Banner_AdvancedP1): CONFIG = { "part_text" : "下篇", }
40.915864
132
0.599125
= sp - n * unit_vec new_ep = ep + n * unit_vec Line.__init__(self, new_sp, new_ep, **kwargs) class DotLabel(VMobject): CONFIG = { "position" : UP, "label_buff" : 0.25, } def __init__(self, label_text, dot, **kwargs): VMobject.__init__(self, **kwargs) self.dot = dot label = TexMobject(label_text, **kwargs) if self.position is not None: label.add_updater( lambda l: l.next_to(self.dot.get_center(), self.position, buff = self.label_buff) ) self.add(label) def set_label(self, label): label.next_to(self.dot.get_center()) class TwoDotsSegment(Line): def __init__(self, dot_1, dot_2, **kwargs): self.dot_1 = dot_1 self.dot_2 = dot_2 sp, ep = self.get_dots_centers() Line.__init__(self, start = sp, end = ep, **kwargs) self.add_updater(self.set_start_and_end) def get_dots_centers(self): return self.dot_1.get_center(), self.dot_2.get_center() def set_start_and_end(self, line_mob): sp, ep = self.get_dots_centers() line_mob.put_start_and_end_on(sp, ep) class LengthLabel(DecimalNumber): CONFIG = { "num_decimal_places" : 3, "label_height" : 0.3, "label_buff" : 0.3, "offset" : 0, "is_on_opposite_side" : False, } def __init__(self, line_mob, **kwargs): DecimalNumber.__init__(self, **kwargs) self.line_mob = line_mob self.add_updater(self.set_label) def set_label(self, label): label.set_value(self.line_mob.get_length()) label.set_height(self.label_height) label.rotate(self.line_mob.get_angle()) side_factor = -1 if self.is_on_opposite_side else 1 label.move_to( self.line_mob.get_center() \ + self.line_mob.get_vector() / 2 * self.offset \ + side_factor * rotate_vector(self.line_mob.get_unit_vector(), PI/2) * self.label_buff ) def set_offset(self, offset): self.offset = offset return self def switch_side(self): self.is_on_opposite_side = not self.is_on_opposite_side return self class ManyDotsPolygon(VMobject): def __init__(self, *dots, **kwargs): VMobject.__init__(self, **kwargs) self.dots = dots dots_centers = self.get_dots_centers() polygon = Polygon(*dots_centers, **kwargs) 
        polygon.add_updater(self.set_vertices)
        self.add(polygon)

    def get_dots_centers(self):
        return [dot.get_center() for dot in self.dots]

    def set_vertices(self, polygon_mob):
        # Updater: close the polygon by repeating the first vertex.
        vertices = self.get_dots_centers()
        polygon_mob.set_points_as_corners([*vertices, vertices[0]])


# A filled sector marking the angle A-C-B (vertex at C), tracking the three
# anchor dots/points.
class AngleIndicator(VMobject):
    CONFIG = {
        "color" : RED,
        "radius" : 0.2,
        "fill_opacity" : 0.6,
        "is_minor_arc" : True,   # if True always show the <= PI arc
    }

    def __init__(self, dot_A, dot_C, dot_B, **kwargs):
        VMobject.__init__(self, **kwargs)
        self.dot_A = dot_A
        self.dot_C = dot_C
        self.dot_B = dot_B
        sector = Sector()
        sector.add_updater(self.set_sector)
        self.add(sector)
        self.sector = sector

    def get_point_center(self, point_or_mob):
        # Accept either a mobject or a raw point.
        if isinstance(point_or_mob, Mobject):
            return point_or_mob.get_center()
        else:
            return point_or_mob

    def get_point_centers(self):
        return tuple(map(self.get_point_center, [self.dot_A, self.dot_C, self.dot_B]))

    def set_sector(self, mob):
        # Updater: rebuild the sector; shrink it when the arms get short so
        # it never pokes past the midpoints of CA / CB.
        pt_A, pt_C, pt_B = self.get_point_centers()
        start_angle, angle = self.get_angles()
        outer_radius = min([self.radius, get_norm(pt_C - pt_A)/2, get_norm(pt_C - pt_B)/2])
        new_sector = Sector(
            start_angle = start_angle, angle = angle,
            outer_radius = outer_radius,
            color = self.color, fill_opacity = self.fill_opacity,
            stroke_width = 0
        )
        new_sector.move_arc_center_to(self.get_point_center(self.dot_C))
        mob.become(new_sector)

    def get_angles(self):
        # Returns (start_angle, signed sweep); sweep is folded into (-PI, PI]
        # when only the minor arc is wanted.
        pt_A, pt_C, pt_B = self.get_point_centers()
        start_angle = angle_of_vector(pt_A - pt_C)
        end_angle = angle_of_vector(pt_B - pt_C)
        angle = (end_angle - start_angle) % TAU
        if self.is_minor_arc and angle > PI:
            angle -= TAU
        return start_angle, angle


# The classic right-angle marker (small square) at vertex C of angle A-C-B,
# tracking the three anchors.
class RightAngleIndicator(VMobject):
    CONFIG = {
        "color" : WHITE,
        "side_length" : 0.2,
        "line_width" : 1,
        "square_opacity" : 0.5,
    }

    def __init__(self, dot_A, dot_C, dot_B, **kwargs):
        VMobject.__init__(self, **kwargs)
        self.dot_A = dot_A
        self.dot_C = dot_C
        self.dot_B = dot_B
        # Outline and translucent fill kept as separate updated mobjects.
        line = VMobject(stroke_width = self.line_width, fill_opacity = 0)
        square = VMobject(stroke_width = 0, fill_color = self.color, fill_opacity = self.square_opacity)
        line.add_updater(self.set_line)
        square.add_updater(self.set_square)
        self.add(square, line)
        self.line = line
        self.square = square

    def get_point_center(self, point_or_mob):
        # Accept either a mobject or a raw point.
        if isinstance(point_or_mob, Mobject):
            return point_or_mob.get_center()
        else:
            return point_or_mob

    def get_point_centers(self):
        return tuple(map(self.get_point_center, [self.dot_A, self.dot_C, self.dot_B]))

    def get_norm_vectors(self):
        # Unit vectors along the two arms C->A and C->B.
        pt_A, pt_C, pt_B = self.get_point_centers()
        norm_vec_CA = normalize(pt_A - pt_C)
        norm_vec_CB = normalize(pt_B - pt_C)
        return norm_vec_CA, norm_vec_CB

    def get_corner_points(self):
        # Corners of the marker square, shrunk if the arms are short.
        pt_A, pt_C, pt_B = self.get_point_centers()
        norm_vec_CA, norm_vec_CB = self.get_norm_vectors()
        side_length = min([self.side_length, get_norm(pt_A - pt_C)/2, get_norm(pt_B - pt_C)/2])
        return (
            pt_C,
            pt_C + norm_vec_CA * side_length,
            pt_C + norm_vec_CA * side_length + norm_vec_CB * side_length,
            pt_C + norm_vec_CB * side_length
        )

    def set_line(self, line_mob):
        # Updater: only the two outer edges of the square are stroked.
        p, q, r, s = self.get_corner_points()
        line_mob.set_points_as_corners([q, r, s])

    def set_square(self, square_mob):
        p, q, r, s = self.get_corner_points()
        square_mob.set_points_as_corners([p, q, r, s, p])


# A dot that tracks the inverse of another dot with respect to a circle.
class InversedDot(VMobject):
    CONFIG = {
        "color" : PINK,
        "stroke_width" : 3,
        "fill_opacity" : 1,
        "is_hollow" : True,      # hollow style marks an "image" point
        "center_color" : BLACK,  # fill used for the hollow interior
    }

    def __init__(self, orig_dot, circle, **kwargs):
        self.orig_dot = orig_dot
        self.circle = circle
        VMobject.__init__(self, **kwargs)

    def generate_points(self):
        # Called by VMobject.__init__; builds the visible dot and hooks up
        # the position updater.
        if self.is_hollow:
            self.fill_color = self.center_color
        else:
            self.fill_color = self.color
            self.stroke_width = 0
        inv_dot = Dot(ORIGIN, color = self.color)
        self.inv_dot = inv_dot
        self.add(inv_dot)
        self.add_updater_to_inversed_dot()

    def add_updater_to_inversed_dot(self):
        self.inv_dot.add_updater(self.move_inversed_dot)

    def move_inversed_dot(self, inv_dot):
        # Updater: place inv_dot at the inversion of the tracked dot.
        point = self.orig_dot.get_center()
        inv_center = self.circle.get_center()
        radius = self.circle.get_height() / 2.
if is_close_in_R3(point, inv_center): pass else: inv_dot.move_to(inversion(point, inv_center, radius)) class InversedVMobject(VMobject): CONFIG = { "is_analytical" : True, "match_original_style" : False, "use_dashed_vmob" : True, "dashed_vmob_config": { "num_dashes" : 50, "positive_space_ratio" : 0.6, }, } def __init__(self, orig_vmob, circle, **kwargs): VMobject.__init__(self, **kwargs) self.orig_vmob = orig_vmob self.circle = circle self.orig_vmob_type = "Others" self.initialize_orig_vmob_type() self.add_updater_to_inversed_vmobject() def add_updater_to_inversed_vmobject(self): self.add_updater(self.set_inversed_vmobject) def initialize_orig_vmob_type(self): if isinstance(self.orig_vmob, Line): self.orig_vmob_type = "Line" elif isinstance(self.orig_vmob, Circle): self.orig_vmob_type = "Circle" else: self.orig_vmob_type = "Others" def set_orig_vmob_type(self, orig_vmob_type): self.orig_vmob_type = orig_vmob_type def set_inversed_vmobject(self, inv_vmob): inv_center = self.circle.get_center() radius = self.circle.get_height() / 2. if self.is_analytical and self.orig_vmob_type == "Line": # If it's a line... lp1, lp2 = self.orig_vmob.get_start_and_end() if is_on_the_line(inv_center, lp1, lp2): # then the inversion is just the line itself. temp_vmob = ExtendedLine(lp1, lp2) else: # If it's a line NOT through the inversion center, v_para, v_perp = get_para_and_perp_components(inv_center, lp1, lp2) d = distance_to_the_line(inv_center, lp1, lp2) inv_vmob_radius = fdiv(radius**2, 2*d) closepoint = inv_center + v_perp inv_vmob_closepoint = inversion(closepoint, inv_center, radius) inv_vmob_center = (inv_center + inv_vmob_closepoint) / 2. temp_vmob = FineCircle(radius = inv_vmob_radius) temp_vmob.move_to(inv_vmob_center) elif self.is_analytical and self.orig_vmob_type == "Circle": orig_vmob_center = self.orig_vmob.get_center() orig_vmob_radius = self.orig_vmob.get_height() / 2. 
center_vec = orig_vmob_center - inv_center d = get_norm(center_vec) if is_close(orig_vmob_radius, d): # If it's a circle passing through the inversion center, foot = inv_center + fdiv(radius**2, 2*d) * normalize(center_vec) lp1 = foot + rotate_vector(center_vec, PI/2) lp2 = foot + rotate_vector(center_vec, -PI/2) temp_vmob = ExtendedLine(lp1, lp2) else: # then the inversion is a circle NOT through the inversion center. dp1 = orig_vmob_center - orig_vmob_radius * normalize(center_vec) dp2 = orig_vmob_center + orig_vmob_radius * normalize(center_vec) inv_dp1 = inversion(dp1, inv_center, radius) inv_dp2 = inversion(dp2, inv_center, radius) inv_vmob_radius = get_norm(inv_dp2 - inv_dp1) / 2. inv_vmob_center = (inv_dp2 + inv_dp1) / 2. temp_vmob = FineCircle(radius = inv_vmob_radius) temp_vmob.move_to(inv_vmob_center) else: temp_vmob = self.orig_vmob.copy() temp_vmob.apply_function(lambda p: inversion(p, inv_center, radius)) if self.use_dashed_vmob: temp_vmob = DashedVMobject(temp_vmob, **self.dashed_vmob_config) inv_vmob.become(temp_vmob) if self.match_original_style: inv_vmob.match_style(self.orig_vmob) class FourCirclesNormalForm(VMobject): CONFIG = { "circle_colors" : [MAROON_B, RED, GREEN, BLUE], "r" : 1.2, "l" : 9, "use_dashed_vmob" : True, "dashed_vmob_config" : { "num_dashes" : 30, "positive_space_ratio" : 0.6, } } def __init__(self, **kwargs): VMobject.__init__(self, **kwargs) c1 = Circle(radius = self.r, **kwargs).shift(self.r*LEFT) c2 = Circle(radius = self.r, **kwargs).shift(self.r*RIGHT) c3 = Line(self.l*LEFT, self.l*RIGHT, **kwargs).shift(self.r*DOWN) c4 = Line(self.l*LEFT, self.l*RIGHT, **kwargs).shift(self.r*UP) for mob, color in zip([c1, c2, c3, c4], self.circle_colors): mob.set_color(color) if self.use_dashed_vmob: self.add(DashedVMobject(mob, **self.dashed_vmob_config)) else: self.add(mob) class DescartesFourCircles(VMobject): CONFIG = { "outer_circle_index" : None, "orig_circle_color" : BLUE, "new_circle_color" : YELLOW, "show_new_circles" : True, 
"show_new_circles_centers" : False, } def __init__(self, ccdot1, ccdot2, ccdot3, **kwargs): self.ccdot1 = ccdot1 self.ccdot2 = ccdot2 self.ccdot3 = ccdot3 VMobject.__init__(self, **kwargs) self.add_orig_circles() self.add_orig_circles_updaters() self.generate_new_circles() if self.show_new_circles: self.add_new_circles() if self.show_new_circles_centers: self.add_new_circles_centers() def add_orig_circles(self): self.c1, self.c2, self.c3 = self.cs = VGroup(*[ Circle(arc_center = cc, radius = r, color = self.orig_circle_color) for cc, r in zip(self.get_orig_circle_centers(), self.calc_radii_by_centers()) ]) self.add(self.cs) def add_orig_circles_updaters(self): def get_center(k): return self.get_orig_circle_centers()[k] def get_abs_radius(k): return np.abs(self.calc_radii_by_centers()[k]) # Since enumerate() won't work here (seriously?), self.c1.add_updater(lambda c: c.move_to(get_center(0))) self.c1.add_updater(lambda c: c.set_height(2*get_abs_radius(0))) self.c2.add_updater(lambda c: c.move_to(get_center(1))) self.c2.add_updater(lambda c: c.set_height(2*get_abs_radius(1))) self.c3.add_updater(lambda c: c.move_to(get_center(2))) self.c3.add_updater(lambda c: c.set_height(2*get_abs_radius(2))) def get_orig_circles(self): return self.cs def get_orig_circle_centers(self): return [dot.get_center() for dot in (self.ccdot1, self.ccdot2, self.ccdot3)] def get_orig_circle_radii(self): return self.calc_radii_by_centers() def get_orig_circle_curvatures(self): return [fdiv(1, radius) for radius in self.calc_radii_by_centers()] def calc_radii_by_centers(self): p1, p2, p3 = self.get_orig_circle_centers() d12 = get_norm(p2 - p1) d23 = get_norm(p3 - p2) d13 = get_norm(p3 - p1) sum_r = (d12 + d23 + d13) / 2. 
if self.outer_circle_index == 1: return [-sum_r, sum_r-d12, sum_r-d13] elif self.outer_circle_index == 2: return [sum_r-d12, -sum_r, sum_r-d23] elif self.outer_circle_index == 3: return [sum_r-d13, sum_r-d23, -sum_r] else: return [sum_r-d23, sum_r-d13, sum_r-d12] def generate_new_circles(self): self.c4_1, self.c4_2 = self.new_circles = VGroup(*[ Circle(arc_center = new_cc, radius = new_r, color = self.new_circle_color) for new_cc, new_r in self.calc_new_circles_centers_and_radii() ]) self.generate_new_circles_centers() self.add_new_circles_updaters() def calc_new_circles_centers_and_radii(self): k1, k2, k3 = self.get_orig_circle_curvatures() z1, z2, z3 = map(R3_to_complex, self.get_orig_circle_centers()) sum_k = k1 + k2 + k3 sum_k2 = k1**2 + k2**2 + k3**2 sum_k_cycle_prod = k1*k2 + k2*k3 + k3*k1 b = (-2)*sum_k c = sum_k2 - 2*sum_k_cycle_prod delta = b**2 - 4*c k4_1 = (-b + np.sqrt(delta)) / 2 k4_2 = (-b - np.sqrt(delta)) / 2 sum_kz = k1*z1 + k2*z2 + k3*z3 sum_k2z = k1**2 * z1 + k2**2 * z2 + k3**2 * z3 coeff_1 = (sum_k - k4_1) * k4_1 const_1 = 2 * sum_k2z - (sum_k + k4_1) * sum_kz z4_1 = const_1 / coeff_1 coeff_2 = (sum_k - k4_2) * k4_2 const_2 = 2 * sum_k2z - (sum_k + k4_2) * sum_kz z4_2 = const_2 / coeff_2 return [[complex_to_R3(z4_1), fdiv(1, k4_1)], [complex_to_R3(z4_2), fdiv(1, k4_2)]] def generate_new_circles_centers(self): ccdot4_1 = Dot(color = self.new_circle_color) ccdot4_1.add_updater(lambda m: m.move_to(self.c4_1.get_center())) ccdot4_2 = Dot(color = self.new_circle_color) ccdot4_2.add_updater(lambda m: m.move_to(self.c4_2.get_center())) self.ccdot4_1 = ccdot4_1 self.ccdot4_2 = ccdot4_2 def add_new_circles_updaters(self): def get_new_center(k): return self.calc_new_circles_centers_and_radii()[k][0] def get_abs_new_radius(k): return np.abs(self.calc_new_circles_centers_and_radii()[k][1]) # I have to use a much more direct approach - list them all. 
self.c4_1.add_updater(lambda c: c.move_to(get_new_center(0))) self.c4_1.add_updater(lambda c: c.set_height(2*get_abs_new_radius(0))) self.c4_2.add_updater(lambda c: c.move_to(get_new_center(1))) self.c4_2.add_updater(lambda c: c.set_height(2*get_abs_new_radius(1))) def add_new_circles(self): if not hasattr(self, "new_circles"): self.new_circles = generate_new_circles() self.add(self.new_circles) def get_new_circles(self): if not hasattr(self, "new_circles"): self.new_circles = generate_new_circles() return self.new_circles def add_new_circles_centers(self): self.add(self.ccdot4_1, self.ccdot4_2) def remove_new_circles_center(self): self.remove(self.ccdot4_1, self.ccdot4_2) ##### ## Inversion Introduction Scenes class ConceptsInInversion(Scene): CONFIG = { "color_circle" : YELLOW, "color_radius" : RED, "color_P" : WHITE, } def construct(self): self.add_backgrounds() self.move_around_point_P() def add_backgrounds(self): circle_O = Circle(radius = 3.5, color = self.color_circle) circle_O.shift(3*LEFT) remark_circle = TextMobject("反演圆", color = self.color_circle) remark_circle.next_to(circle_O.get_bottom(), UP) dot_O = Dot(circle_O.get_center(), color = self.color_circle) label_O = DotLabel("O", dot_O, color = self.color_circle, position = DOWN) remark_O = TextMobject("反演中心", color = self.color_circle) remark_O.next_to(label_O, LEFT, buff = 0.15) radius = Line(circle_O.get_center(), circle_O.get_left()) label_radius = TexMobject("R").scale(0.8) remark_radius = TextMobject("反演幂").scale(0.8) brace_radius = Brace(radius, UP) brace_radius.put_at_tip(label_radius) remark_radius.next_to(label_radius, LEFT, buff = 0.15) group_radius = VGroup(radius, label_radius, brace_radius, remark_radius) group_radius.set_color(self.color_radius) group_radius.rotate(-PI/12, about_point = dot_O.get_center()) def_inversion = TextMobject("反演变换:$P \\mapsto P'$") rlt_inversion = TexMobject("|OP| \\times |OP'|=", "R^2") rlt_inversion.next_to(def_inversion, DOWN, aligned_edge = RIGHT) 
rlt_inversion[-1].set_color(self.color_radius) remarks = VGroup(def_inversion, rlt_inversion) remarks.to_corner(DR) dot_P = Dot(LEFT, color = self.color_P) label_P = DotLabel("P", dot_P, color = self.color_P, position = DL, label_buff = 0.2) dot_Pi = InversedDot(dot_P, circle_O, color = self.color_P) label_Pi = DotLabel("P'", dot_Pi, color = self.color_P, position = DR, label_buff = 0.2) line_OP = TwoDotsSegment(dot_O, dot_P, stroke_width = 2) line_OPi = TwoDotsSegment(dot_O, dot_Pi, stroke_width = 2) self.add(remarks) self.add(group_radius) self.add(circle_O, dot_O, label_O, remark_O, remark_circle) self.add(dot_P, dot_Pi, label_P, label_Pi, line_OP, line_OPi) self.circle_O = circle_O self.dot_P = dot_P def move_around_point_P(self): self.dot_P.save_state() for dx, dy in [(-0.2, 0.3), (0.1, -0.4), (4, 0.3), (1, 1)]: vec = np.array([dx, dy, 0]) self.play(self.dot_P.shift, vec, run_time = 1) self.wait() self.play(self.dot_P.move_to, self.circle_O.get_right()) self.wait() self.play(self.dot_P.restore, run_time = 1) self.wait() class InversionExamples(Scene): CONFIG = { "color_circle" : YELLOW, } def construct(self): circle_O = Circle(radius = 3.5, color = self.color_circle) circle_O.shift(3*LEFT) remark_circle = TextMobject("反演圆", color = self.color_circle) remark_circle.next_to(circle_O.get_bottom(), UP) dot_O = Dot(circle_O.get_center(), color = self.color_circle) label_O = DotLabel("O", dot_O, color = self.color_circle, position = DOWN) init_shape = Square(side_length = 1.2, color = BLUE).rotate(TAU/13) init_shape.next_to(circle_O.get_right(), LEFT, buff = 0.5) init_shape.save_state() inv_shape = InversedVMobject(init_shape, circle_O, use_dashed_vmob = False) new_shapes = [ RegularPolygon(n = 6, start_angle = PI/7, color = PINK).scale(0.8), TexMobject("42", color = RED).scale(2.5).rotate(-PI/9), TexMobject("\\pi", color = MAROON_B).scale(5).rotate(PI/15), ] self.add(circle_O, remark_circle, dot_O, label_O) self.add(init_shape, inv_shape) for new_shape in 
new_shapes: new_shape.next_to(circle_O.get_right(), LEFT, buff = 0.6) self.play(Transform(init_shape, new_shape), run_time = 1) self.wait() init_shape.generate_target() init_shape.target.become(new_shape) init_shape.target.shift(get_random_vector(0.5)) random_angle = 0.5*np.random.random() init_shape.target.rotate(random_angle) self.play(MoveToTarget(init_shape, path_arc = random_angle, run_time = 1)), self.wait() self.play(ApplyMethod(init_shape.restore)) self.wait() class LineToLineInversion(Scene): CONFIG = { "color_circle" : YELLOW, "color_orig" : BLUE, "color_inv" : RED, } def construct(self): self.add_backgrounds() self.show_line_to_line_inversion() def add_backgrounds(self): circle_O = Circle(radius = 2.5, color = self.color_circle) remark_circle = TextMobject("反演圆", color = self.color_circle) remark_circle.next_to(circle_O.get_bottom(), UP) dot_O = Dot(circle_O.get_center(), color = self.color_circle) label_O = DotLabel("O", dot_O, color = self.color_circle, position = DOWN) conclusion = TextMobject("经过反演中心的直线", "$\\mapsto$", "经过反演中心的直线") conclusion.scale(0.8) conclusion[0].set_color(self.color_orig) conclusion[2].set_color(self.color_inv) conclusion.to_corner(DR) self.add(circle_O, remark_circle, dot_O, label_O) self.add(conclusion) self.circle_O = circle_O def show_line_to_line_inversion(self): angle_tracker = ValueTracker(-PI/11) position_tracker = ValueTracker(1.4) angle_tracker.save_state() position_tracker.save_state() orig_line = ExtendedLine(LEFT, RIGHT, color = self.color_orig, stroke_width = 8) orig_line.add_updater(lambda m: m.rotate(angle_tracker.get_value() - m.get_angle())) inv_line = ExtendedLine(LEFT, RIGHT, color = self.color_inv, stroke_width = 4) inv_line.add_updater(lambda m: m.rotate(angle_tracker.get_value() - m.get_angle())) dot_P = Dot(color = self.color_orig) dot_P.add_updater( lambda m: m.move_to( position_tracker.get_value() * rotate_vector(RIGHT, angle_tracker.get_value()) ) ) dot_Pi = InversedDot(dot_P, self.circle_O, is_hollow 
= False, color = self.color_inv) label_P = DotLabel("P", dot_P, position = DOWN, color = self.color_orig) label_Pi = DotLabel("P'", dot_Pi, position = DOWN, color = self.color_inv) def get_lb(): return LEFT_SIDE + UP * LEFT_SIDE[0] * np.tan(angle_tracker.get_value()) def get_rb(): return RIGHT_SIDE + UP * RIGHT_SIDE[0] * np.tan(angle_tracker.get_value()) def is_oolb(m): return m.get_right()[0] < LEFT_SIDE[0] def is_oorb(m): return m.get_left()[0] > RIGHT_SIDE[0] oolb_arrow = Arrow(ORIGIN, LEFT, color = self.color_inv).scale(2) oolb_arrow.add_updater(lambda m: m.set_angle(angle_tracker.get_value() + PI)) oolb_arrow.add_updater(lambda m: m.next_to(get_lb(), DOWN, aligned_edge = LEFT, buff = 0.2)) oorb_arrow = Arrow(ORIGIN, RIGHT, color = self.color_inv).scale(2) oorb_arrow.add_updater(lambda m: m.set_angle(angle_tracker.get_value())) oorb_arrow.add_updater(lambda m: m.next_to(get_rb(), DOWN, aligned_edge = RIGHT, buff = 0.2)) oolb_label = TexMobject("P'", color = self.color_inv, background_stroke_width = 0) oolb_label.add_updater(lambda m: m.next_to(oolb_arrow, DOWN, buff = 0.2)) oorb_label = TexMobject("P'", color = self.color_inv, background_stroke_width = 0) oorb_label.add_updater(lambda m: m.next_to(oorb_arrow, DOWN, buff = 0.2)) oolb_group = VGroup(oolb_arrow, oolb_label) oorb_group = VGroup(oorb_arrow, oorb_label) oolb_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_oolb(label_Pi) else 0)) oolb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oolb(label_Pi) else 0)) oorb_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_oorb(label_Pi) else 0)) oorb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oorb(label_Pi) else 0)) self.add(orig_line, inv_line, dot_P, dot_Pi, label_P, label_Pi) self.add(oolb_group, oorb_group) for d_position, d_angle in [(2, 0), (1, PI/10), (-5, 0), (-3, -PI/7), (4, PI/11)]: self.play( ApplyMethod(position_tracker.increment_value, d_position), ApplyMethod(angle_tracker.increment_value, d_angle), run_time 
= 2, ) self.wait() self.play( ApplyMethod(angle_tracker.restore), ApplyMethod(position_tracker.restore), run_time = 2, ) self.wait() class LineToCircleInversion(Scene): CONFIG = { "color_circle" : YELLOW, "color_orig" : BLUE, "color_inv" : RED, "line_config" : { "stroke_width" : 2, "color" : WHITE, }, } def construct(self): self.add_backgrounds() self.add_shapes() self.show_line_to_circle_inversion() def add_backgrounds(self): circle_O = Circle(radius = 3, color = self.color_circle) circle_O.shift(3*LEFT+0.5*UP) remark_circle = TextMobject("反演圆", color = self.color_circle) remark_circle.next_to(circle_O.get_bottom(), UP) dot_O = Dot(circle_O.get_center(), color = self.color_circle) label_O = DotLabel("O", dot_O, color = self.color_circle, position = DOWN) conclusion1 = TextMobject("不经过反演中心的直线", "$\\mapsto$", "经过反演中心的圆") conclusion1[0].set_color(self.color_orig) conclusion1[-1].set_color(self.color_inv) conclusion2 = TextMobject("经过反演中心的圆", "$\\mapsto$", "不经过反演中心的直线") conclusion2[0].set_color(self.color_inv) conclusion2[-1].set_color(self.color_orig) conclusions = VGroup(conclusion1, conclusion2) for c in conclusions: c.scale(0.8) conclusions.arrange_submobjects(DOWN, index_of_submobject_to_align = 1) conclusions.to_corner(DR) bg_rect = BackgroundRectangle(conclusions) self.add(circle_O, remark_circle) self.add_foreground_mobjects(dot_O, label_O, bg_rect, conclusions) self.dot_O = dot_O self.circle_O = circle_O self.conclusions = conclusions self.bg_rect = bg_rect def add_shapes(self): position_tracker = ValueTracker(2) line_angle_tracker = ValueTracker(PI*9/19) circle_angle_tracker = ValueTracker(PI/5) line = ExtendedLine(LEFT, RIGHT, color = self.color_orig) line.add_updater(lambda m: m.move_to(position_tracker.get_value() * RIGHT)) line.add_updater(lambda m: m.rotate(line_angle_tracker.get_value() - m.get_angle())) inv_line = InversedVMobject(line, self.circle_O, use_dashed_vmob = False, color = self.color_inv) inv_line_center = SmallDot(color = self.color_inv) 
inv_line_center.add_updater(lambda m: m.move_to(inv_line.get_center())) dot_Ai = Dot(color = self.color_inv) dot_Ai.add_updater( lambda m: m.move_to(inv_line.get_center() * 2 - self.circle_O.get_center()) ) dot_Pi = Dot(color = self.color_inv) dot_Pi.add_updater( lambda m: m.move_to( inv_line.get_center() \ + rotate_vector( inv_line.get_center() - self.circle_O.get_center(), circle_angle_tracker.get_value() ) ) ) dot_P = InversedDot(dot_Pi, self.circle_O, is_hollow = False, color = self.color_orig) dot_A = InversedDot(dot_Ai, self.circle_O, is_hollow = False, color = self.color_orig) line_OA, line_OAi, line_OP, line_OPi, line_AP, line_AiPi = aux_lines = VGroup(*[ TwoDotsSegment(pt_1, pt_2, **self.line_config) for pt_1, pt_2 in [ (self.dot_O, dot_A), (self.dot_O, dot_Ai), (self.dot_O, dot_P), (self.dot_O, dot_Pi), (dot_A, dot_P), (dot_Ai, dot_Pi) ] ]) ai_AiOPi = AngleIndicator(dot_Ai, self.dot_O, dot_Pi, color = MAROON_B, radius = 0.8) rtai_OAP = RightAngleIndicator(self.dot_O, dot_A, dot_P) rtai_OPiAi = RightAngleIndicator(self.dot_O, dot_Pi, dot_Ai) label_P = TexMobject("P", color = self.color_orig) label_Pi = TexMobject("P'", color = self.color_inv) label_A = TexMobject("A", color = self.color_orig) label_Ai = TexMobject("A'", color = self.color_inv) label_A.add_updater( lambda m: m.move_to( dot_A.get_center() + 0.3 * normalize(dot_A.get_center() - self.dot_O.get_center()) ) ) label_P.add_updater( lambda m: m.move_to( dot_P.get_center() + 0.3 * normalize(dot_A.get_center() - self.dot_O.get_center()) ) ) label_Ai.add_updater( lambda m: m.move_to( dot_Ai.get_center() + 0.4 * rotate_vector( normalize(dot_Ai.get_center() - inv_line_center.get_center()), -PI/4 ) ) ) label_Pi.add_updater( lambda m: m.move_to( dot_Pi.get_center() + 0.4 * normalize(dot_Pi.get_center() - inv_line_center.get_center()) ) ) def get_ub(): return line.get_center() + TOP + RIGHT * TOP[1] / np.tan(line_angle_tracker.get_value()) def get_bb(): return line.get_center() + BOTTOM + RIGHT * BOTTOM[1] 
/ np.tan(line_angle_tracker.get_value()) def is_ooub(m): return m.get_bottom()[1] > TOP[1] def is_oobb(m): return m.get_top()[1] < BOTTOM[1] ooub_arrow = Arrow(ORIGIN, LEFT, color = self.color_orig).scale(2) ooub_arrow.add_updater(lambda m: m.set_angle(line_angle_tracker.get_value())) ooub_arrow.add_updater(lambda m: m.next_to(get_ub(), RIGHT, aligned_edge = TOP, buff = 0.2)) oobb_arrow = Arrow(ORIGIN, RIGHT, color = self.color_orig).scale(2) oobb_arrow.add_updater(lambda m: m.set_angle(line_angle_tracker.get_value() + PI)) oobb_arrow.add_updater(lambda m: m.next_to(get_bb(), RIGHT, aligned_edge = BOTTOM, buff = 0.2)) oolb_label = TexMobject("P", color = self.color_orig, background_stroke_width = 0) oolb_label.add_updater(lambda m: m.next_to(ooub_arrow, RIGHT, buff = 0.2)) oorb_label = TexMobject("P", color = self.color_orig, background_stroke_width = 0) oorb_label.add_updater(lambda m: m.next_to(oobb_arrow, RIGHT, buff = 0.2)) ooub_group = VGroup(ooub_arrow, oolb_label) oobb_group = VGroup(oobb_arrow, oorb_label) ooub_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_ooub(label_P) else 0)) ooub_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_ooub(label_P) else 0)) oobb_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_oobb(label_P) else 0)) oobb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oobb(label_P) else 0)) self.add(line, inv_line) self.add(dot_A, dot_P, dot_Ai, dot_Pi) self.add(label_P, label_Pi, label_A, label_Ai) self.add(aux_lines) self.add(ai_AiOPi, rtai_OAP, rtai_OPiAi) self.add(ooub_group, oobb_group) self.position_tracker = position_tracker self.line_angle_tracker = line_angle_tracker self.circle_angle_tracker = circle_angle_tracker def show_line_to_circle_inversion(self): play_args = [ [0, PI/12, 0, 2], [0, 0, PI*7/5, 4], [-2, PI/8, -PI/5, 3], [0, 0, PI*19/10, 6], [1.5, -PI/7, PI*2/5, 4], ] restore_arg = [ -sum([arg[k] for arg in play_args]) for k in range(len(play_args[0])) ] restore_arg[1] = (restore_arg[1] + 
PI) % (2*PI) - PI restore_arg[2] = (restore_arg[2] + PI) % (2*PI) - PI restore_arg[-1] = 3 play_args.append(restore_arg) for d_center, d_line_angle, d_circle_angle, run_time in play_args: self.play( ApplyMethod(self.position_tracker.increment_value, d_center), ApplyMethod(self.line_angle_tracker.increment_value, d_line_angle), ApplyMethod(self.circle_angle_tracker.increment_value, d_circle_angle), run_time = run_time, ) self.wait() class InversionCreateSimilarTriangles(Scene): CONFIG = { "random_seed" : 5+7-0, "num_of_nudges" : 5, "max_step" : 1, "color_A" : RED, "color_B" : BLUE, "color_combined" : MAROON_B, "color_circle": YELLOW, } def construct(self): self.add_remark() self.show_figure_animation() def add_remark(self): cond_1 = TexMobject("{|OP|", "\\over", "|OQ|}", "=", "{|OQ'|", "\\over", "|OP'|}") cond_2 = TexMobject("\\angle POQ", "=", "\\angle Q'OP'") conds = VGroup(cond_1, cond_2) conds.arrange_submobjects(DOWN, buff = 0.5) conds_rect = SurroundingRectangle(conds, color = WHITE) arrow = TexMobject("\\Downarrow") arrow.next_to(conds_rect, DOWN) concl = TexMobject("\\triangle OPQ", "\\sim", "\\triangle OQ'P'") concl.next_to(arrow, DOWN) for mob in (cond_1[0], cond_1[2], concl[0]): mob.set_color(self.color_A) for mob in (cond_1[-1], cond_1[-3], concl[-1]): mob.set_color(self.color_B) for mob in (cond_2[0], cond_2[-1]): mob.set_color(self.color_combined) remark = VGroup(conds, conds_rect, arrow, concl) remark.to_corner(DR) self.add(remark) def show_figure_animation(self): circle = Circle(radius = 3, color = self.color_circle) circle.move_to(3.5*LEFT) dot_O = Dot(color = self.color_combined) dot_O.add_updater(lambda m: m.move_to(circle.get_center())) dot_P = Dot(point = 1.2*UP+LEFT, color = self.color_A) dot_Q = Dot(point = 0.5*DOWN+1.9*LEFT, color = self.color_A) dot_Pi = InversedDot(dot_P, circle, is_hollow = False, color = self.color_B) dot_Qi = InversedDot(dot_Q, circle, is_hollow = False, color = self.color_B) triangle_OPQ = ManyDotsPolygon( dot_O, dot_P, 
dot_Q, color = self.color_A, stroke_width = 5, fill_opacity = 0.4 ) triangle_OPiQi = ManyDotsPolygon( dot_O, dot_Pi, dot_Qi, color = self.color_B, stroke_width = 2, fill_opacity = 0.3 ) label_O, label_P, label_Pi, label_Q, label_Qi = ( DotLabel( text, dot, color = color, position = position, background_stroke_width = 5, ).scale(0.8) for text, dot, color, position in zip( ["O", "P", "P'", "Q", "Q'"], [dot_O, dot_P, dot_Pi, dot_Q, dot_Qi], [self.color_combined, self.color_A, self.color_B, self.color_A, self.color_B], [LEFT, UP, UP, DOWN, DOWN] ) ) self.add(dot_O, dot_P, dot_Q, dot_Pi, dot_Qi) self.add(circle, triangle_OPQ, triangle_OPiQi) self.add(label_O, label_P, label_Pi, label_Q, label_Qi) dot_P.save_state() dot_Q.save_state() for k in range(self.num_of_nudges): nudge_P = get_random_vector(self.max_step) nudge_Q = get_random_vector(self.max_step) self.play( ApplyMethod(dot_P.shift, nudge_P), ApplyMethod(dot_Q.shift, nudge_Q), run_time = 2 ) self.wait() self.play(dot_P.restore, dot_Q.restore, run_time = 2) self.wait() class CircleToCircleInversionProof(Scene): CONFIG = { "color_O" : YELLOW, "color_A" : RED, "color_B" : BLUE, "color_combined" : MAROON_B, "label_buff" : 0.1, "label_scaling_factor" : 0.75, "line_config" : { "stroke_width" : 2, "color" : WHITE, }, } def construct(self): self.add_backgrounds() self.show_left_and_right_points() self.show_random_point() self.show_similar_triangles() self.show_complementary_property() self.show_inversion_result() def add_backgrounds(self): circle_O = Circle(radius = 3.2, color = self.color_O) circle_O.shift(3.5*LEFT) dot_O = Dot(circle_O.get_center(), color = self.color_O) remark_O = TextMobject("反演圆", color = YELLOW) remark_O.next_to(circle_O.get_bottom(), UP, buff = 0.4) circle_C = Circle(radius = 0.8, stroke_width = 2) circle_C.next_to(circle_O.get_right(), LEFT, buff = 0.5) dot_C = Dot(circle_C.get_center()) label_O, label_C = ( DotLabel( text, dot, color = color, position = DOWN, label_buff = self.label_buff 
).scale(self.label_scaling_factor) for text, dot, color in zip(["O", "C"], [dot_O, dot_C], [self.color_O, WHITE]) ) for orig_mob in (circle_C, dot_C, label_C): orig_mob.set_sheen_direction(RIGHT) orig_mob.set_color([self.color_A, self.color_B]) inv_circle_template = InversedVMobject(circle_C, circle_O, use_dashed_vmob = False) inv_circle = Circle(radius = inv_circle_template.get_width()/2) inv_circle.move_to(inv_circle_template.get_center()) inv_circle.set_sheen_direction(LEFT) inv_circle.set_color([self.color_A, self.color_B]) self.add(circle_O, dot_O, circle_C, dot_C) self.add(label_O, label_C) self.add(remark_O) self.wait() self.circle_O = circle_O self.dot_O = dot_O self.remark_O = remark_O self.circle_C = circle_C self.dot_C = dot_C self.inv_circle = inv_circle def show_left_and_right_points(self): dot_A = Dot(color = self.color_A) dot_A.move_to(self.circle_C.get_left()) dot_B = Dot(color = self.color_B) dot_B.move_to(self.circle_C.get_right()) dot_Ai = InversedDot(dot_A, self.circle_O, is_hollow = False, color = self.color_A) dot_Bi = InversedDot(dot_B, self.circle_O, is_hollow = False, color = self.color_B) dot_Q = Dot((dot_Ai.get_center() + dot_Bi.get_center()) / 2) line_OB = Line(self.dot_O.get_center(), dot_B.get_center(), **self.line_config) line_OAi = Line(self.dot_O.get_center(), dot_Ai.get_center(), **self.line_config) label_A, label_Ai, label_B, label_Bi = ( DotLabel( text, dot, color = color, position = position, label_buff = self.label_buff ).scale(self.label_scaling_factor) for text, dot, color, position in zip( ["A", "A'", "B", "B'"], [dot_A, dot_Ai, dot_B, dot_Bi], [self.color_A, self.color_A, self.color_B, self.color_B], [DL, DR, DR, DL] ) ) remark_AB = TextMobject("圆心连线 \\\\ 的交点...").scale(0.6) remark_AB.next_to(VGroup(dot_A, dot_B), DOWN, buff = 1) arrows_AB = VGroup(*[ Arrow(remark_AB.get_critical_point(direction), dot, buff = 0.1) for direction, dot in zip([UL, UR], [dot_A, dot_B]) ]) remark_AiBi = TextMobject("...以及它们的反点").scale(0.8) 
remark_AiBi.next_to(VGroup(dot_Ai, dot_Bi), DOWN, buff = 1) arrows_AiBi = VGroup(*[ Arrow(remark_AiBi.get_critical_point(direction), dot, buff = 0.1) for direction, dot in zip([UR, UL], [dot_Ai, dot_Bi]) ]) self.play(ShowCreation(line_OB)) self.play(Write(dot_A), Write(dot_B), Write(label_A), Write(label_B)) self.wait() self.play(Write(remark_AB), ShowCreation(arrows_AB)) self.wait() self.play( ReplacementTransform(dot_A.deepcopy(), dot_Ai), ReplacementTransform(dot_B.deepcopy(), dot_Bi), ) self.play(Write(label_Ai), Write(label_Bi)) self.wait() self.play( ReplacementTransform(remark_AB, remark_AiBi), ReplacementTransform(arrows_AB, arrows_AiBi) ) self.play(ReplacementTransform(line_OB, line_OAi)) self.play(FadeOut(VGroup(remark_AiBi, arrows_AiBi))) self.wait() self.dot_A = dot_A self.dot_Ai = dot_Ai self.dot_B = dot_B self.dot_Bi = dot_Bi self.dot_Q = dot_Q self.line_OAi = line_OAi self.dots_AB = VGroup(dot_A, dot_Ai, dot_B, dot_Bi) self.labels_AB = VGroup(label_A, label_Ai, label_B, label_Bi) def show_random_point(self): angle_tracker = ValueTracker(PI/3) dot_P = Dot() dot_P.add_updater( lambda m: m.move_to( self.circle_C.point_at_angle(angle_tracker.get_value() % TAU) ) ) dot_P.add_updater( lambda m: m.set_color( interpolate_color( self.color_A, self.color_B, (dot_P.get_center()[0] - self.dot_A.get_center()[0]) / (self.dot_B.get_center()[0] - self.dot_A.get_center()[0]) ) ) ) label_P = DotLabel("P", dot_P, position = None) label_P.scale(0.8) label_P.add_updater(lambda m: m.set_color(dot_P.get_color())) label_P.add_updater( lambda m: m.move_to(dot_P.get_center() * 1.4 - self.dot_C.get_center() * 0.4) ) arrow_P = Vector(DR, buff = 0, color = WHITE).scale(0.5) arrow_P.add_updater(lambda m: m.next_to(dot_P, UL, buff = 0.1)) remark_P = TextMobject("圆上任意一点...").scale(0.75) remark_P.add_updater(lambda m: m.next_to(arrow_P, UL, buff = 0.1)) dot_Pi = InversedDot(dot_P, self.circle_O, is_hollow = False) dot_Pi.add_updater(lambda m: m.set_color(dot_P.get_color())) label_Pi 
= DotLabel("P'", dot_Pi, position = None) label_Pi.scale(0.8) label_Pi.add_updater(lambda m: m.set_color(dot_Pi.get_color())) label_Pi.add_updater( lambda m: m.move_to(dot_Pi.get_center() * 1.1 - self.inv_circle.get_center() * 0.1) ) arrow_Pi = Vector(DL, buff = 0, color = WHITE).scale(0.5) arrow_Pi.add_updater(lambda m: m.next_to(dot_Pi, UR, buff = 0.1)) remark_Pi = TextMobject("...以及它的反点").scale(0.75) remark_Pi.add_updater(lambda m: m.next_to(arrow_Pi, UR, buff = 0.1)) line_OP, line_OPi, line_AP, line_AiPi, line_BP, line_BiPi = aux_lines = VGroup(*[ TwoDotsSegment(pt_1, pt_2, **self.line_config) for pt_1, pt_2 in [ (self.dot_O, dot_P), (self.dot_O, dot_Pi), (self.dot_A, dot_P), (self.dot_Ai, dot_Pi), (self.dot_B, dot_P), (self.dot_Bi, dot_Pi) ] ]) rtai_APB = RightAngleIndicator(self.dot_A, dot_P, self.dot_B) rtai_BiPiAi = RightAngleIndicator(self.dot_Bi, dot_Pi, self.dot_Ai, side_length = 0.5) self.play(Write(dot_P), Write(label_P)) self.play(ShowCreation(arrow_P), Write(remark_P)) self.play(Write(line_AP), Write(line_BP)) self.play(ShowCreation(rtai_APB)) self.wait() self.play(ReplacementTransform(dot_P.deepcopy(), dot_Pi)) self.play(Write(label_Pi)) self.play( ReplacementTransform(arrow_P.deepcopy(), arrow_Pi), ReplacementTransform(remark_P.deepcopy(), remark_Pi), ) self.play(angle_tracker.increment_value, PI/6, run_time = 2) self.play(FadeOut(VGroup(arrow_P, remark_P, arrow_Pi, remark_Pi))) self.wait() self.play(Write(VGroup(line_OP, line_OPi, line_AiPi, line_BiPi))) self.wait() self.dot_P = dot_P self.dot_Pi = dot_Pi self.rtai_APB = rtai_APB self.rtai_BiPiAi = rtai_BiPiAi self.angle_tracker = angle_tracker self.aux_lines = aux_lines self.dots_P = VGroup(dot_P, dot_Pi) self.labels_P = VGroup(label_P, label_Pi) self.rtais = VGroup(self.rtai_APB, self.rtai_BiPiAi) def show_similar_triangles(self): ai_OAP = AngleIndicator(self.dot_O, self.dot_A, self.dot_P, radius = 0.3, color = self.color_A) ai_OBP = AngleIndicator(self.dot_O, self.dot_B, self.dot_P, radius = 
0.4, color = self.color_B) ai_OPiAi = AngleIndicator(self.dot_O, self.dot_Pi, self.dot_Ai, radius = 0.3, color = self.color_A) ai_OPiBi = AngleIndicator(self.dot_O, self.dot_Pi, self.dot_Bi, radius = 0.4, color = self.color_B) triangle_OAP, triangle_OPiAi, triangle_OBP, triangle_OPiBi = [ ManyDotsPolygon( pt_1, pt_2, pt_3, color = self.color_combined, stroke_width = 0, fill_opacity = 0.4 ) for pt_1, pt_2, pt_3 in ( (self.dot_O, self.dot_A, self.dot_P), (self.dot_O, self.dot_Pi, self.dot_Ai), (self.dot_O, self.dot_B, self.dot_P), (self.dot_O, self.dot_Pi, self.dot_Bi), ) ] remark_sim_A = TexMobject("\\triangle OAP", "\\sim", "\\triangle OP'A'") remark_sim_B = TexMobject("\\triangle OBP", "\\sim", "\\triangle OP'B'") remark_arrow = TexMobject("\\Downarrow") remark_angle_A = TexMobject("\\angle OAP", "=", "\\angle OP'A'") remark_angle_B = TexMobject("\\angle OBP", "=", "\\angle OP'B'") remarks_A = VGroup(remark_sim_A, remark_arrow, remark_angle_A) remarks_B = VGroup(remark_sim_B, remark_arrow, remark_angle_B) remarks_A.arrange_submobjects(DOWN) remarks_A.next_to(self.dot_Q, DOWN, buff = 1) remark_sim_B.move_to(remark_sim_A.get_center()) remark_angle_B.move_to(remark_angle_A.get_center()) for remark, color in ([remark_sim_A, self.color_combined], [remark_sim_B, self.color_combined], \ [remark_angle_A, self.color_A], [remark_angle_B, self.color_B]): remark[0].set_color(color) remark[-1].set_color(color) self.play(Write(remark_sim_A)) self.play(FadeInFromDown(VGroup(remark_arrow, remark_angle_A))) self.wait() self.play(ShowCreation(triangle_OAP), ShowCreation(ai_OAP)) self.wait() self.play( ReplacementTransform(triangle_OAP, triangle_OPiAi), ReplacementTransform(ai_OAP.deepcopy(), ai_OPiAi), ) self.play(FadeOut(triangle_OPiAi)) self.wait() self.play(ReplacementTransform(remarks_A, remarks_B)) self.wait() self.play(ShowCreation(triangle_OBP), ShowCreation(ai_OBP)) self.wait() self.play( ReplacementTransform(triangle_OBP, triangle_OPiBi), 
ReplacementTransform(ai_OBP.deepcopy(), ai_OPiBi), ) self.play(FadeOut(remarks_B), FadeOut(triangle_OPiBi)) self.wait() self.ai_OAP = ai_OAP self.ai_OBP = ai_OBP self.ai_OPiAi = ai_OPiAi self.ai_OPiBi = ai_OPiBi self.ais = VGroup(ai_OAP, ai_OBP, ai_OPiAi, ai_OPiBi) def show_complementary_property(self): ai_OAP_copy = self.ai_OAP.deepcopy() ai_OBP_copy = self.ai_OBP.deepcopy() rtai_APB_copy = self.rtai_APB.deepcopy() for ai_copy in (ai_OAP_copy, ai_OBP_copy, rtai_APB_copy): ai_copy.clear_updaters() comp_prop = VGroup(ai_OAP_copy, TexMobject("="), ai_OBP_copy, TexMobject("+"), rtai_APB_copy) comp_prop.arrange_submobjects(RIGHT) comp_prop.scale(1.2) comp_prop.next_to(self.circle_O.get_top(), DOWN, buff = 1) self.play( ReplacementTransform(self.ai_OAP.deepcopy(), ai_OAP_copy), ReplacementTransform(self.ai_OBP.deepcopy(), ai_OBP_copy), ReplacementTransform(self.rtai_APB.deepcopy(), rtai_APB_copy), ) self.play(Write(comp_prop[1]), Write(comp_prop[3])) self.wait() self.play(ReplacementTransform(rtai_APB_copy.deepcopy(), self.rtai_BiPiAi)) self.wait() for ai in self.ais: ai.clear_updaters() self.play( FadeOut(comp_prop), FadeOut(self.ais), FadeOut(self.labels_AB), FadeOut(self.labels_P), ) self.wait() def show_inversion_result(self): inv_circle_copy = self.inv_circle.deepcopy() self.play(self.angle_tracker.set_value, PI, run_time = 2) self.wait() def update_inv_circle(inv_circle): angle = self.angle_tracker.get_value() if (angle <= -PI) or (angle > PI): alpha = 1 else: QPi = self.dot_Pi.get_center() - self.dot_Q.get_center() QAi = self.dot_Ai.get_center() - self.dot_Q.get_center() theta = angle_between(QPi, QAi) if self.dot_Pi.get_center()[1] < self.dot_Q.get_center()[1]: theta = 2*PI - theta alpha = theta / (2*PI) inv_circle.become(inv_circle_copy.get_subcurve(0, alpha)) self.inv_circle.add_updater(update_inv_circle) self.add(self.inv_circle) self.play( ApplyMethod(self.angle_tracker.increment_value, -2*PI), run_time = 5, ) self.inv_circle.clear_updaters() for line in 
self.aux_lines: line.clear_updaters() self.play( FadeOut(self.dots_AB), FadeOut(self.dots_P), FadeOut(self.rtais), FadeOut(self.line_OAi), FadeOut(self.aux_lines) ) self.wait() color_template = Square( stroke_width = 0, fill_opacity = 1, fill_color = [self.color_A, self.color_B] ) conclusion = TextMobject("不经过反演中心的圆", "$\\mapsto$", "不经过反演中心的圆") conclusion.scale(0.8) conclusion[0].set_color_by_gradient(self.color_A, self.color_B) conclusion[2].set_color_by_gradient(self.color_B, self.color_A) conclusion.to_corner(DR) self.play(Write(conclusion)) self.wait(3) self.play(FadeOut(conclusion), FadeOut(self.inv_circle)) self.wait() class ConcentricPropertyDoesNotHold(Scene): def setup(self): N = 8 self.circle_radii = [0.9-0.1*k for k in range(N)] self.dot_radii = [0.08-0.005*k for k in range(N)] self.circle_colors = color_gradient([BLUE, GREEN, RED], N) def construct(self): orig_circles = VGroup(*[ Circle(radius = radius, stroke_width = 1.5,color = color) for radius, color in zip(self.circle_radii, self.circle_colors)] ) orig_circles.shift(2*LEFT+0.5*DOWN) orig_circles_centers = VGroup(*[ Dot(circle.get_center(), radius = radius, color = color) for circle, radius, color in zip(orig_circles, self.dot_radii, self.circle_colors) ]) circle = Circle(radius = 3, color = YELLOW) circle.shift(3.8*LEFT+0.5*DOWN) circle_center = Dot(circle.get_center(), color = YELLOW) inv_circles = VGroup(*[ InversedVMobject(orig_circle, circle).clear_updaters().set_color(color) for orig_circle, color in zip(orig_circles, self.circle_colors) ]) inv_circles_centers = VGroup(*[ Dot(inv_circle.get_center(), color = color) for inv_circle, color in zip(inv_circles, self.circle_colors) ]) circle_text = TextMobject("反演圆", color = YELLOW) circle_text.next_to(circle.get_bottom(), UP, buff = 0.4) orig_circles_text = TextMobject("同心的圆", color = WHITE) orig_circles_text.next_to(orig_circles, UP) orig_circles_text.to_edge(UP, buff = 0.4) inv_circles_text = TextMobject("不同心的像", color = WHITE) 
inv_circles_text.next_to(inv_circles, UP) inv_circles_text.to_edge(UP, buff = 0.4) arrow = Arrow(orig_circles_text.get_right(), inv_circles_text.get_left()) self.add(circle, circle_center) self.add(orig_circles, orig_circles_centers) self.add(inv_circles, inv_circles_centers) self.add(circle_text, orig_circles_text, inv_circles_text, arrow) self.wait() class DemonstratePtolemyInequality(Scene): CONFIG = { "R" : 2.7, "angle_A" : -PI*2/3, "angle_B" : PI*4/5, "angle_D" : -PI/5, "radius_C" : 3.2, "angle_C" : PI/5, } def construct(self): radius_tracker = ValueTracker(self.radius_C) angle_tracker = ValueTracker(self.angle_C) circle = Circle(radius = self.R, color = WHITE, stroke_width = 1) circle.shift(DOWN) dashed_circle = DashedVMobject(circle, num_dashes = 100, positive_space_ratio = 0.5) dot_A, dot_B, dot_C, dot_D = dots = VGroup(*[ Dot(circle.point_at_angle(angle % TAU), color = WHITE) for angle in (self.angle_A, self.angle_B, self.angle_C, self.angle_D) ]) dot_C.add_updater( lambda m: m.move_to( circle.get_center() + radius_tracker.get_value() * \ rotate_vector(RIGHT, angle_tracker.get_value()) ) ) dot_labels = VGroup(*[ DotLabel(text, dot, position = position, label_buff = 0.1) for text, dot, position in zip( ["A", "B", "C", "D"], dots, [DL, UL, UR, DR] ) ]) lines = VGroup(*[ TwoDotsSegment(dot_1, dot_2) for dot_1, dot_2 in ( [dot_B, dot_A], [dot_A, dot_C], [dot_A, dot_D], [dot_B, dot_C], [dot_B, dot_D], [dot_C, dot_D], ) ]) length_labels = VGroup(*[LengthLabel(line) for line in lines]) length_labels[0].switch_side() length_labels[2].switch_side() length_labels[1].set_offset(-0.4) length_labels[-2].set_offset(-0.4) def get_sums(): AB, AC, AD, BC, BD, CD = [line.get_length() for line in lines] sum_lhs = AB * CD + AD * BC sum_rhs = AC * BD return sum_lhs, sum_rhs relation_eq = TexMobject( "|AB| \\cdot |CD| + |AD| \\cdot |BC|", "=", "|AC| \\cdot |BD|", background_stroke_width = 0, ) relation_neq = TexMobject( "|AB| \\cdot |CD| + |AD| \\cdot |BC|", ">", "|AC| \\cdot 
|BD|", background_stroke_width = 0, ) relation_eq[1].set_color(GREEN) relation_neq[1].set_color(RED) relation_eq.to_edge(UP, buff = 1.2) for eq_mob, neq_mob in zip(relation_eq, relation_neq): neq_mob.move_to(eq_mob.get_center()) lhs, eq_sign, rhs = relation_eq neq_sign = relation_neq[1] label_lhs = DecimalNumber(num_decimal_places = 4, show_ellipsis = True) label_rhs = DecimalNumber(num_decimal_places = 4, show_ellipsis = True) label_lhs.add_updater(lambda m: m.set_value(get_sums()[0])) label_rhs.add_updater(lambda m: m.set_value(get_sums()[1])) brace_lhs = Brace(lhs, UP, buff = 0.1) brace_rhs = Brace(rhs, UP, buff = 0.1) brace_lhs.put_at_tip(label_lhs) brace_rhs.put_at_tip(label_rhs) def get_indication_color(thres = 1e-2): return GREEN if is_close(radius_tracker.get_value(), self.R, thres = thres) else RED def get_indication_opacity(thres = 1e-2): return 0 if is_close(radius_tracker.get_value(), self.R, thres = thres) else 1 figure_group = VGroup(dashed_circle, dots, lines, length_labels, dot_labels) figure_group.add_updater(lambda m: m.set_color(get_indication_color())) relation_group = VGroup(lhs, eq_sign, rhs, neq_sign, brace_lhs, brace_rhs, label_lhs, label_rhs) label_lhs.add_updater(lambda m: m.set_color(get_indication_color())) label_rhs.add_updater(lambda m: m.set_color(get_indication_color())) eq_sign.add_updater(lambda m: m.set_opacity(1 - get_indication_opacity())) neq_sign.add_updater(lambda m: m.set_opacity(get_indication_opacity())) self.add(figure_group) self.add(relation_group) deltas = [ (0.5, -0.1), (0, -0.4), (-1, 0.3), (0, 0.4), (-1, 0), (0.3, -0.2), (0.7, -0.3), ] radius_tracker.save_state() angle_tracker.save_state() for d_radius, d_angle in deltas: self.play( ApplyMethod(radius_tracker.increment_value, d_radius), ApplyMethod(angle_tracker.increment_value, d_angle), run_time = 2, ) self.wait() self.play( ApplyMethod(radius_tracker.restore), ApplyMethod(angle_tracker.restore), run_time = 2, ) self.wait() class PtolemyInversionFigure(Scene): 
CONFIG = { "R" : 3.8, "r" : 1.3, "angle_A" : PI, "angle_B" : PI/3, "angle_C" : -PI/9, "angle_D" : -PI*2/7, "color_circle" : YELLOW, "color_ABD" : BLUE, } def construct(self): circle_ABD = Circle(radius = self.r, color = self.color_ABD, stroke_width = 3) circle_ABD.shift(0.2*LEFT) dot_A, dot_B, dot_C, dot_D = dots = VGroup(*[ Dot(circle_ABD.point_at_angle(angle % TAU), color = WHITE) for angle in (self.angle_A, self.angle_B, self.angle_C, self.angle_D) ]) dot_A.set_color(self.color_circle) dot_C.shift(0.4*RIGHT) circle = Circle(radius = self.R, color = self.color_circle, stroke_width = 5) circle.move_to(dot_A.get_center()) remark_circle = TextMobject("反演圆", color = self.color_circle) remark_circle.next_to(circle.get_bottom(), UP) label_A, label_B, label_C, label_D = dot_labels = VGroup(*[ DotLabel(text, dot, position = position, label_buff = 0.2) for text, dot, position in zip( ["A", "B", "C", "D"], dots, [DL, UP, DOWN, DOWN] ) ]) label_A.set_color(self.color_circle) dot_Bi, dot_Ci, dot_Di = inv_dots = VGroup(*[ InversedDot(dot, circle, is_hollow = False, color = WHITE) for dot in (dot_B, dot_C, dot_D) ]) label_Bi, label_Ci, label_Di = inv_dot_labels = VGroup(*[ DotLabel(text, dot, position = RIGHT, label_buff = 0.2) for text, dot in zip(["B'", "C'", "D'"], [dot_Bi, dot_Ci, dot_Di]) ]) lines = VGroup(*[ TwoDotsSegment(dot_1, dot_2, stroke_width = 1) for dot_1, dot_2 in ( [dot_A, dot_B], [dot_A, dot_C], [dot_A, dot_D], [dot_B, dot_C], [dot_B, dot_D], [dot_C, dot_D], [dot_A, dot_Bi], [dot_A, dot_Ci], [dot_A, dot_Di], [dot_Bi, dot_Ci], [dot_Bi, dot_Di], [dot_Ci, dot_Di], ) ]) inv_circle_ABD = InversedVMobject(circle_ABD, circle, use_dashed_vmob = False) inv_circle_ABD.add_updater(lambda m: m.set_color(self.color_ABD)) inv_circle_ABD.add_updater(lambda m: m.set_stroke(width = 2)) self.add(circle, remark_circle, circle_ABD, inv_circle_ABD) self.add(dots, dot_labels, inv_dots, inv_dot_labels, lines) self.add() self.wait() ##### ## Inversion Advanced P1 Scenes class 
KissingCirclesPuzzle(Scene): def construct(self): self.show_figure() self.show_question() def show_figure(self): type_text_1 = TextMobject("外切-外切-外切") type_text_2 = TextMobject("内切-内切-外切") type_text_1.move_to(LEFT_SIDE/2) type_text_2.move_to(RIGHT_SIDE/2) type_text_1.to_edge(DOWN) type_text_2.to_edge(DOWN) dot_l1, dot_l2, dot_l3 = dots_l = VGroup(*[ VectorizedPoint(np.array([coords[0], coords[1], 0]), color = BLUE) for coords in [(-3.9, 1.5), (-4.9, 0.0), (-2.8, -1.0)] ]) dot_r1, dot_r2, dot_r3 = dots_r = VGroup(*[ VectorizedPoint(np.array([coords[0], coords[1], 0]), color = BLUE) for coords in [(4.6, 0.3), (3.9, 0.6), (3.5, 1.6)] ]) dfc_l = DescartesFourCircles(*dots_l, show_new_circles = False) dfc_r = DescartesFourCircles(*dots_r, show_new_circles = False, outer_circle_index = 2) for dfc in [dfc_l, dfc_r]: for mob in dfc.get_orig_circles(): mob.set_stroke(width = 2, color = BLUE) self.add(type_text_1, type_text_2) self.add(dfc_l, dfc_r) self.dfc_l = dfc_l self.dfc_r = dfc_r self.dots_l = dots_l self.dots_r = dots_r def show_question(self): question = TextMobject("能否添加第四个圆,使之与其他三个圆都相切?") question.to_edge(UP, buff = 0.2) self.add(question) self.wait() class KissingCirclesSimplified(Scene): def construct(self): line1 = ExtendedLine(UL, UR) line2 = ExtendedLine(DL, DR) center_circle = Circle(radius = 1) figure_group = VGroup(line1, line2, center_circle) for mob in figure_group: mob.set_stroke(width = 2, color = BLUE) question = TextMobject("能否添加第四个“圆”,使之与其他三个“圆”都相切?") question.next_to(figure_group, UP, buff = 0.5) group = VGroup(question, figure_group) group.move_to(ORIGIN) self.add(group) self.wait() class KissingCirclesSimplifiedAnswer(Scene): def construct(self): line1 = ExtendedLine(UL, UR, stroke_width = 2, color = BLUE) line2 = ExtendedLine(DL, DR, stroke_width = 2, color = BLUE) center_circle = Circle(radius = 1, stroke_width = 2, color = BLUE) new_circles = VGroup(*[ Circle(radius = 1, color = color, fill_opacity = 0.1, stroke_width = 5) \ 
.next_to(center_circle, direction, buff = 0) for direction, color in zip([LEFT, RIGHT], [RED, ORANGE]) ]) numbers = VGroup(*[ TexMobject(f"{num}", color = circle.get_color()).move_to(circle.get_center()) for num, circle in zip(["1", "2"], new_circles) ]) group = VGroup(line1, line2, center_circle, new_circles, numbers) group.move_to(ORIGIN) self.add(group) self.wait() class KissingCirclesSimplifiedExplanation(Scene): CONFIG = { "dashed_vmob_config" : { "num_dashes" : 30, "positive_space_ratio" : 0.6, }, "line_colors" : [GREEN, BLUE], "center_color" : MAROON_B, "circle_colors" : [RED, ORANGE], } def construct(self): self.add_backgrounds() self.show_process() def add_backgrounds(self): N = 5 line1 = Line(UP + N*LEFT, UP + N*RIGHT, stroke_width = 2, color = self.line_colors[0]) line2 = Line(DOWN + N*LEFT, DOWN + N*RIGHT, stroke_width = 2, color = self.line_colors[1]) center_circle = FineCircle(radius = 1, stroke_width = 2, color = self.center_color) new_circle1 = FineCircle(radius = 1, stroke_width = 5, color = self.circle_colors[0]) new_circle1.next_to(center_circle, LEFT, buff = 0) new_circle2 = FineCircle(radius = 1, stroke_width = 5, color = self.circle_colors[1]) new_circle2.next_to(center_circle, RIGHT, buff = 0) inv_old_group = VGroup(line1, line2, center_circle) inv_new_group = VGroup(new_circle1, new_circle2) inv_group = VGroup(inv_old_group, inv_new_group) inv_group.rotate(-PI*2/5) inv_group.shift(3*RIGHT) circle = FineCircle(radius = 3.5, color = YELLOW) circle.shift(2*LEFT) circle_center = Dot(circle.get_center(), color = YELLOW) remark_circle = TextMobject("反演圆", color = YELLOW) remark_circle.next_to(circle.get_bottom(), UP) remark_center = VGroup(*[ Arrow(DL, UR, color = YELLOW, buff = 0).scale(0.3), TextMobject("反演中心", color = YELLOW).scale(0.8), ]) remark_center.arrange_submobjects(DL, buff = 0) remark_center.next_to(circle_center, DL, buff = 0.1) orig_old_group = VGroup(*[ InversedVMobject(mob, circle, use_dashed_vmob = False, match_original_style = 
True) for mob in inv_old_group ]) orig_new_group = VGroup(*[ InversedVMobject(mob, circle, use_dashed_vmob = False, match_original_style = True) for mob in inv_new_group ]) for mob in orig_old_group: mob.clear_updaters() mob.set_stroke(width = 2) for mob in orig_new_group: mob.clear_updaters() mob.set_stroke(width = 5) mob.set_fill(opacity = 0.1) self.add(orig_old_group) self.add(circle, circle_center, remark_circle, remark_center) self.circle = circle self.inv_old_group = inv_old_group self.inv_new_group = inv_new_group self.orig_old_group = orig_old_group self.orig_new_group = orig_new_group def show_process(self): dashed_inv_old_group = VGroup(*[ DashedVMobject(mob, **self.dashed_vmob_config) for mob in self.inv_old_group ]) dashed_inv_new_group = VGroup(*[ DashedVMobject(mob, **self.dashed_vmob_config) for mob in self.inv_new_group ]) self.play(ShowCreation(dashed_inv_old_group, lag_ratio = 0.05), run_time = 3) self.wait() dashed_copys = VGroup(*[dashed_inv_old_group[-1].deepcopy() for k in range(2)]) dashed_copys.generate_target() for mob_copy, mob_template in zip(dashed_copys.target, dashed_inv_new_group): mob_copy.match_style(mob_template) mob_copy.move_to(mob_template.get_center()) self.play(MoveToTarget(dashed_copys), run_time = 3) self.remove(dashed_copys) self.add(dashed_inv_new_group) self.wait() self.play(DrawBorderThenFill(self.orig_new_group), run_time = 3) self.wait(2) self.play( FadeOut(dashed_inv_new_group), FadeOut(dashed_inv_old_group), FadeOut(self.orig_new_group), ) self.wait() class DifferentTangentTypesWithSameConclusion(KissingCirclesPuzzle): CONFIG = { "random_seed" : 570, "num_of_nudges" : 5, "max_step" : 0.5, "color_1" : ORANGE, "color_2" : RED, } def construct(self): super().show_figure() self.dots_l.save_state() self.dots_r.save_state() for dfc in [self.dfc_l, self.dfc_r]: dfc.add_new_circles() dfc.get_orig_circles().set_stroke(width = 2) c4_1, c4_2 = dfc.get_new_circles() c4_1.set_color(self.color_1) c4_2.set_color(self.color_2) 
        self.add(self.dfc_l, self.dfc_r)
        # Repeatedly nudge every defining dot by a random step (bounded by
        # self.max_step): the tangency configuration deforms, yet the two
        # tangent "fourth circles" persist throughout.
        for k in range(self.num_of_nudges):
            for dot in it.chain(self.dots_l, self.dots_r):
                dot.generate_target()
                dot.target.shift(get_random_vector(self.max_step))
            anims = AnimationGroup(*[
                MoveToTarget(dot, path_arc = PI/3., run_time = 1.5)
                for dot in it.chain(self.dots_l, self.dots_r)
            ], run_time = 2)
            self.play(anims)
            self.wait()
        # Return the dots to the state saved before the nudging started.
        self.play(self.dots_l.restore, self.dots_r.restore, run_time = 1.5)


class LineToCircleInversionRevisited(LineToCircleInversion):
    """Same figure as LineToCircleInversion, but with the conclusion text
    replaced by the |OA| = d  ->  |OA'| = R^2/d length relation."""

    def construct(self):
        super().construct()
        self.remove_conclusions()
        self.add_explanation()

    def remove_conclusions(self):
        # Strip the background rectangle and conclusion text drawn by the
        # parent scene so the explanation below can take their place.
        self.remove(self.bg_rect)
        self.remove(self.conclusions)

    def add_explanation(self):
        # Radius marker "R" of the inversion circle, rotated slightly
        # below the horizontal so it does not overlap other elements.
        radius = Line(
            self.circle_O.get_left(), self.circle_O.get_center(),
            color = self.color_circle, stroke_width = 1,
        )
        radius_text = TexMobject("R", color = self.color_circle)
        radius_text.next_to(radius, UP, buff = 0.1)
        radius_group = VGroup(radius, radius_text)
        radius_group.rotate(-PI/12, about_point = self.circle_O.get_center())
        # The d -> R^2/d length relation, shown at the right edge.
        remark_length = TexMobject("|OA| = d", "\\Downarrow", "|OA'| = \dfrac{R^2}{d}")
        remark_length.arrange_submobjects(DOWN)
        remark_length.scale(1.2)
        remark_length[0].set_color(self.color_orig)
        remark_length[-1].set_color(self.color_inv)
        remark_length.to_edge(RIGHT)
        self.add(radius_group, remark_length)
        self.wait()


class CircleToCircleInversionRevisited(CircleToCircleInversionProof):
    """Static recap of CircleToCircleInversionProof with the length
    relations |OA'|, |OB'| (and the derived |A'B'|, |OI|) spelled out."""

    def construct(self):
        super().add_backgrounds()
        super().show_left_and_right_points()
        super().show_random_point()
        super().show_similar_triangles()
        self.arrange_elements()
        self.add_explanation()

    def arrange_elements(self):
        # Freeze the animated proof at a fixed angle and remove the pieces
        # that are not needed in the static figure.
        self.angle_tracker.set_value(PI/3)
        self.remove(self.remark_O)
        self.remove(self.ai_OAP, self.ai_OBP, self.ai_OPiAi, self.ai_OPiBi)
        self.add(self.inv_circle)
        self.add(self.dots_P, self.labels_P)
        self.add(self.dots_AB, self.labels_AB)
        self.add(self.aux_lines, self.rtais)
        # I marks the center of the image circle; the on-screen remark
        # added below stresses it is NOT the inverse point of C's center.
        dot_I = Dot(self.inv_circle.get_center())
        label_I = DotLabel("I", dot_I, position = DOWN, label_buff =
0.15).scale(0.8) for mob in (dot_I, label_I): mob.set_sheen_direction(RIGHT) mob.set_color([self.color_B, self.color_A]) remark_I = TextMobject("反形的圆心(并非$C$的反点!)") remark_I.scale(0.5) remark_I.next_to(label_I, DOWN, buff = 0.1) self.add(dot_I, label_I, remark_I) def add_explanation(self): for circle, color, text, angle in zip( [self.circle_O, self.circle_C], [self.color_O, MAROON_B], ["R", "r"], [-PI/12, PI/3] ): radius = Line( circle.get_left(), circle.get_center(), color = color, stroke_width = 1, ) radius_text = TexMobject(text, color = color) radius_text.next_to(radius, UP, buff = 0.1) radius_group = VGroup(radius, radius_text) radius_group.rotate(angle, about_point = circle.get_center()) self.add(radius_group) remark_length_A = TexMobject("|OA| = d-r", "\\Rightarrow", "|OA'| = \dfrac{R^2}{d-r}") remark_length_B = TexMobject("|OB| = d+r", "\\Rightarrow", "|OB'| = \dfrac{R^2}{d+r}") remark_length_A[0].set_color(self.color_A) remark_length_A[-1].set_color(self.color_A) remark_length_B[0].set_color(self.color_B) remark_length_B[-1].set_color(self.color_B) length_group = VGroup(remark_length_A, remark_length_B) length_group.arrange_submobjects(DOWN, buff = 0.4) brace = Brace(length_group, RIGHT) arrow = TexMobject("\\Rightarrow") remarks = VGroup( TexMobject("|A'B'| = \\dfrac{2 R^2 r}{|d^2-r^2|}"), TexMobject("|OI| = \\dfrac{R^2 d}{|d^2-r^2|}") ) remarks.arrange_submobjects(DOWN, aligned_edge = LEFT) remarks.set_color(MAROON_B) result_group = VGroup(brace, arrow, remarks) result_group.arrange_submobjects(RIGHT) result_group.next_to(length_group, RIGHT) remark_group = VGroup(length_group, result_group) remark_group.center().to_edge(DOWN, buff = 0.2) bg_rect = BackgroundRectangle(remark_group, fill_opacity = 0.9) self.add(bg_rect, remark_group) self.wait() class DescartesTheoremExamples(Scene): CONFIG = { "circle_colors" : [MAROON_B, RED, GREEN, BLUE], "curvs_outer" : [3, 6, 7, 34], "curvs_inner" : [10, 15, 19, -6], } def setup(self): self.text_color_map = dict( 
zip(["{k_1}", "{k_2}", "{k_3}", "{k_4}"], self.circle_colors) ) def construct(self): self.add_title() self.add_outer_dfc() self.add_inner_dfc() def add_title(self): title = TexMobject( "\\left(", "{k_1}", "+", "{k_2}", "+", "{k_3}", "+", "{k_4}", "\\right) ^2", "= 2 \\left(", "{k_1}","^2 +","{k_2}","^2 +","{k_3}","^2 +","{k_4}","^2", "\\right)" ) title.set_color_by_tex_to_color_map(self.text_color_map) title.scale(1.2) title.to_edge(UP, buff = 0.2) self.add(title) def add_outer_dfc(self): r1, r2, r3, r4 = [1./curv for curv in self.curvs_outer] p1, p2, p3 = [ VectorizedPoint(center) for center in calc_centers_by_radii(r1, r2, r3, init_angle = PI*2/3) ] outer_dfc = DescartesFourCircles(p1, p2, p3, show_new_circles = False) c1, c2, c3 = outer_dfc.get_orig_circles() c4 = outer_dfc.get_new_circles()[0] outer_circles = VGroup(c1, c2, c3, c4) outer_circles.clear_updaters() outer_circles.set_height(5.5) outer_circles.to_corner(DL) texts = VGroup(*[ TexMobject(f"k_{num}", "=", f"{curv}") \ .scale(0.8) \ .move_to(circle.get_center()) for num, curv, circle in zip(range(1, 5), self.curvs_outer, outer_circles) ]) for circle, text, color in zip(outer_circles, texts, self.circle_colors): circle.set_color(color) text.set_color(color) texts[-1].shift(2.5*RIGHT+1.2*UP) arrow = Arrow( texts[-1].get_bottom(), outer_circles[-1].get_right(), path_arc = -PI*2/3, buff = 0.1, ).set_color(self.circle_colors[-1]) outer_group = VGroup(outer_circles, texts, arrow) self.add(outer_group) def add_inner_dfc(self): r1, r2, r3, r4 = [1./curv for curv in self.curvs_inner] p1, p2, p3 = [ VectorizedPoint(center) for center in calc_centers_by_radii(r1, r2, r3, init_angle = -PI/7) ] inner_dfc = DescartesFourCircles(p1, p2, p3, show_new_circles = False) c1, c2, c3 = inner_dfc.get_orig_circles() c4 = inner_dfc.get_new_circles()[1] inner_circles = VGroup(c1, c2, c3, c4) inner_circles.clear_updaters() inner_circles.set_height(5.5) inner_circles.to_corner(DR) inner_texts = VGroup(*[ TexMobject(f"k_{num}", 
"=", f"{curv}") \ .scale(0.8) \ .move_to(circle.get_center()) for num, curv, circle in zip(range(1, 5), self.curvs_inner, inner_circles) ]) for circle, text, color in zip(inner_circles, inner_texts, self.circle_colors): circle.set_color(color) text.set_color(color) inner_texts[-1].shift(2.8*LEFT+2.7*UP) inner_arrow = Arrow( inner_texts[-1].get_critical_point(DOWN), inner_texts[-1].get_critical_point(DOWN)+0.7*DR, buff = 0.1, ).set_color(self.circle_colors[-1]) inner_group = VGroup(inner_circles, inner_texts, inner_arrow) self.add(inner_group) self.wait() self.inner_circles = inner_circles self.inner_texts = inner_texts self.inner_arrow = inner_arrow class DFCInversionProofP1(DescartesTheoremExamples): CONFIG = { "remark_scale_text" : "示意图,图像并非真实比例", "orig_label_texts" : ["C_1", "C_2", "C_3", "C_4"], "inv_label_texts" : ["C_1'", "C_2'", "C_3'", "C_4'"], } def construct(self): super().add_inner_dfc() self.arrange_elements() self.add_labels() self.add_inversion_center() self.add_mapsto_symbol() self.add_not_to_scale_remark() self.wait() def arrange_elements(self): self.remove(self.inner_texts, self.inner_arrow) self.inner_circles.center().shift(4*UP) normal_form = FourCirclesNormalForm() normal_form.shift(4*DOWN) self.add(normal_form) self.normal_form = normal_form def add_labels(self): orig_labels = VGroup() for n, (circle, text) in enumerate(zip(self.inner_circles, self.orig_label_texts)): label = TexMobject(text).scale(1.2) label.set_color(circle.get_color()) label.move_to(circle.get_center()) orig_labels.add(label) inv_labels = VGroup() for n, (circle, text) in enumerate(zip(self.normal_form, self.inv_label_texts)): label = TexMobject(text).scale(1.2) label.set_color(circle.get_color()) label.move_to(circle.get_center()) inv_labels.add(label) c1, c2, c3, c4 = self.inner_circles l1, l2, l3, l4 = orig_labels c1i, c2i, c3i, c4i = self.normal_form l1i, l2i, l3i, l4i = inv_labels l4.next_to(c4.get_bottom(), UP, buff = 0.3) l3i.next_to(c3i, DOWN).to_edge(RIGHT) 
l4i.next_to(c4i, UP).to_edge(RIGHT) self.add(orig_labels, inv_labels) self.orig_labels = orig_labels self.inv_labels = inv_labels def add_inversion_center(self): c1, c2, c3, c4 = self.inner_circles inv_center = get_tangent_point(c3, c4) dot_O = Dot(inv_center, color = YELLOW) label_O = TexMobject("O", color = YELLOW).next_to(dot_O, UP) remark_O = TextMobject("反演中心", color = YELLOW) remark_O.next_to(dot_O, RIGHT, buff = 1.5) arrow_O = Arrow(remark_O.get_left(), dot_O.get_right(), color = YELLOW, buff = 0.2) orig_center_group = VGroup(dot_O, label_O, remark_O, arrow_O) inv_dot_O = VectorizedPoint() inv_dot_O.next_to(self.normal_form[-1], UP, buff = 1.4) inv_dot_O.shift(2*RIGHT) inv_center_group = orig_center_group.deepcopy() inv_center_group.shift(inv_dot_O.get_center() - dot_O.get_center()) self.add(orig_center_group, inv_center_group) self.orig_center_group = orig_center_group self.inv_center_group = inv_center_group def add_mapsto_symbol(self): mapsto = TexMobject("\\mapsto") mapsto.rotate(-PI/2) mapsto.scale(2.5) mapsto.next_to(self.inner_circles, DOWN) remark_mapsto = TextMobject("反演变换") remark_mapsto.next_to(mapsto, LEFT) self.add(mapsto, remark_mapsto) def add_not_to_scale_remark(self): remark_scale = TextMobject("(" + self.remark_scale_text + ")") remark_scale.scale(0.75) remark_scale.next_to(6.5*DL, RIGHT, buff = 0) self.add(remark_scale) class DFCInversionProofP2(DFCInversionProofP1): CONFIG = { "remark_scale_text" : "示意图,反演圆未标出,且图像并非真实比例", "inv_label_texts" : ["C_1'", "C_2'", "C_3':y=-1", "C_4':y=1"], "inv_center_coord_text" : "(x_0, y_0) \\, (y_0>1)", "circle_center_coord_texts" : ["(-1,0)", "(1,0)"], } def construct(self): super().construct() self.change_center_remarks() self.add_coord_system() self.change_inv_labels() self.wait() def change_center_remarks(self): for center_group in (self.orig_center_group, self.inv_center_group): dot, label, remark, arrow = center_group self.remove(remark, arrow) if center_group is self.inv_center_group: coord = 
TexMobject(self.inv_center_coord_text) coord.next_to(dot, RIGHT) coord.set_color(dot.get_color()) self.add(coord) def add_coord_system(self): c1, c2, c3, c4 = self.normal_form center_point = (c1.get_center() + c2.get_center()) / 2 unit_size = c1.get_height()/2 coord_system = Axes( center_point = center_point, number_line_config = {"unit_size" : unit_size}, y_min = -1.8, y_max = 2.8, ) self.add(coord_system) self.coord_system = coord_system def change_inv_labels(self): l1i, l2i, l3i, l4i = self.inv_labels for label, x_coord, coord_text in zip([l1i, l2i], [-1, 1], self.circle_center_coord_texts): center = self.coord_system.c2p(x_coord, 0) label.next_to(center, UP) dot_i = Dot(center, radius = 0.1).set_color(label.get_color()) coord_i = TexMobject(coord_text).set_color(label.get_color()).next_to(center, DOWN) self.add(dot_i, coord_i) llonianGasketScene): CONFIG = { "max_iter" : 8, "curvatures" : [2, 2, 3], "init_angle" : 0, "curv_thres" : 30000, "ag_config": { "agc_config" : { "radius_thres" : 1e-3, "circle_color" : BLUE, "label_color" : WHITE, }, }, "color_curr" : YELLOW, "wait_time" : 2, } def construct(self): r1, r2, r3 = [1./curv for curv in self.curvatures] p1, p2, p3 = calc_centers_by_radii(r1, r2, r3, init_angle = self.init_angle) agc1 = AGCircle(p1, r1, parents = None, **self.ag_config["agc_config"]) agc2 = AGCircle(p2, r2, parents = None, **self.ag_config["agc_config"]) agc3 = AGCircle(p3, r3, parents = None, **self.ag_config["agc_config"]) remark = TextMobject("(圆内数字为该圆的曲率)") remark.scale(0.75).to_corner(DL) self.add(remark) for k in range(self.max_iter): agcs_copy = [agc.deepcopy() for agc in (agc1, agc2, agc3)] ag = ApollonianGasket( *agcs_copy, num_iter = k, curv_thres = self.curv_thres, **self.ag_config ) iter_num = VGroup( TextMobject("迭代次数:"), TexMobject(f"{k}") ).arrange_submobjects(RIGHT).scale(1.5) iter_num.to_edge(LEFT, buff = 1) ag.scale(3.8) ag.shift(np.array([0, 3.8, 0]) - ag.get_top() + 3*RIGHT) 
VGroup(*ag.agc_list[-1]).set_color(self.color_curr) self.add(ag, iter_num) self.wait(self.wait_time) if k != self.max_iter-1: self.remove(ag, iter_num) class ApollonianGasketExample1(Scene): CONFIG = { "max_iter" : 20, "curvatures" : [3, 6, 7], "curvature_texts" : [-2, 3, 6, 7], "init_angle" : 0, "curv_thres" : 4000, "ag_config": { "agc_config" : { "radius_thres" : 1e-3, "circle_color" : BLUE, "label_color" : WHITE, }, }, "ag_scaling_factor" : 5.2, } def construct(self): r1, r2, r3 = [1./curv for curv in self.curvatures] p1, p2, p3 = calc_centers_by_radii(r1, r2, r3, init_angle = self.init_angle) agc1 = AGCircle(p1, r1, parents = None, **self.ag_config["agc_config"]) agc2 = AGCircle(p2, r2, parents = None, **self.ag_config["agc_config"]) agc3 = AGCircle(p3, r3, parents = None, **self.ag_config["agc_config"]) ag_seed = ApollonianGasket( *[agc.deepcopy() for agc in (agc1, agc2, agc3)], num_iter = 0, curv_thres = self.curv_thres, **self.ag_config ) ag_result = ApollonianGasket( *[agc.deepcopy() for agc in (agc1, agc2, agc3)], num_iter = self.max_iter, curv_thres = self.curv_thres, **self.ag_config ) ag_seed_center = ag_seed[0][0].get_right() ag_result_center = ag_result[0][0].get_right() arrow = Arrow(LEFT, RIGHT) figure_group = VGroup(ag_seed, ag_result, arrow) for ag, center, direction in zip( [ag_seed, ag_result], [ag_seed_center, ag_result_center], [4*LEFT, 4*RIGHT]): ag.scale(self.ag_scaling_factor) ag.shift(direction - center) figure_group.shift(DOWN) k1, k2, k3, k4 = list(map(str, self.curvature_texts)) title = TexMobject( f"({k1}+{k2}+{k3}+{k4})^2 = 2\\left[({k1})^2+{k2}^2+{k3}^2+{k4}^2 \\right]" ) title.set_width(13) title.set_color(YELLOW) title.to_edge(UP) self.add(figure_group, title) self.wait() class ApollonianGasketExample2(ApollonianGasketExample1): CONFIG = { "max_iter" : 20, "curvatures" : [5, 8, 12], "curvature_texts" : [-3, 5, 8, 12], "curv_thres" : 5000, "ag_config": { "agc_config" : { "radius_thres" : 5e-4, "circle_color" : BLUE, "label_color" : 
WHITE, }, }, "ag_scaling_factor" : 8, } class LoxodromicSpiralInTangentCircles(Scene): CONFIG = { "max_iter" : 20, "agc_config" : { "radius_thres" : 1, "circle_color" : BLUE, "label_color" : WHITE, }, "curve_config" : { "color" : YELLOW, "stroke_width" : 2, }, "alpha" : 0.6, "dashed_line_config" : { "color" : GREY, "stroke_width" : 0.5, "num_dashes" : 200, "positive_space_ratio" : 0.6, } } def construct(self): self.generate_circles() self.generate_curves() self.generate_labels() self.generate_lines() self.add_elements() self.zooming_in() def generate_circles(self): agcm2 = AGCircle(2/3.*UP, 1/3., **self.agc_config) agcm1 = AGCircle(RIGHT/2, 1/2., **self.agc_config) agczr = AGCircle(ORIGIN, -1, **self.agc_config) agcp1 = AGCircle(LEFT/2, 1/2., **self.agc_config) agcp2 = AGCircle(2/3.*DOWN, 1/3., **self.agc_config) agc_list = [agcm2, agcm1, agczr, agcp1, agcp2] for n in range(self.max_iter): A, B, C, known_agc = agc_list[:4] agc_m_k, agc_m_c = calc_new_agc_info(A, B, C, known_agc = known_agc) agc_m = AGCircle(agc_m_c, 1./agc_m_k, parents = (A, B, C), **self.agc_config) known_agc, C, B, A = agc_list[-4:] agc_p_k, agc_p_c = calc_new_agc_info(C, B, A, known_agc = known_agc) agc_p = AGCircle(agc_p_c, 1./agc_p_k, parents = (C, B, A), **self.agc_config) agc_list.insert(0, agc_m) agc_list.append(agc_p) agc_group = VGroup(*agc_list) agc_group.set_height(7.8) self.agc_list = agc_list self.agc_group = agc_group def generate_curves(self): agc_ps = self.agc_list[-self.max_iter-4:] agc_ps_points = [] loxo_curve_p_solid = VMobject(**self.curve_config) for k in range(len(agc_ps)-2): if k != 0: c1, c2, c3 = agc_ps[k], agc_ps[k+1], agc_ps[k+2] pt1 = get_tangent_point(c1, c2) pt2 = get_tangent_point(c2, c3) p = c2.get_center() if k != 1: agc_ps_points.extend( [pt1, p*(1-self.alpha)+pt1*self.alpha, p*(1-self.alpha)+pt2*self.alpha, pt2] ) else: agc_ps_points.extend( [pt1, p*0.7+pt1*0.3, p*0.6+pt2*0.4, pt2] ) else: c1, c2 = agc_ps[1], agc_ps[2] pt = get_tangent_point(c1, c2) 
agc_ps_points.extend([8*LEFT, 7*LEFT, 6*LEFT, pt]) loxo_curve_p_solid.append_points(agc_ps_points) loxo_curve_m_solid = loxo_curve_p_solid.deepcopy() loxo_curve_m_solid.rotate(PI, about_point = self.agc_group.get_center()) self.loxo_curve_p_solid = loxo_curve_p_solid self.loxo_curve_m_solid = loxo_curve_m_solid def generate_labels(self): labels = VGroup(*[ TexMobject("C_{%d}" % num, background_stroke_width = 0) for num in range(-self.max_iter-2, self.max_iter+3) ]) for label, circle in zip(labels, self.agc_group): label.set_height(circle.get_height()*0.15) label.move_to(circle.get_center()) label_c0 = labels[self.max_iter+2] label_c0.set_height(0.8) label_c0.next_to(self.agc_group.get_critical_point(UL), DR, buff = 0.1) self.labels = labels def generate_lines(self): agc_ps = self.agc_list[-self.max_iter-2:] line_p_solid = VMobject(**self.dashed_line_config) line_p_solid_corners = [8*LEFT] for circle in agc_ps: line_p_solid_corners.append(circle.get_center()) line_p_solid.set_points_as_corners(line_p_solid_corners) line_m_solid = line_p_solid.deepcopy() line_m_solid.rotate(PI, about_point = self.agc_group.get_center()) self.line_p_solid = line_p_solid self.line_m_solid = line_m_solid def add_elements(self): figure = VGroup( self.agc_group, self.loxo_curve_p_solid, self.loxo_curve_m_solid, self.line_p_solid, self.line_m_solid, self.labels, ) self.add(figure) self.figure = figure def zooming_in(self): self.figure.save_state() self.wait(0.5) self.play( ApplyMethod(self.figure.shift, -self.agc_group[-1].get_center()), run_time = 2, ) self.wait() for k in range(10): self.play( ApplyMethod(self.figure.scale, 2.5, {"about_point" : self.agc_group[-1].get_center()}), run_time = 2, ) self.wait() self.play(self.figure.restore, run_time = 15) self.wait(2) class ShowFordCircles(ZoomInOnFordCircles): CONFIG = { "q_max" : 30, } def construct(self): self.setup_axes() self.setup_circles_and_labels() self.add_remarks() self.first_zoom_in() self.wait() def first_zoom_in(self): 
self.zoom_in_on(1/2., 6) def add_remarks(self): nl_text = TextMobject("数轴") nl_arrow = Arrow(ORIGIN, UP).match_height(nl_text) nl_remark = VGroup(nl_arrow, nl_text) nl_remark.scale(0.8) nl_remark.set_color(LIGHT_GREY) nl_remark.arrange_submobjects(RIGHT, buff = 0.1) nl_remark.next_to(self.axes.coords_to_point(0, 0), DOWN, buff = 0.1) nl_remark.to_edge(LEFT, buff = 0.15) frac_remark = TextMobject("圆内分数为圆心横坐标") frac_remark.scale(0.6) frac_remark.to_corner(DL, buff = 0.15) self.add(nl_remark, frac_remark) class ShowFordCirclesDetails(ShowFordCircles): CONFIG = { "q_max" : 100, } def construct(self): super().construct() self.further_zoom_in() def setup_circles_and_labels(self): circles = VGroup() labels = VGroup() for q in range(1, self.q_max+1): for p in get_coprime_numers_by_denom(q): if (q <= 40) or (0.6 <= p/q <= 0.8): circle = self.generate_circle_by_fraction(p, q) circle.add_updater( lambda m: m.set_stroke(width = get_stroke_width_by_height(m.get_height())) ) label = AssembledFraction(p, q) label.set_height(circle.get_height() * self.label_height_factor) label.move_to(circle.get_center()) circles.add(circle) labels.add(label) self.add(circles, labels) self.circles = circles self.labels = labels def further_zoom_in(self): self.acl = VGroup(self.axes, self.circles, self.labels) self.acl.save_state() self.wait(0.5) self.play_zooming_animation(1/np.sqrt(2), 9, run_time = 5) self.wait() self.play_zooming_animation(0.73, 5, run_time = 4) self.wait() self.play_zooming_animation(0.74, 5, run_time = 4) self.wait() self.play(self.acl.restore, run_time = 5) self.wait(2) class ProveFordCirclesPropertiesP1(Scene): CONFIG = { "c1_frac" : [2, 3], "c2_frac" : [3, 4], "c3_frac" : [5, 7], "circle_config" : {"stroke_color" : BLUE, "stroke_width" : 2,}, "line_config" : {"stroke_color" : GREY, "stroke_width" : 2,}, "aux_line_config" : {"stroke_color" : GREY, "stroke_width" : 0.8,}, "polygon_config" : {"fill_color" : GREY, "fill_opacity" : 0.4, "stroke_width" : 0,}, } def setup(self): 
a, b = self.c1_frac c, d = self.c2_frac p, q = self.c3_frac r1 = 1/(2*b**2) r2 = 1/(2*d**2) r3 = 1/(2*q**2) c1_center = a/b*RIGHT + r1*UP c2_center = c/d*RIGHT + r2*UP c3_center = p/q*RIGHT + r3*UP c1 = Circle(arc_center = c1_center, radius = r1, **self.circle_config) c2 = Circle(arc_center = c2_center, radius = r2, **self.circle_config) c3 = Circle(arc_center = c3_center, radius = r3, **self.circle_config) c1_dot = SmallDot(color = GREY) c1_dot.add_updater(lambda m: m.move_to(c1.get_center())) c2_dot = SmallDot(color = GREY) c2_dot.add_updater(lambda m: m.move_to(c2.get_center())) c3_dot = SmallDot(color = GREY) c3_dot.add_updater(lambda m: m.move_to(c3.get_center())) line = Line( 2*c1.get_bottom()-c2.get_bottom(), 2*c2.get_bottom()-c1.get_bottom(), **self.line_config ) VGroup(c1, c2, c3, line).set_height(6).center().to_edge(UP) aux_line_1 = Line(c1.get_center(), c1.get_bottom(), **self.aux_line_config) aux_line_2 = Line(c2.get_center(), c2.get_bottom(), **self.aux_line_config) aux_line_3 = Line(c1.get_center(), c2.get_center(), **self.aux_line_config) aux_line_4 = Line(c1.get_bottom(), c2.get_bottom(), **self.aux_line_config) \ .shift(c2.get_height()/2*UP) polygon = Polygon( c1.get_center(), c2.get_center(), aux_line_4.get_start_and_end()[0], **self.polygon_config, ) l1 = TexMobject("\\dfrac{a}{b}").next_to(c1, DOWN) l2 = TexMobject("\\dfrac{c}{d}").next_to(c2, DOWN) l3 = TexMobject("\\dfrac{a+c}{b+d}").next_to(c3, DOWN) self.orig_group = VGroup(c1, c2, line, c1_dot, c2_dot, l1, l2) self.aux_group = VGroup(aux_line_1, aux_line_2, aux_line_3, aux_line_4, polygon) self.new_group = VGroup(c3, c3_dot, l3) def construct(self): self.add(self.orig_group, self.aux_group) self.wait() class ProveFordCirclesPropertiesP2(ProveFordCirclesPropertiesP1): def construct(self): self.add(self.orig_group, self.new_group) self.wait() class ShowFordCirclesFareySum(ZoomInOnFordCircles): pass class DFCInversionProofP3(DFCInversionProofP2): CONFIG = { "remark_scale_text" : 
"示意图,反演圆未标出,且图像并非真实比例", "inv_label_texts" : ["C_1'", "C_2'", "C_3':\\mathrm{Im}(z)=-1", "C_4':\\mathrm{Im}(z)=1"], "inv_center_coord_text" : "z_0 = x_0+iy_0\\, (y_0>1)", "circle_center_coord_texts" : ["-1", "1"], } def construct(self): super().construct() self.wait() def add_coord_system(self): c1, c2, c3, c4 = self.normal_form center_point = (c1.get_center() + c2.get_center()) / 2 unit_size = c1.get_height()/2 coord_system = NumberPlane( center_point = center_point, number_line_config = {"unit_size" : unit_size}, y_min = -3, y_max = 3, background_line_style = { "stroke_color" : GREY, "stroke_width" : 1.5, "stroke_opacity" : 0.8, }, ) aux_coord_system = Axes( center_point = center_point, number_line_config = {"unit_size" : unit_size}, y_min = -3, y_max = 3, stroke_opacity = 0.8, ) self.add(coord_system, aux_coord_system) self.coord_system = coord_system class NormalFormIn3D(ThreeDScene): CONFIG = { "axis_unit_size" : 1.5, "axis_min" : -1.5, "axis_max" : 2.8, "resolution" : (60, 120), "plane_colors" : [GREEN, BLUE], "sphere_colors" : [MAROON_B, RED, PINK], } def construct(self): self.add_3d_stuff() self.add_2d_stuff() def add_3d_stuff(self): self.set_camera_orientation(theta = 70 * DEGREES, phi = 50 * DEGREES) axes = ThreeDAxes( x_min = self.axis_min, x_max = self.axis_max, y_min = self.axis_min, y_max = self.axis_max, z_min = self.axis_min, z_max = self.axis_max, number_line_config = {"unit_size" : self.axis_unit_size}, ) sphere_centers = [ axis.number_to_point(1) for axis in [axes.x_axis, axes.y_axis, axes.z_axis] ] radius = 1/np.sqrt(2) * self.axis_unit_size sphere_dots = VGroup(*[ Sphere( radius = 0.08, resolution = self.resolution, fill_opacity = 1, stroke_width = 0, ).move_to(sphere_center).set_color(color) for sphere_center, color in zip(sphere_centers, self.sphere_colors) ]) spheres = VGroup(*[ Sphere( radius = radius, resolution = self.resolution, fill_opacity = 0.6, stroke_width = 0.5, ).move_to(sphere_center).set_color(color) for sphere_center, color in 
zip(sphere_centers, self.sphere_colors) ]) planes = VGroup(*[ VGroup(*[ Square( side_length = 1, fill_opacity = fill_opacity, stroke_color = GREY, stroke_width = 0.3, stroke_opacity = 0.2, ) for k in range(n**2) ]).arrange_in_grid(n, n, buff = 0) \ .apply_matrix(z_to_vector([1, 1, 1])) \ .move_to(np.average(sphere_centers)) \ .shift(radius * normalize(direction)) \ .set_color(color) for n, fill_opacity, direction, color in zip( [7, 8], [0.2, 0.3], [np.ones(3), -np.ones(3)], self.plane_colors, ) ]) figure_group = VGroup(axes, planes, sphere_dots, spheres) figure_group.shift(RIGHT*2+0.5*OUT) self.add(figure_group) self.add(axes) self.add(planes) self.add(sphere_dots, spheres) def add_2d_stuff(self): sphere_remarks = VGroup(*[ TextMobject( "球:圆心为" + f"$({int(x)},{int(y)},{int(z)})$" + \ ",半径为" + "$\\dfrac{1}{\\sqrt{2}}$" ).set_color(color) for (x, y, z), color in zip([RIGHT, UP, OUT], self.sphere_colors) ]).arrange_submobjects(DOWN) plane_remarks = VGroup(*[ TexMobject( "\\text{平面:}" + "x+y+z=1" + sign + "\\dfrac{\\sqrt{3}}{\\sqrt{2}" ).set_color(color) for sign, color in zip(["+", "-"], self.plane_colors) ]).arrange_submobjects(DOWN) remarks = VGroup(sphere_remarks, plane_remarks) remarks.arrange_submobjects(DOWN, aligned_edge = LEFT) remarks.scale(0.8) remarks.to_corner(DR) self.add_fixed_in_frame_mobjects(remarks) self.wait() ##### ## Banner class Banner_Intro(Scene): CONFIG = { "circle_color" : YELLOW, "text_color" : BLUE, "inv_text_color" : BLUE, "circle_center" : 0.8*UP, "circle_radius" : 3, "grid_side_length" : 0.5, "x_range" : 300, "y_range" : 300, "dist_thres" : 300, } def construct(self): circle = Circle(color = self.circle_color, radius = self.circle_radius, stroke_width = 5) circle.move_to(self.circle_center) dot = SmallDot(self.circle_center, color = self.circle_color) text = TextMobject("Inversion", color = self.text_color, background_stroke_width = 3) text.rotate(PI/2.) text.move_to(0.4*RIGHT) text.apply_complex_function(np.exp) text.rotate(-PI/2.) 
text.scale(1.5) text.move_to(0.9*DOWN) inv_text = InversedVMobject(text, circle, use_dashed_vmob = False) inv_text.suspend_updating() inv_text.set_background_stroke(color = "#303030", width = 3) inv_text.set_stroke(width = 0) inv_text.set_fill(color = self.inv_text_color, opacity = 0.5) grid = VGroup(*[ Square( side_length = self.grid_side_length, stroke_width = 0, fill_opacity = 0.3, fill_color = CB_DARK if (i+j)%2==0 else CB_LIGHT ).move_to(self.circle_center + (i*RIGHT+j*UP)*self.grid_side_length) for i in range(-self.x_range, self.x_range+1, 1) for j in range(-self.y_range, self.y_range+1, 1) if np.sqrt(i**2+j**2) * self.grid_side_length < self.dist_thres ]) for square in grid: if is_close_in_R3(square.get_center(), self.circle_center): grid.remove(square) inv_grid = InversedVMobject(grid, circle, use_dashed_vmob = False) self.add(inv_grid, circle, dot, text, inv_text) self.wait() class Banner_AdvancedP1(ApollonianGasketScene): CONFIG = { "curvatures" : [570, 968, 1112], "init_angle" : PI/7, "num_iter" : 20, "curv_thres" : 1e6, "ag_config" : { "agc_config" : { "radius_thres" : 5e-6, "circle_color" : YELLOW, "label_color" : WHITE, }, }, "part_text" : "上篇", } def construct(self): super().construct() ag = self.ag ag.set_height(7) circle_myst = ag.agc_list[0][0] label_myst = circle_myst.label label_question = TexMobject("???") label_question.match_height(label_myst) label_question.move_to(label_myst) self.remove(label_myst) self.add(label_question) part = TextMobject(self.part_text) part.to_corner(DR) self.add(part) class Banner_AdvancedP2(Banner_AdvancedP1): CONFIG = { "part_text" : "下篇", }
true
true
f716c4d714c5addfce5044ca823eb0d4556612cc
518
py
Python
test/autest/gold_tests/smoke/smoke.test.py
SolidWallOfCode/txn-box
d92269be8bb8989bdaa96048757a01a0d3f6ba6d
[ "Apache-2.0" ]
7
2019-10-11T23:53:01.000Z
2021-09-15T01:56:50.000Z
test/autest/gold_tests/smoke/smoke.test.py
SolidWallOfCode/txn-box
d92269be8bb8989bdaa96048757a01a0d3f6ba6d
[ "Apache-2.0" ]
13
2019-08-07T16:03:51.000Z
2022-03-24T19:01:33.000Z
test/autest/gold_tests/smoke/smoke.test.py
SolidWallOfCode/txn-box
d92269be8bb8989bdaa96048757a01a0d3f6ba6d
[ "Apache-2.0" ]
5
2019-07-24T15:59:02.000Z
2021-06-23T10:02:47.000Z
# @file
#
# Copyright 2020, Verizon Media
# SPDX-License-Identifier: Apache-2.0
#
'''
Basic smoke tests.
'''
Test.Summary = '''
Test basic functions and directives.
'''

# Remap rules for the test proxy, each entry being
# (from-URL, to-URL[, (extra per-remap plugin arguments...)]).
remap_rules = [
    ('http://example.one/3', 'http://example.one/3',
     ('--key=meta.txn_box.remap-1', 'smoke.replay.yaml')),
    ('http://example.one', 'http://example.one'),
]

# Drive the replay file through the txn_box plugin: global config comes from
# the "meta.txn_box.global" key, remap rule 1 loads its own config key.
Test.TxnBoxTestAndRun(
    "Smoke Test", "smoke.replay.yaml",
    config_path='Auto',
    config_key="meta.txn_box.global",
    remap=remap_rules,
)
28.777778
125
0.57722
Test.Summary = ''' Test basic functions and directives. ''' Test.TxnBoxTestAndRun("Smoke Test", "smoke.replay.yaml", config_path='Auto', config_key="meta.txn_box.global" ,remap=[('http://example.one/3', 'http://example.one/3', ('--key=meta.txn_box.remap-1', 'smoke.replay.yaml')) ,('http://example.one', 'http://example.one') ] )
true
true
f716c4e4e1ba4e92ac11bebaf53ac8c7e09eae5b
209
py
Python
treat/__init__.py
tjlaboss/tasty_treat
5a137b49c6648eda6500025de8bab9c8dcc78d45
[ "MIT" ]
3
2019-03-04T22:52:07.000Z
2022-01-23T12:28:58.000Z
treat/__init__.py
tjlaboss/tasty_treat
5a137b49c6648eda6500025de8bab9c8dcc78d45
[ "MIT" ]
3
2021-07-23T17:30:35.000Z
2021-09-17T16:25:57.000Z
treat/__init__.py
tjlaboss/tasty_treat
5a137b49c6648eda6500025de8bab9c8dcc78d45
[ "MIT" ]
null
null
null
# Package initializer: re-export the package's submodules and its two
# primary builder classes so callers can write e.g. ``treat.TreatLattice``.
from . import constants
# NOTE(review): this submodule shares its name with the stdlib ``argparse``,
# so ``treat.argparse`` shadows it inside this package -- presumably
# intentional; confirm before renaming.
from . import argparse
from . import elements
from . import materials
from . import mesh
from . import moc
from .treat_lattice import TreatLattice
from .core_builder import CoreBuilder
23.222222
39
0.808612
from . import constants from . import argparse from . import elements from . import materials from . import mesh from . import moc from .treat_lattice import TreatLattice from .core_builder import CoreBuilder
true
true
f716c526012bd7a8136418de119de0fdce82deb1
2,884
py
Python
mswh/comm/tests/test_sql.py
hannesb0/MSWH
ce214f26369106c124052638e93cc38fbd58cc91
[ "BSD-3-Clause-LBNL" ]
5
2019-05-23T00:54:33.000Z
2021-06-01T18:06:49.000Z
mswh/comm/tests/test_sql.py
hannesb0/MSWH
ce214f26369106c124052638e93cc38fbd58cc91
[ "BSD-3-Clause-LBNL" ]
36
2019-05-22T23:02:35.000Z
2021-04-04T21:24:17.000Z
mswh/comm/tests/test_sql.py
hannesb0/MSWH
ce214f26369106c124052638e93cc38fbd58cc91
[ "BSD-3-Clause-LBNL" ]
14
2019-08-25T01:27:40.000Z
2021-11-17T19:25:02.000Z
import logging
import os
import unittest

from mswh.comm.sql import Sql

import pandas as pd

logging.basicConfig(level=logging.DEBUG)

# has setUpClass method, thus run the test on the entire class
class SqlTests(unittest.TestCase):
    """Tests the db-python read-write capabilities.

    Note: the test methods are prefixed ``test_a_`` .. ``test_e_`` because
    they share a single database file and rely on unittest's default
    alphabetical execution order (e.g. ``test_c_table2pd`` reads the table
    written by ``test_a_pd2table``).
    """

    @classmethod
    def setUpClass(cls):
        """Initiates the sqlite db engine for the test db file."""
        test_db_name = "test.db"

        test_db_fulpath = os.path.join(os.path.dirname(__file__), test_db_name)
        cls.test_db_fulpath = test_db_fulpath
        print(test_db_fulpath)

        # Create an empty test db file if it does not exist. Plain file
        # creation is used instead of ``os.system("touch " + path)`` so the
        # setup works on platforms without a ``touch`` binary (e.g. Windows)
        # and does not pass an unquoted path through a shell.
        if not os.path.exists(test_db_fulpath):
            with open(test_db_fulpath, "a"):
                pass

        cls.sql_api = Sql(test_db_fulpath)

        # example dataframe to write to db
        cls.df = pd.DataFrame(
            data=[["a", 1], ["b", 2]], columns=["comp", "cost"]
        )

        # example dict to write to db as table (currently not used by the
        # tests below; kept as sample data)
        cls.dict = {"k1": [12, 13, 14], "k2": ["a", "b", "c"]}

        # example csv data
        cls.path_to_csv = os.path.join(os.path.dirname(__file__), "table.csv")

        # sql code to execute
        cls.raw_sql = """CREATE TABLE sys_components (
        Component TEXT NOT NULL ,
        Function TEXT NOT NULL ,
        PRIMARY KEY (Component)
        );"""

    @classmethod
    def tearDownClass(cls):
        """Clean up for any reinitiation of the test, but keep the result.

        Any new run will overwrite the result.
        """
        store_db_name = "test_done.db"

        # close the test db before renaming it
        cls.sql_api.db.close()

        store_db_fulpath = os.path.join(
            os.path.dirname(__file__), store_db_name
        )
        # rename file, overwrite if exists
        if os.path.exists(store_db_fulpath):
            os.remove(store_db_fulpath)
        os.rename(cls.test_db_fulpath, store_db_fulpath)

    def test_a_pd2table(self):
        """Tests write pandas dataframe to db as a table."""
        self.sql_api.pd2table(self.df, "pd2table")

    def test_b_csv2table(self):
        """Tests write csv file to db as a table."""
        self.sql_api.csv2table(self.path_to_csv, "csv2table")

    def test_c_table2pd(self):
        """Reads a single table from db as a pd.df (written in test_a_)."""
        df = self.sql_api.table2pd("pd2table")
        self.assertTrue((df == self.df).all().all())

    def test_d_commit(self):
        """Use sql to write to db (e.g. create, alter)."""
        self.assertTrue(self.sql_api.commit(self.raw_sql))

    def test_e_tables2dict(self):
        """Read all tables from db into a dictionary of dataframes."""
        data = self.sql_api.tables2dict()
        self.assertEqual(data["pd2table"].iloc[1, 1], 2)
29.428571
80
0.587379
import logging import os import unittest from mswh.comm.sql import Sql import pandas as pd logging.basicConfig(level=logging.DEBUG) class SqlTests(unittest.TestCase): @classmethod def setUpClass(cls): test_db_name = "test.db" test_db_fulpath = os.path.join(os.path.dirname(__file__), test_db_name) cls.test_db_fulpath = test_db_fulpath print(test_db_fulpath) if not os.path.exists(test_db_fulpath): os.system("touch " + test_db_fulpath) cls.sql_api = Sql(test_db_fulpath) cls.df = pd.DataFrame( data=[["a", 1], ["b", 2]], columns=["comp", "cost"] ) cls.dict = {"k1": [12, 13, 14], "k2": ["a", "b", "c"]} cls.path_to_csv = os.path.join(os.path.dirname(__file__), "table.csv") cls.raw_sql = """CREATE TABLE sys_components ( Component TEXT NOT NULL , Function TEXT NOT NULL , PRIMARY KEY (Component) );""" @classmethod def tearDownClass(cls): store_db_name = "test_done.db" cls.sql_api.db.close() store_db_fulpath = os.path.join( os.path.dirname(__file__), store_db_name ) if os.path.exists(store_db_fulpath): os.remove(store_db_fulpath) os.rename(cls.test_db_fulpath, store_db_fulpath) def test_a_pd2table(self): self.sql_api.pd2table(self.df, "pd2table") def test_b_csv2table(self): self.sql_api.csv2table(self.path_to_csv, "csv2table") def test_c_table2pd(self): df = self.sql_api.table2pd("pd2table") self.assertTrue((df == self.df).all().all()) def test_d_commit(self): self.assertTrue(self.sql_api.commit(self.raw_sql)) def test_e_tables2dict(self): data = self.sql_api.tables2dict() self.assertEqual(data["pd2table"].iloc[1, 1], 2)
true
true
f716c53c15f21f36710f5ff9f2db87e1f34e3965
2,442
py
Python
2022-TR-Crystallography/Simulations-2.py
JMSkelton/Linkage-Isomer-JMAK-Kinetics
6ad7626ea9447855121a9b8ef6eb10efb93db300
[ "MIT" ]
null
null
null
2022-TR-Crystallography/Simulations-2.py
JMSkelton/Linkage-Isomer-JMAK-Kinetics
6ad7626ea9447855121a9b8ef6eb10efb93db300
[ "MIT" ]
null
null
null
2022-TR-Crystallography/Simulations-2.py
JMSkelton/Linkage-Isomer-JMAK-Kinetics
6ad7626ea9447855121a9b8ef6eb10efb93db300
[ "MIT" ]
null
null
null
# Simulations-2.py import glob import math import yaml from KineticAnalysis.NumericalSimulator import NumericalSimulator PPEqThreshold = 1.0e-4 if __name__ == "__main__": # Read fits to time-resolved datasets. tr_data_sets = { } for f in glob.glob(r"TimeResolved-*.yaml"): with open(f, 'rb') as input_file: input_yaml = yaml.load( input_file, Loader = yaml.CLoader ) tr_data_sets[input_yaml['label']] = ( input_yaml['t_cyc'], input_yaml['t_exc'], input_yaml['temp'], (input_yaml['data_t'], input_yaml['data_a']), (input_yaml['alpha_bg'], input_yaml['k_exc'], input_yaml['k_dec']), input_yaml['rms'] ) # For each dataset, determine the maximum excited-state population given the fitted k_exc/k_dec. for label, (t_cyc, t_exc, _, _, (_, k_exc, k_dec), _) in tr_data_sets.items(): simulator = NumericalSimulator( excN = 1.0, excK = k_exc, decN = 1.0, decK = k_dec ) # 1. Determine the excitation level achieved with the set t_cyc/t_exc. a_0, a_max = 0.0, 0.0 while True: simulator.InitialiseTrajectory(a_0) simulator.SetExcitation(True) simulator.RunTrajectory(t_exc) _, a_sim = simulator.GetTrajectory() a_max = a_sim[-1] simulator.SetExcitation(False) simulator.RunTrajectory(t_cyc - t_exc) _, a_sim = simulator.GetTrajectory() if math.fabs(a_sim[-1] - a_0) < PPEqThreshold: break a_0 = a_sim[-1] # 2. Determine the steady-state (maximum) excitation level. simulator.SetExcitation(True) simulator.InitialiseTrajectory(0.0) a_max_sim = 0.0 while True: simulator.RunTrajectory(1.0) _, a_sim = simulator.GetTrajectory() if math.fabs(a_sim[-1] - a_max_sim) < PPEqThreshold: break a_max_sim = a_sim[-1] print("{0}: a_max = {1:.3f}, theoretical = {2:.3f}".format(label, a_max, a_max_sim)) print("")
28.729412
100
0.514742
import glob import math import yaml from KineticAnalysis.NumericalSimulator import NumericalSimulator PPEqThreshold = 1.0e-4 if __name__ == "__main__": tr_data_sets = { } for f in glob.glob(r"TimeResolved-*.yaml"): with open(f, 'rb') as input_file: input_yaml = yaml.load( input_file, Loader = yaml.CLoader ) tr_data_sets[input_yaml['label']] = ( input_yaml['t_cyc'], input_yaml['t_exc'], input_yaml['temp'], (input_yaml['data_t'], input_yaml['data_a']), (input_yaml['alpha_bg'], input_yaml['k_exc'], input_yaml['k_dec']), input_yaml['rms'] ) for label, (t_cyc, t_exc, _, _, (_, k_exc, k_dec), _) in tr_data_sets.items(): simulator = NumericalSimulator( excN = 1.0, excK = k_exc, decN = 1.0, decK = k_dec ) a_0, a_max = 0.0, 0.0 while True: simulator.InitialiseTrajectory(a_0) simulator.SetExcitation(True) simulator.RunTrajectory(t_exc) _, a_sim = simulator.GetTrajectory() a_max = a_sim[-1] simulator.SetExcitation(False) simulator.RunTrajectory(t_cyc - t_exc) _, a_sim = simulator.GetTrajectory() if math.fabs(a_sim[-1] - a_0) < PPEqThreshold: break a_0 = a_sim[-1] simulator.SetExcitation(True) simulator.InitialiseTrajectory(0.0) a_max_sim = 0.0 while True: simulator.RunTrajectory(1.0) _, a_sim = simulator.GetTrajectory() if math.fabs(a_sim[-1] - a_max_sim) < PPEqThreshold: break a_max_sim = a_sim[-1] print("{0}: a_max = {1:.3f}, theoretical = {2:.3f}".format(label, a_max, a_max_sim)) print("")
true
true
f716c5621fcdf0c3aada36d496dec8b1fe35c8cc
9,045
py
Python
GUI-II/web_scrap.py
DulceWRLD/College
9b94868514f461c97121d72ea0855f72ca95e798
[ "MIT" ]
2
2021-08-21T01:25:50.000Z
2021-12-10T06:51:46.000Z
GUI-II/web_scrap.py
DulceWRLD/College
9b94868514f461c97121d72ea0855f72ca95e798
[ "MIT" ]
null
null
null
GUI-II/web_scrap.py
DulceWRLD/College
9b94868514f461c97121d72ea0855f72ca95e798
[ "MIT" ]
6
2021-03-14T22:21:23.000Z
2022-03-29T15:30:58.000Z
###########################################################################################
# Created by Jason Downing                                                                #
# Some code originally found at this Stackoverflow Post:                                  #
# https://stackoverflow.com/questions/18966368/python-beautifulsoup-scrape-tables         #
# Also this page as well:                                                                 #
# http://www.pythonforbeginners.com/python-on-the-web/web-scraping-with-beautifulsoup/    #
#                                                                                         #
# Copyright 2016 Jason Downing                                                            #
#                                                                                         #
# MIT LICENSED - DO WHATEVER YOU WANT WITH THIS FILE.                                     #
###########################################################################################
# To setup urllib2 / bs4 (BeautifulSoup)
# Follow this URL: http://linuxconfig.org/how-to-install-python3-beautiful-soup-environment-on-debian-linux
# and run this command: pip install requests

import json
import requests
from bs4 import BeautifulSoup

# 6 URLs to scrape for lift / trail data.
# Order is: Waterville Valley, Cannon Mt, Bretton Woods, Loon Mt, Cranmore Mt & Pats Peak
urls = ["http://www.waterville.com/ski-ride/snow-report.html",
        "http://cannonmt.com/trail-lift-report.html",
        "http://brettonwoods.com/alpine_trails/trail_report#top",
        "http://www.loonmtn.com/explore/snow-conditions/trail-lift-report",
        "http://www.cranmore.com/winter/snow-grooming-report",
        "http://www.patspeak.com/snow_report.php"]
mountains = ["Waterville Valley", "Cannon Mt", "Bretton Woods",
             "Loon Mt", "Cranmore Mt", "Pats Peak"]

# Global JSON object, accumulated by the per-mountain scrapers and written
# to disk exactly once at the bottom of the script.
JSON_trails = {}


# Waterville Valley: trails are <li class="open"> / <li class="closed">.
def waterville():
    print("DONE\n")
    open_trails = []
    closed_trails = []

    # Get the page, then grab just the text and use BeautifulSoup on it.
    page = requests.get(urls[0])
    data = page.text
    soup = BeautifulSoup(data, "lxml")

    # Get an entire div.
    ski_data = soup.findAll('div', {'class': 'tabset_content'})

    # Let's get all open trails.
    for each_div in soup.findAll('li', {'class': 'open'}):
        open_trails.append(each_div.text)
    # Also all closed trails.
    for each_div in soup.findAll('li', {'class': 'closed'}):
        closed_trails.append(each_div.text)

    # Dump to trails object.
    JSON_trails['waterville_open'] = open_trails
    JSON_trails['waterville_closed'] = closed_trails


# Cannon Mt: two tables of rows, column 0 = name, column 1 = "Open"/other.
def cannon():
    print("DONE\n")
    trail_list = []
    trail_status = []
    open_trails = []
    closed_trails = []

    # Get the page, then grab just the text and use BeautifulSoup on it.
    page = requests.get(urls[1])
    data = page.text
    soup = BeautifulSoup(data, "lxml")

    # Get lift status from the first table.
    # From stackoverflow:
    # https://stackoverflow.com/questions/13074586/extracting-selected-columns-from-a-table-using-beautifulsoup
    tables = soup.find('table')
    rows = tables.findAll('tr')
    for cells in rows:
        cell = cells.findAll('td')
        trail_list.append(cell[0].text)
        trail_status.append(cell[1].text)

    # Get trail status from the second table.
    # THIS TRICK COMES FROM STACKOVERFLOW!
    # https://stackoverflow.com/questions/14095511/beautifulsoup-in-python-getting-the-n-th-tag-of-a-type
    tables = soup.findAll('table')[1]
    rows = tables.findAll('tr')
    for cells in rows:
        if (len(cells) == 4):
            cell = cells.findAll('td')
            trail_list.append(cell[0].text)
            trail_status.append(cell[1].text)

    # Now let's figure out open / closed status for trails!
    list_length = len(trail_list)
    for a in range(list_length):
        if (trail_status[a] == 'Open'):
            open_trails.append(trail_list[a])
        else:
            closed_trails.append(trail_list[a])

    # Dump to trails object.
    JSON_trails['cannon_open'] = open_trails
    JSON_trails['cannon_closed'] = closed_trails


# Bretton Woods: trail names in div.trails-report, status as an <img> whose
# src distinguishes open from closed.
def bretton_woods():
    print("DONE\n")
    trail_list = []     # List of all the trails, in order on the page.
    trail_status = []   # List of trail status, in order on the page.
    open_trails = []    # All the open trails or lifts
    closed_trails = []  # All the closed trails or lifts
    open_src = '/images/icons/open-sm.png'

    # Get the page, then grab just the text and use BeautifulSoup on it.
    page = requests.get(urls[2])
    data = page.text
    soup = BeautifulSoup(data, "lxml")

    # Get an entire div.
    ski_data = soup.findAll('div', {'id': 'trail-content'})

    # Using this Stackoverflow post to figure out how to get the text I need.
    # https://stackoverflow.com/questions/13202087/beautiful-soup-find-children-for-particular-div
    for tag in ski_data:
        tab = tag.findAll('div', {'class': 'trails-report'})
        for tag2 in tab:
            trail_list.append(tag2.text)  # This gets all the trails by name.

        # Now to get trail conditions
        tab = tag.findAll('div', {'class': 'condition'})
        for img in tab:
            # This gets the trail status (by image source)
            img_src = img.findAll('img')[0].get('src')
            trail_status.append(img_src)

    # Now let's figure out open / closed status for trails!
    list_length = len(trail_list)
    for a in range(list_length):
        if (trail_status[a] == open_src):
            open_trails.append(trail_list[a])
        else:
            closed_trails.append(trail_list[a])

    # Dump to trails object.
    JSON_trails['bretton_woods_open'] = open_trails
    JSON_trails['bretton_woods_closed'] = closed_trails


# Loon Mt: tables with class "lift-status"; each row holds the name in its
# first <td> and the status as a green-check / red-x <img>.
def loon():
    open_trails = []    # All the open trails or lifts
    closed_trails = []  # All the closed trails or lifts

    # Get the page, then grab just the text and use BeautifulSoup on it.
    page = requests.get(urls[3])
    data = page.text
    soup = BeautifulSoup(data, "lxml")

    lifts = soup.findAll("table", {"class": "lift-status"})

    open_src = "/assets/prebuilt/img/template/small-green-checkmark.png"
    closed_src = "/assets/prebuilt/img/template/small-red-x.png"

    # BUG FIX: the previous version appended the whole findAll() result (a
    # list of Tag objects) to the status list and later compared that list
    # against the src *string*, which could never match, so every trail was
    # reported closed. It also collected one name per <td> but only one
    # status per table, so the two lists could diverge in length. Walking
    # row by row keeps each name paired with the status image from the same
    # row.
    # NOTE(review): assumes name is the first <td> and the status image is
    # the only/first <img> in the row -- confirm against the live markup.
    for lift in lifts:
        for row in lift.findAll('tr'):
            cells = row.findAll('td')
            if not cells:
                continue  # header rows have no <td>
            name = cells[0].getText().strip()

            imgs = row.findAll('img')
            if not imgs:
                continue  # rows without a status image carry no state
            src = imgs[0].get('src')

            if src == open_src:
                open_trails.append(name)
            elif src == closed_src:
                closed_trails.append(name)

    # Dump to trails object.
    JSON_trails['loon_open'] = open_trails
    JSON_trails['loon_closed'] = closed_trails


# Cranmore Mt: scraper not implemented yet; records empty lists.
def cranmore():
    print("NOT DONE.\n")
    open_trails = []
    closed_trails = []

    # Dump to trails object.
    JSON_trails['cranmore_open'] = open_trails
    JSON_trails['cranmore_closed'] = closed_trails


# Pats Peak: scraper not implemented yet; records empty lists.
def pats_peak():
    print("NOT DONE.\n")
    open_trails = []
    closed_trails = []

    # Dump to trails object.
    JSON_trails['pats_peak_open'] = open_trails
    JSON_trails['pats_peak_closed'] = closed_trails


# Main loop for data gathering. Only loon() is currently enabled; the other
# scrapers are stubbed out pending testing against the live sites.
for num in range(0, len(urls)):
    print(mountains[num] + " lift / trail conditions")
    print("Current URL to check: " + urls[num] + "\n")
    if (num == 0):
        print("Hello.")
        #waterville()
    if (num == 1):
        print("Hello.")
        #cannon()
    if (num == 2):
        print("Hello.")
        #bretton_woods()
    if (num == 3):
        loon()
    if (num == 4):
        print("Hello.")
        #cranmore()
    if (num == 5):
        print("Hello.")
        #pats_peak()

# Dump to JSON file now.
# Stackoverflow post this is from:
# https://stackoverflow.com/questions/16267767/python-writing-json-to-file
with open("json/ski.json", "w") as outfile:
    json.dump(JSON_trails, outfile, indent = 4)
31.297578
109
0.629519
true
true
f716c5911528e44ebe67f4a2d8cb1ca1d4e1243b
4,273
py
Python
codalab/rest/chats.py
millerjohnp/codalab-worksheets
d6fc37864e7a8966380fc9d73865b10e434d6678
[ "Apache-2.0" ]
null
null
null
codalab/rest/chats.py
millerjohnp/codalab-worksheets
d6fc37864e7a8966380fc9d73865b10e434d6678
[ "Apache-2.0" ]
null
null
null
codalab/rest/chats.py
millerjohnp/codalab-worksheets
d6fc37864e7a8966380fc9d73865b10e434d6678
[ "Apache-2.0" ]
1
2020-03-13T08:16:17.000Z
2020-03-13T08:16:17.000Z
""" Chatbox API """ import os from bottle import get, local, post, request import yaml from codalab.objects.chat_box_qa import ChatBoxQA from codalab.server.authenticated_plugin import AuthenticatedPlugin @get('/chats', apply=AuthenticatedPlugin()) def get_chat_box(): """ Return a list of chats that the current user has had """ query = {'user_id': request.user.user_id} return { 'chats': local.model.get_chat_log_info(query), 'root_user_id': local.model.root_user_id, 'system_user_id': local.model.system_user_id, } @post('/chats', apply=AuthenticatedPlugin()) def post_chat_box(): """ Add the chat to the log. Return an auto response, if the chat is directed to the system. Otherwise, return an updated chat list of the sender. """ recipient_user_id = request.POST.get('recipientUserId', None) message = request.POST.get('message', None) worksheet_uuid = request.POST.get('worksheetId', -1) bundle_uuid = request.POST.get('bundleId', -1) info = { 'sender_user_id': request.user.user_id, 'recipient_user_id': recipient_user_id, 'message': message, 'worksheet_uuid': worksheet_uuid, 'bundle_uuid': bundle_uuid, } chats = add_chat_log_info(info) return {'chats': chats} # @get('/faqs') def get_faq(): """ Return a list of FAQ items, each of the following format: '0': { 'question': 'how can I upload / add a bundle?' 'answer': { 'response': 'You can do cl upload or click Update Bundle.', 'command': 'cl upload <file_path>' } } Currently disabled. Needs further work. """ file_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../objects/chat_box_qa.yaml' ) with open(file_path, 'r') as stream: content = yaml.safe_load(stream) return {'faq': content} def add_chat_log_info(query_info): """ Add the given chat into the database. 
|query_info| encapsulates all the information of one chat Example: query_info = { 'sender_user_id': 1, 'recipient_user_id': 2, 'message': 'Hello this is my message', 'worksheet_uuid': 0x508cf51e546742beba97ed9a69329838, // the worksheet the user is browsing when he/she sends this message 'bundle_uuid': 0x8e66b11ecbda42e2a1f544627acf1418, // the bundle the user is browsing when he/she sends this message } Return an auto response, if the chat is directed to the system. Otherwise, return an updated chat list of the sender. """ updated_data = local.model.add_chat_log_info(query_info) if query_info.get('recipient_user_id') != local.model.system_user_id: return updated_data else: message = query_info.get('message') worksheet_uuid = query_info.get('worksheet_uuid') bundle_uuid = query_info.get('bundle_uuid') bot_response = format_message_response( ChatBoxQA.answer(message, worksheet_uuid, bundle_uuid) ) info = { 'sender_user_id': local.model.system_user_id, 'recipient_user_id': request.user.user_id, 'message': bot_response, 'worksheet_uuid': worksheet_uuid, 'bundle_uuid': bundle_uuid, } local.model.add_chat_log_info(info) return bot_response def format_message_response(params): """ Format automatic response |params| is None if the system can't process the user's message or is not confident enough to give a response. Otherwise, |params| is a triple that consists of the question that the system is trying to answer, the response it has for that question, and the recommended command to run. Return the automatic response that will be sent back to the user's chat box. """ if params is None: return 'Thank you for your question. Our staff will get back to you as soon as we can.' else: question, response, command = params result = 'This is the question we are trying to answer: ' + question + '\n' result += response + '\n' result += 'You can try to run the following command: \n' result += command return result
34.739837
132
0.656681
import os from bottle import get, local, post, request import yaml from codalab.objects.chat_box_qa import ChatBoxQA from codalab.server.authenticated_plugin import AuthenticatedPlugin @get('/chats', apply=AuthenticatedPlugin()) def get_chat_box(): query = {'user_id': request.user.user_id} return { 'chats': local.model.get_chat_log_info(query), 'root_user_id': local.model.root_user_id, 'system_user_id': local.model.system_user_id, } @post('/chats', apply=AuthenticatedPlugin()) def post_chat_box(): recipient_user_id = request.POST.get('recipientUserId', None) message = request.POST.get('message', None) worksheet_uuid = request.POST.get('worksheetId', -1) bundle_uuid = request.POST.get('bundleId', -1) info = { 'sender_user_id': request.user.user_id, 'recipient_user_id': recipient_user_id, 'message': message, 'worksheet_uuid': worksheet_uuid, 'bundle_uuid': bundle_uuid, } chats = add_chat_log_info(info) return {'chats': chats} def get_faq(): file_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../objects/chat_box_qa.yaml' ) with open(file_path, 'r') as stream: content = yaml.safe_load(stream) return {'faq': content} def add_chat_log_info(query_info): updated_data = local.model.add_chat_log_info(query_info) if query_info.get('recipient_user_id') != local.model.system_user_id: return updated_data else: message = query_info.get('message') worksheet_uuid = query_info.get('worksheet_uuid') bundle_uuid = query_info.get('bundle_uuid') bot_response = format_message_response( ChatBoxQA.answer(message, worksheet_uuid, bundle_uuid) ) info = { 'sender_user_id': local.model.system_user_id, 'recipient_user_id': request.user.user_id, 'message': bot_response, 'worksheet_uuid': worksheet_uuid, 'bundle_uuid': bundle_uuid, } local.model.add_chat_log_info(info) return bot_response def format_message_response(params): if params is None: return 'Thank you for your question. Our staff will get back to you as soon as we can.' 
else: question, response, command = params result = 'This is the question we are trying to answer: ' + question + '\n' result += response + '\n' result += 'You can try to run the following command: \n' result += command return result
true
true
f716c5deb4bfae59b106289a4f4dd8d17a07d48e
12,462
py
Python
testing/test_ldclient_evaluation.py
gangeli/python-server-sdk
3095315fd53c3bf723b0f16b0c18acadef4dfb3e
[ "Apache-2.0" ]
null
null
null
testing/test_ldclient_evaluation.py
gangeli/python-server-sdk
3095315fd53c3bf723b0f16b0c18acadef4dfb3e
[ "Apache-2.0" ]
null
null
null
testing/test_ldclient_evaluation.py
gangeli/python-server-sdk
3095315fd53c3bf723b0f16b0c18acadef4dfb3e
[ "Apache-2.0" ]
null
null
null
import pytest import json import time from ldclient.client import LDClient, Config from ldclient.feature_store import InMemoryFeatureStore from ldclient.flag import EvaluationDetail from ldclient.interfaces import FeatureStore from ldclient.versioned_data_kind import FEATURES from testing.stub_util import MockEventProcessor, MockUpdateProcessor from testing.test_ldclient import make_off_flag_with_value user = { 'key': 'userkey' } flag1 = { 'key': 'key1', 'version': 100, 'on': False, 'offVariation': 0, 'variations': [ 'value1' ], 'trackEvents': False } flag2 = { 'key': 'key2', 'version': 200, 'on': False, 'offVariation': 1, 'variations': [ 'x', 'value2' ], 'trackEvents': True, 'debugEventsUntilDate': 1000 } class ErroringFeatureStore(FeatureStore): def get(self, kind, key, callback=lambda x: x): raise NotImplementedError() def all(self, kind, callback=lambda x: x): raise NotImplementedError() def upsert(self, kind, item): pass def delete(self, key, version): pass def init(self, data): pass @property def initialized(self): return True def make_client(store): return LDClient(config=Config(sdk_key='SDK_KEY', base_uri='http://test', event_processor_class=MockEventProcessor, update_processor_class=MockUpdateProcessor, feature_store=store)) def get_log_lines(caplog, level): loglines = caplog.records if callable(loglines): # records() is a function in older versions of the caplog plugin loglines = loglines() return [line.message for line in loglines if line.levelname == level] def test_variation_for_existing_feature(): feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) assert 'value' == client.variation('feature.key', user, default='default') def test_variation_for_unknown_feature(): store = InMemoryFeatureStore() client = make_client(store) assert 'default' == client.variation('feature.key', user, default='default') def test_variation_when_user_is_none(): 
feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) assert 'default' == client.variation('feature.key', None, default='default') def test_variation_when_user_has_no_key(): feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) assert 'default' == client.variation('feature.key', { }, default='default') def test_variation_for_flag_that_evaluates_to_none(): empty_flag = { 'key': 'feature.key', 'on': False, 'offVariation': None } store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': empty_flag}}) client = make_client(store) assert 'default' == client.variation('feature.key', user, default='default') def test_variation_detail_for_existing_feature(): feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) expected = EvaluationDetail('value', 0, {'kind': 'OFF'}) assert expected == client.variation_detail('feature.key', user, default='default') def test_variation_detail_for_unknown_feature(): store = InMemoryFeatureStore() client = make_client(store) expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'FLAG_NOT_FOUND'}) assert expected == client.variation_detail('feature.key', user, default='default') def test_variation_detail_when_user_is_none(): feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'USER_NOT_SPECIFIED'}) assert expected == client.variation_detail('feature.key', None, default='default') def test_variation_detail_when_user_has_no_key(): feature = make_off_flag_with_value('feature.key', 'value') store 
= InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'USER_NOT_SPECIFIED'}) assert expected == client.variation_detail('feature.key', { }, default='default') def test_variation_detail_for_flag_that_evaluates_to_none(): empty_flag = { 'key': 'feature.key', 'on': False, 'offVariation': None } store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': empty_flag}}) client = make_client(store) expected = EvaluationDetail('default', None, {'kind': 'OFF'}) actual = client.variation_detail('feature.key', user, default='default') assert expected == actual assert actual.is_default_value() == True def test_variation_when_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) assert client.variation('feature.key', { "key": "user" }, default='default') == 'default' errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' ] def test_variation_detail_when_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'EXCEPTION'}) actual = client.variation_detail('feature.key', { "key": "user" }, default='default') assert expected == actual assert actual.is_default_value() == True errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' ] def test_all_flags_returns_values(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) result = client.all_flags(user) assert result == { 'key1': 'value1', 'key2': 'value2' } def test_all_flags_returns_none_if_user_is_none(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = 
make_client(store) result = client.all_flags(None) assert result is None def test_all_flags_returns_none_if_user_has_no_key(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) result = client.all_flags({ }) assert result is None def test_all_flags_returns_none_if_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) assert client.all_flags({ "key": "user" }) is None errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unable to read flags for all_flag_state: NotImplementedError()' ] def test_all_flags_state_returns_state(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) state = client.all_flags_state(user) assert state.valid == True result = state.to_json_dict() assert result == { 'key1': 'value1', 'key2': 'value2', '$flagsState': { 'key1': { 'variation': 0, 'version': 100 }, 'key2': { 'variation': 1, 'version': 200, 'trackEvents': True, 'debugEventsUntilDate': 1000 } }, '$valid': True } def test_all_flags_state_returns_state_with_reasons(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) state = client.all_flags_state(user, with_reasons=True) assert state.valid == True result = state.to_json_dict() assert result == { 'key1': 'value1', 'key2': 'value2', '$flagsState': { 'key1': { 'variation': 0, 'version': 100, 'reason': {'kind': 'OFF'} }, 'key2': { 'variation': 1, 'version': 200, 'trackEvents': True, 'debugEventsUntilDate': 1000, 'reason': {'kind': 'OFF'} } }, '$valid': True } def test_all_flags_state_can_be_filtered_for_client_side_flags(): flag1 = { 'key': 'server-side-1', 'on': False, 'offVariation': 0, 'variations': [ 'a' ], 'clientSide': False } flag2 = { 'key': 'server-side-2', 'on': False, 'offVariation': 0, 'variations': [ 'b' ], 'clientSide': False } flag3 = { 'key': 'client-side-1', 'on': False, 
'offVariation': 0, 'variations': [ 'value1' ], 'clientSide': True } flag4 = { 'key': 'client-side-2', 'on': False, 'offVariation': 0, 'variations': [ 'value2' ], 'clientSide': True } store = InMemoryFeatureStore() store.init({ FEATURES: { flag1['key']: flag1, flag2['key']: flag2, flag3['key']: flag3, flag4['key']: flag4 } }) client = make_client(store) state = client.all_flags_state(user, client_side_only=True) assert state.valid == True values = state.to_values_map() assert values == { 'client-side-1': 'value1', 'client-side-2': 'value2' } def test_all_flags_state_can_omit_details_for_untracked_flags(): future_time = (time.time() * 1000) + 100000 flag1 = { 'key': 'key1', 'version': 100, 'on': False, 'offVariation': 0, 'variations': [ 'value1' ], 'trackEvents': False } flag2 = { 'key': 'key2', 'version': 200, 'on': False, 'offVariation': 1, 'variations': [ 'x', 'value2' ], 'trackEvents': True } flag3 = { 'key': 'key3', 'version': 300, 'on': False, 'offVariation': 1, 'variations': [ 'x', 'value3' ], 'debugEventsUntilDate': future_time } store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2, 'key3': flag3 } }) client = make_client(store) state = client.all_flags_state(user, with_reasons=True, details_only_for_tracked_flags=True) assert state.valid == True result = state.to_json_dict() assert result == { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', '$flagsState': { 'key1': { 'variation': 0 }, 'key2': { 'variation': 1, 'version': 200, 'trackEvents': True, 'reason': {'kind': 'OFF'} }, 'key3': { 'variation': 1, 'version': 300, 'debugEventsUntilDate': future_time, 'reason': {'kind': 'OFF'} } }, '$valid': True } def test_all_flags_state_returns_empty_state_if_user_is_none(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) state = client.all_flags_state(None) assert state.valid == False def test_all_flags_state_returns_empty_state_if_user_has_no_key(): store = 
InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) state = client.all_flags_state({ }) assert state.valid == False def test_all_flags_returns_empty_state_if_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) state = client.all_flags_state({ "key": "user" }) assert state.valid == False errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unable to read flags for all_flag_state: NotImplementedError()' ]
34.520776
116
0.620847
import pytest import json import time from ldclient.client import LDClient, Config from ldclient.feature_store import InMemoryFeatureStore from ldclient.flag import EvaluationDetail from ldclient.interfaces import FeatureStore from ldclient.versioned_data_kind import FEATURES from testing.stub_util import MockEventProcessor, MockUpdateProcessor from testing.test_ldclient import make_off_flag_with_value user = { 'key': 'userkey' } flag1 = { 'key': 'key1', 'version': 100, 'on': False, 'offVariation': 0, 'variations': [ 'value1' ], 'trackEvents': False } flag2 = { 'key': 'key2', 'version': 200, 'on': False, 'offVariation': 1, 'variations': [ 'x', 'value2' ], 'trackEvents': True, 'debugEventsUntilDate': 1000 } class ErroringFeatureStore(FeatureStore): def get(self, kind, key, callback=lambda x: x): raise NotImplementedError() def all(self, kind, callback=lambda x: x): raise NotImplementedError() def upsert(self, kind, item): pass def delete(self, key, version): pass def init(self, data): pass @property def initialized(self): return True def make_client(store): return LDClient(config=Config(sdk_key='SDK_KEY', base_uri='http://test', event_processor_class=MockEventProcessor, update_processor_class=MockUpdateProcessor, feature_store=store)) def get_log_lines(caplog, level): loglines = caplog.records if callable(loglines): loglines = loglines() return [line.message for line in loglines if line.levelname == level] def test_variation_for_existing_feature(): feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) assert 'value' == client.variation('feature.key', user, default='default') def test_variation_for_unknown_feature(): store = InMemoryFeatureStore() client = make_client(store) assert 'default' == client.variation('feature.key', user, default='default') def test_variation_when_user_is_none(): feature = make_off_flag_with_value('feature.key', 'value') store = 
InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) assert 'default' == client.variation('feature.key', None, default='default') def test_variation_when_user_has_no_key(): feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) assert 'default' == client.variation('feature.key', { }, default='default') def test_variation_for_flag_that_evaluates_to_none(): empty_flag = { 'key': 'feature.key', 'on': False, 'offVariation': None } store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': empty_flag}}) client = make_client(store) assert 'default' == client.variation('feature.key', user, default='default') def test_variation_detail_for_existing_feature(): feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) expected = EvaluationDetail('value', 0, {'kind': 'OFF'}) assert expected == client.variation_detail('feature.key', user, default='default') def test_variation_detail_for_unknown_feature(): store = InMemoryFeatureStore() client = make_client(store) expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'FLAG_NOT_FOUND'}) assert expected == client.variation_detail('feature.key', user, default='default') def test_variation_detail_when_user_is_none(): feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'USER_NOT_SPECIFIED'}) assert expected == client.variation_detail('feature.key', None, default='default') def test_variation_detail_when_user_has_no_key(): feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': 
feature}}) client = make_client(store) expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'USER_NOT_SPECIFIED'}) assert expected == client.variation_detail('feature.key', { }, default='default') def test_variation_detail_for_flag_that_evaluates_to_none(): empty_flag = { 'key': 'feature.key', 'on': False, 'offVariation': None } store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': empty_flag}}) client = make_client(store) expected = EvaluationDetail('default', None, {'kind': 'OFF'}) actual = client.variation_detail('feature.key', user, default='default') assert expected == actual assert actual.is_default_value() == True def test_variation_when_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) assert client.variation('feature.key', { "key": "user" }, default='default') == 'default' errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' ] def test_variation_detail_when_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'EXCEPTION'}) actual = client.variation_detail('feature.key', { "key": "user" }, default='default') assert expected == actual assert actual.is_default_value() == True errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' ] def test_all_flags_returns_values(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) result = client.all_flags(user) assert result == { 'key1': 'value1', 'key2': 'value2' } def test_all_flags_returns_none_if_user_is_none(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) result = client.all_flags(None) assert result is 
None def test_all_flags_returns_none_if_user_has_no_key(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) result = client.all_flags({ }) assert result is None def test_all_flags_returns_none_if_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) assert client.all_flags({ "key": "user" }) is None errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unable to read flags for all_flag_state: NotImplementedError()' ] def test_all_flags_state_returns_state(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) state = client.all_flags_state(user) assert state.valid == True result = state.to_json_dict() assert result == { 'key1': 'value1', 'key2': 'value2', '$flagsState': { 'key1': { 'variation': 0, 'version': 100 }, 'key2': { 'variation': 1, 'version': 200, 'trackEvents': True, 'debugEventsUntilDate': 1000 } }, '$valid': True } def test_all_flags_state_returns_state_with_reasons(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) state = client.all_flags_state(user, with_reasons=True) assert state.valid == True result = state.to_json_dict() assert result == { 'key1': 'value1', 'key2': 'value2', '$flagsState': { 'key1': { 'variation': 0, 'version': 100, 'reason': {'kind': 'OFF'} }, 'key2': { 'variation': 1, 'version': 200, 'trackEvents': True, 'debugEventsUntilDate': 1000, 'reason': {'kind': 'OFF'} } }, '$valid': True } def test_all_flags_state_can_be_filtered_for_client_side_flags(): flag1 = { 'key': 'server-side-1', 'on': False, 'offVariation': 0, 'variations': [ 'a' ], 'clientSide': False } flag2 = { 'key': 'server-side-2', 'on': False, 'offVariation': 0, 'variations': [ 'b' ], 'clientSide': False } flag3 = { 'key': 'client-side-1', 'on': False, 'offVariation': 0, 'variations': [ 'value1' ], 'clientSide': True } flag4 
= { 'key': 'client-side-2', 'on': False, 'offVariation': 0, 'variations': [ 'value2' ], 'clientSide': True } store = InMemoryFeatureStore() store.init({ FEATURES: { flag1['key']: flag1, flag2['key']: flag2, flag3['key']: flag3, flag4['key']: flag4 } }) client = make_client(store) state = client.all_flags_state(user, client_side_only=True) assert state.valid == True values = state.to_values_map() assert values == { 'client-side-1': 'value1', 'client-side-2': 'value2' } def test_all_flags_state_can_omit_details_for_untracked_flags(): future_time = (time.time() * 1000) + 100000 flag1 = { 'key': 'key1', 'version': 100, 'on': False, 'offVariation': 0, 'variations': [ 'value1' ], 'trackEvents': False } flag2 = { 'key': 'key2', 'version': 200, 'on': False, 'offVariation': 1, 'variations': [ 'x', 'value2' ], 'trackEvents': True } flag3 = { 'key': 'key3', 'version': 300, 'on': False, 'offVariation': 1, 'variations': [ 'x', 'value3' ], 'debugEventsUntilDate': future_time } store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2, 'key3': flag3 } }) client = make_client(store) state = client.all_flags_state(user, with_reasons=True, details_only_for_tracked_flags=True) assert state.valid == True result = state.to_json_dict() assert result == { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', '$flagsState': { 'key1': { 'variation': 0 }, 'key2': { 'variation': 1, 'version': 200, 'trackEvents': True, 'reason': {'kind': 'OFF'} }, 'key3': { 'variation': 1, 'version': 300, 'debugEventsUntilDate': future_time, 'reason': {'kind': 'OFF'} } }, '$valid': True } def test_all_flags_state_returns_empty_state_if_user_is_none(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) state = client.all_flags_state(None) assert state.valid == False def test_all_flags_state_returns_empty_state_if_user_has_no_key(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } 
}) client = make_client(store) state = client.all_flags_state({ }) assert state.valid == False def test_all_flags_returns_empty_state_if_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) state = client.all_flags_state({ "key": "user" }) assert state.valid == False errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unable to read flags for all_flag_state: NotImplementedError()' ]
true
true
f716c6d4dbe952ad73298aca3af754a81ad523a5
1,666
py
Python
step6_orchestration/telegram_publisher/publisher.py
osmya/pydatanlp
4162221224d2ad67078949691b0cfc4222731acd
[ "Apache-2.0" ]
19
2019-07-01T13:31:45.000Z
2021-08-02T10:46:59.000Z
step6_orchestration/telegram_publisher/publisher.py
osmya/pydatanlp
4162221224d2ad67078949691b0cfc4222731acd
[ "Apache-2.0" ]
1
2019-07-11T15:15:18.000Z
2019-07-20T16:41:36.000Z
step6_orchestration/telegram_publisher/publisher.py
osmya/pydatanlp
4162221224d2ad67078949691b0cfc4222731acd
[ "Apache-2.0" ]
15
2019-07-11T21:26:06.000Z
2021-08-04T09:04:59.000Z
from aiogram import Bot, Dispatcher, executor, types from aiogram.utils.exceptions import CantParseEntities from dotenv import load_dotenv, find_dotenv from signal import signal, SIGINT from tqdm import tqdm from os import getenv import sys import fire import uvloop import redis load_dotenv(find_dotenv('.telegram')) uvloop.install() REDIS_HOST = getenv('REDIS_URL', 'localhost') channel_id = getenv('MY_TELEGRAM_NUMBER') async def push_update(content, bot): try: return await bot.send_message( channel_id, content, parse_mode='Markdown') except CantParseEntities: return await bot.send_message(channel_id, content) async def listen(source): r_conn = redis.Redis(REDIS_HOST) p = r_conn.pubsub(ignore_subscribe_messages=True) p.subscribe(source) for message in tqdm(p.listen()): yield message['data'] async def subscribe_and_listen(bot, channel_name='processed'): async for message in listen(channel_name): await push_update(message, bot) def main(): fire.Fire(TelegramPublisher) class TelegramPublisher: def publish(self, channel_name='processed'): signal(SIGINT, interrupt_handler) try: loop = uvloop.new_event_loop() bot = Bot(token=getenv('TELEGRAM_KEY'), loop=loop) task = loop.create_task(subscribe_and_listen(bot, channel_name)) loop.run_until_complete(task) finally: task.cancel() loop.run_until_complete(bot.close()) loop.close() def interrupt_handler(signal, frame): print('\nYou pressed Ctrl+C!') sys.exit(0) if __name__ == "__main__": main()
27.311475
76
0.697479
from aiogram import Bot, Dispatcher, executor, types from aiogram.utils.exceptions import CantParseEntities from dotenv import load_dotenv, find_dotenv from signal import signal, SIGINT from tqdm import tqdm from os import getenv import sys import fire import uvloop import redis load_dotenv(find_dotenv('.telegram')) uvloop.install() REDIS_HOST = getenv('REDIS_URL', 'localhost') channel_id = getenv('MY_TELEGRAM_NUMBER') async def push_update(content, bot): try: return await bot.send_message( channel_id, content, parse_mode='Markdown') except CantParseEntities: return await bot.send_message(channel_id, content) async def listen(source): r_conn = redis.Redis(REDIS_HOST) p = r_conn.pubsub(ignore_subscribe_messages=True) p.subscribe(source) for message in tqdm(p.listen()): yield message['data'] async def subscribe_and_listen(bot, channel_name='processed'): async for message in listen(channel_name): await push_update(message, bot) def main(): fire.Fire(TelegramPublisher) class TelegramPublisher: def publish(self, channel_name='processed'): signal(SIGINT, interrupt_handler) try: loop = uvloop.new_event_loop() bot = Bot(token=getenv('TELEGRAM_KEY'), loop=loop) task = loop.create_task(subscribe_and_listen(bot, channel_name)) loop.run_until_complete(task) finally: task.cancel() loop.run_until_complete(bot.close()) loop.close() def interrupt_handler(signal, frame): print('\nYou pressed Ctrl+C!') sys.exit(0) if __name__ == "__main__": main()
true
true
f716ca32bfbce92f904eda519d192105b6956caa
3,447
py
Python
cmdbox/scaffold_templates/migrations/0002_auto_20160404_2007.py
vitorfs/cmdbox
97806c02caf5947ec855286212e61db714e3fb02
[ "MIT" ]
1
2019-09-07T11:49:11.000Z
2019-09-07T11:49:11.000Z
cmdbox/scaffold_templates/migrations/0002_auto_20160404_2007.py
vitorfs/cmdbox
97806c02caf5947ec855286212e61db714e3fb02
[ "MIT" ]
null
null
null
cmdbox/scaffold_templates/migrations/0002_auto_20160404_2007.py
vitorfs/cmdbox
97806c02caf5947ec855286212e61db714e3fb02
[ "MIT" ]
2
2018-09-04T08:33:17.000Z
2020-09-18T20:26:46.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-04-04 20:07 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('scaffold_templates', '0001_initial'), ] operations = [ migrations.CreateModel( name='File', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255, verbose_name='name')), ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('updated_at', models.DateTimeField(auto_now=True, verbose_name='updated at')), ('extension', models.CharField(blank=True, max_length=10, null=True, verbose_name='extension')), ('size', models.PositiveIntegerField(default=0, verbose_name='size')), ], options={ 'verbose_name': 'file', 'verbose_name_plural': 'files', }, ), migrations.CreateModel( name='Folder', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255, verbose_name='name')), ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('updated_at', models.DateTimeField(auto_now=True, verbose_name='updated at')), ], options={ 'verbose_name': 'folder', 'verbose_name_plural': 'folders', }, ), migrations.AlterModelOptions( name='scaffoldtemplate', options={'ordering': ('-updated_at',), 'verbose_name': 'scaffold template', 'verbose_name_plural': 'scaffold template'}, ), migrations.AlterField( model_name='scaffoldtemplate', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='scaffoldtemplates', to=settings.AUTH_USER_MODEL), ), migrations.AlterUniqueTogether( name='scaffoldtemplate', unique_together=set([('user', 'slug')]), ), migrations.AddField( model_name='folder', name='template', 
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='folders', to='scaffold_templates.ScaffoldTemplate'), ), migrations.AddField( model_name='file', name='folder', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='scaffold_templates.Folder'), ), migrations.AddField( model_name='file', name='template', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='scaffold_templates.ScaffoldTemplate'), ), migrations.AlterUniqueTogether( name='folder', unique_together=set([('template', 'name')]), ), migrations.AlterUniqueTogether( name='file', unique_together=set([('template', 'name')]), ), ]
42.036585
158
0.59385
from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('scaffold_templates', '0001_initial'), ] operations = [ migrations.CreateModel( name='File', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255, verbose_name='name')), ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('updated_at', models.DateTimeField(auto_now=True, verbose_name='updated at')), ('extension', models.CharField(blank=True, max_length=10, null=True, verbose_name='extension')), ('size', models.PositiveIntegerField(default=0, verbose_name='size')), ], options={ 'verbose_name': 'file', 'verbose_name_plural': 'files', }, ), migrations.CreateModel( name='Folder', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255, verbose_name='name')), ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('updated_at', models.DateTimeField(auto_now=True, verbose_name='updated at')), ], options={ 'verbose_name': 'folder', 'verbose_name_plural': 'folders', }, ), migrations.AlterModelOptions( name='scaffoldtemplate', options={'ordering': ('-updated_at',), 'verbose_name': 'scaffold template', 'verbose_name_plural': 'scaffold template'}, ), migrations.AlterField( model_name='scaffoldtemplate', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='scaffoldtemplates', to=settings.AUTH_USER_MODEL), ), migrations.AlterUniqueTogether( name='scaffoldtemplate', unique_together=set([('user', 'slug')]), ), migrations.AddField( model_name='folder', name='template', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='folders', 
to='scaffold_templates.ScaffoldTemplate'), ), migrations.AddField( model_name='file', name='folder', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='files', to='scaffold_templates.Folder'), ), migrations.AddField( model_name='file', name='template', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='scaffold_templates.ScaffoldTemplate'), ), migrations.AlterUniqueTogether( name='folder', unique_together=set([('template', 'name')]), ), migrations.AlterUniqueTogether( name='file', unique_together=set([('template', 'name')]), ), ]
true
true
f716cbce48a8f8203417dcba6fc313bd1d90bcd9
5,760
py
Python
test/visualization/test_visualization.py
chrhck/pyABC
731cfdec26bef3898bf6e244daa5c8f83f3fe19d
[ "BSD-3-Clause" ]
null
null
null
test/visualization/test_visualization.py
chrhck/pyABC
731cfdec26bef3898bf6e244daa5c8f83f3fe19d
[ "BSD-3-Clause" ]
null
null
null
test/visualization/test_visualization.py
chrhck/pyABC
731cfdec26bef3898bf6e244daa5c8f83f3fe19d
[ "BSD-3-Clause" ]
null
null
null
import pyabc import tempfile import pytest import os import numpy as np import pandas as pd import matplotlib.pyplot as plt # create and run some model def model(p): return {'ss0': p['p0'] + 0.1 * np.random.uniform(), 'ss1': p['p1'] + 0.1 * np.random.uniform()} p_true = {'p0': 3, 'p1': 4} observation = {'ss0': p_true['p0'], 'ss1': p_true['p1']} limits = {'p0': (0, 5), 'p1': (1, 8)} prior = pyabc.Distribution(**{ key: pyabc.RV('uniform', limits[key][0], limits[key][1] - limits[key][0]) for key in p_true.keys()}) db_path = "sqlite:///" \ + os.path.join(tempfile.gettempdir(), "test_visualize.db") distance = pyabc.PNormDistance(p=2) n_history = 2 sampler = pyabc.sampler.MulticoreEvalParallelSampler(n_procs=2) for _ in range(n_history): abc = pyabc.ABCSMC(model, prior, distance, 20, sampler=sampler) abc.new(db_path, observation) abc.run(minimum_epsilon=.1, max_nr_populations=3) histories = [] labels = [] for j in range(n_history): history = pyabc.History(db_path) history.id = j + 1 histories.append(history) labels.append("Some run " + str(j)) def test_epsilons(): pyabc.visualization.plot_epsilons(histories, labels) plt.close() def test_sample_numbers(): pyabc.visualization.plot_sample_numbers( histories, rotation=43, size=(5, 5)) _, ax = plt.subplots() pyabc.visualization.plot_sample_numbers(histories, labels, ax=ax) with pytest.raises(ValueError): pyabc.visualization.plot_sample_numbers(histories, [labels[0]]) plt.close() def test_sample_numbers_trajectory(): pyabc.visualization.plot_sample_numbers_trajectory( histories, labels, yscale='log', rotation=90) _, ax = plt.subplots() pyabc.visualization.plot_sample_numbers_trajectory( histories, labels, yscale='log10', size=(8, 8), ax=ax) plt.close() def test_acceptance_rates_trajectory(): pyabc.visualization.plot_acceptance_rates_trajectory( histories, labels, yscale='log', rotation=76) _, ax = plt.subplots() pyabc.visualization.plot_acceptance_rates_trajectory( histories, labels, yscale='log10', rotation=76, size=(10, 5), 
ax=ax) plt.close() def test_total_sample_numbers(): pyabc.visualization.plot_total_sample_numbers(histories) pyabc.visualization.plot_total_sample_numbers( histories, labels, yscale='log', size=(10, 5)) _, ax = plt.subplots() pyabc.visualization.plot_total_sample_numbers( histories, rotation=75, yscale='log10', ax=ax) plt.close() def test_effective_sample_sizes(): pyabc.visualization.plot_effective_sample_sizes( histories, labels, rotation=45, relative=True) plt.close() def test_histograms(): # 1d pyabc.visualization.plot_histogram_1d( histories[0], 'p0', bins=20, xmin=limits['p0'][0], xmax=limits['p0'][1], size=(5, 5), refval=p_true) # 2d pyabc.visualization.plot_histogram_2d(histories[0], 'p0', 'p1') pyabc.visualization.plot_histogram_2d( histories[0], 'p0', 'p1', xmin=limits['p0'][0], xmax=limits['p0'][1], ymin=limits['p1'][0], ymax=limits['p1'][1], size=(5, 6), refval=p_true) # matrix pyabc.visualization.plot_histogram_matrix( histories[0], bins=1000, size=(6, 7), refval=p_true) plt.close() def test_kdes(): history = histories[0] df, w = history.get_distribution(m=0, t=None) pyabc.visualization.plot_kde_1d( df, w, x='p0', xmin=limits['p0'][0], xmax=limits['p0'][1], label="PDF") pyabc.visualization.plot_kde_2d(df, w, x='p0', y='p1') pyabc.visualization.plot_kde_matrix(df, w) # also use the highlevel interfaces pyabc.visualization.plot_kde_1d_highlevel(history, x='p0', size=(4, 5), refval=p_true) pyabc.visualization.plot_kde_2d_highlevel(history, x='p0', y='p1', size=(7, 5), refval=p_true) pyabc.visualization.plot_kde_matrix_highlevel(history, height=27.43, refval=p_true) plt.close() def test_credible_intervals(): pyabc.visualization.plot_credible_intervals(histories[0]) pyabc.visualization.plot_credible_intervals( histories[0], levels=[0.2, 0.5, 0.9], show_kde_max_1d=True, show_kde_max=True, show_mean=True, refval=p_true) pyabc.visualization.plot_credible_intervals_for_time( histories, levels=[0.5, 0.99], show_kde_max_1d=True, show_kde_max=True, show_mean=True, 
refvals=p_true) plt.close() def test_model_probabilities(): pyabc.visualization.plot_model_probabilities(histories[0]) plt.close() def test_data_callback(): def plot_data(sum_stat, weight, ax, **kwargs): ax.plot(sum_stat['ss0'], alpha=weight, **kwargs) def plot_data_aggregated(sum_stats, weights, ax, **kwargs): data = np.array([sum_stat['ss0'] for sum_stat in sum_stats]) weights = np.array(weights).reshape((-1, 1)) mean = (data * weights).sum(axis=0) plot_data({'ss0': mean}, 1.0, ax) pyabc.visualization.plot_data_callback( histories[0], plot_data, plot_data_aggregated) def test_data_default(): obs_dict = {1: 0.7, 2: np.array([43, 423, 5.5]), 3: pd.DataFrame({'a': [1, 2], 'b': [4, 6]})} sim_dict = {1: 6.5, 2: np.array([32, 5, 6]), 3: pd.DataFrame({'a': [1.55, -0.1], 'b': [54, 6]})} pyabc.visualization.plot_data_default(obs_dict, sim_dict) for i in range(5): obs_dict[i] = i + 1 sim_dict[i] = i + 2 pyabc.visualization.plot_data_default(obs_dict, sim_dict) plt.close()
32.542373
79
0.64375
import pyabc import tempfile import pytest import os import numpy as np import pandas as pd import matplotlib.pyplot as plt def model(p): return {'ss0': p['p0'] + 0.1 * np.random.uniform(), 'ss1': p['p1'] + 0.1 * np.random.uniform()} p_true = {'p0': 3, 'p1': 4} observation = {'ss0': p_true['p0'], 'ss1': p_true['p1']} limits = {'p0': (0, 5), 'p1': (1, 8)} prior = pyabc.Distribution(**{ key: pyabc.RV('uniform', limits[key][0], limits[key][1] - limits[key][0]) for key in p_true.keys()}) db_path = "sqlite:///" \ + os.path.join(tempfile.gettempdir(), "test_visualize.db") distance = pyabc.PNormDistance(p=2) n_history = 2 sampler = pyabc.sampler.MulticoreEvalParallelSampler(n_procs=2) for _ in range(n_history): abc = pyabc.ABCSMC(model, prior, distance, 20, sampler=sampler) abc.new(db_path, observation) abc.run(minimum_epsilon=.1, max_nr_populations=3) histories = [] labels = [] for j in range(n_history): history = pyabc.History(db_path) history.id = j + 1 histories.append(history) labels.append("Some run " + str(j)) def test_epsilons(): pyabc.visualization.plot_epsilons(histories, labels) plt.close() def test_sample_numbers(): pyabc.visualization.plot_sample_numbers( histories, rotation=43, size=(5, 5)) _, ax = plt.subplots() pyabc.visualization.plot_sample_numbers(histories, labels, ax=ax) with pytest.raises(ValueError): pyabc.visualization.plot_sample_numbers(histories, [labels[0]]) plt.close() def test_sample_numbers_trajectory(): pyabc.visualization.plot_sample_numbers_trajectory( histories, labels, yscale='log', rotation=90) _, ax = plt.subplots() pyabc.visualization.plot_sample_numbers_trajectory( histories, labels, yscale='log10', size=(8, 8), ax=ax) plt.close() def test_acceptance_rates_trajectory(): pyabc.visualization.plot_acceptance_rates_trajectory( histories, labels, yscale='log', rotation=76) _, ax = plt.subplots() pyabc.visualization.plot_acceptance_rates_trajectory( histories, labels, yscale='log10', rotation=76, size=(10, 5), ax=ax) plt.close() def 
test_total_sample_numbers(): pyabc.visualization.plot_total_sample_numbers(histories) pyabc.visualization.plot_total_sample_numbers( histories, labels, yscale='log', size=(10, 5)) _, ax = plt.subplots() pyabc.visualization.plot_total_sample_numbers( histories, rotation=75, yscale='log10', ax=ax) plt.close() def test_effective_sample_sizes(): pyabc.visualization.plot_effective_sample_sizes( histories, labels, rotation=45, relative=True) plt.close() def test_histograms(): pyabc.visualization.plot_histogram_1d( histories[0], 'p0', bins=20, xmin=limits['p0'][0], xmax=limits['p0'][1], size=(5, 5), refval=p_true) pyabc.visualization.plot_histogram_2d(histories[0], 'p0', 'p1') pyabc.visualization.plot_histogram_2d( histories[0], 'p0', 'p1', xmin=limits['p0'][0], xmax=limits['p0'][1], ymin=limits['p1'][0], ymax=limits['p1'][1], size=(5, 6), refval=p_true) pyabc.visualization.plot_histogram_matrix( histories[0], bins=1000, size=(6, 7), refval=p_true) plt.close() def test_kdes(): history = histories[0] df, w = history.get_distribution(m=0, t=None) pyabc.visualization.plot_kde_1d( df, w, x='p0', xmin=limits['p0'][0], xmax=limits['p0'][1], label="PDF") pyabc.visualization.plot_kde_2d(df, w, x='p0', y='p1') pyabc.visualization.plot_kde_matrix(df, w) pyabc.visualization.plot_kde_1d_highlevel(history, x='p0', size=(4, 5), refval=p_true) pyabc.visualization.plot_kde_2d_highlevel(history, x='p0', y='p1', size=(7, 5), refval=p_true) pyabc.visualization.plot_kde_matrix_highlevel(history, height=27.43, refval=p_true) plt.close() def test_credible_intervals(): pyabc.visualization.plot_credible_intervals(histories[0]) pyabc.visualization.plot_credible_intervals( histories[0], levels=[0.2, 0.5, 0.9], show_kde_max_1d=True, show_kde_max=True, show_mean=True, refval=p_true) pyabc.visualization.plot_credible_intervals_for_time( histories, levels=[0.5, 0.99], show_kde_max_1d=True, show_kde_max=True, show_mean=True, refvals=p_true) plt.close() def test_model_probabilities(): 
pyabc.visualization.plot_model_probabilities(histories[0]) plt.close() def test_data_callback(): def plot_data(sum_stat, weight, ax, **kwargs): ax.plot(sum_stat['ss0'], alpha=weight, **kwargs) def plot_data_aggregated(sum_stats, weights, ax, **kwargs): data = np.array([sum_stat['ss0'] for sum_stat in sum_stats]) weights = np.array(weights).reshape((-1, 1)) mean = (data * weights).sum(axis=0) plot_data({'ss0': mean}, 1.0, ax) pyabc.visualization.plot_data_callback( histories[0], plot_data, plot_data_aggregated) def test_data_default(): obs_dict = {1: 0.7, 2: np.array([43, 423, 5.5]), 3: pd.DataFrame({'a': [1, 2], 'b': [4, 6]})} sim_dict = {1: 6.5, 2: np.array([32, 5, 6]), 3: pd.DataFrame({'a': [1.55, -0.1], 'b': [54, 6]})} pyabc.visualization.plot_data_default(obs_dict, sim_dict) for i in range(5): obs_dict[i] = i + 1 sim_dict[i] = i + 2 pyabc.visualization.plot_data_default(obs_dict, sim_dict) plt.close()
true
true
f716cd537ee2ce3b739c2b138de0ba36abc67390
8,949
py
Python
tools/vsnp/vsnp_statistics.py
supernord/tools-iuc
9a0c41967765d120a8fc519c0c7f09cbe3a6efbe
[ "MIT" ]
1
2019-07-05T13:19:51.000Z
2019-07-05T13:19:51.000Z
tools/vsnp/vsnp_statistics.py
mtekman/tools-iuc
95f1ae4ed1cdd56114df76d215f9e1ed549aa4c5
[ "MIT" ]
8
2019-05-27T20:54:44.000Z
2021-10-04T09:33:30.000Z
tools/vsnp/vsnp_statistics.py
mtekman/tools-iuc
95f1ae4ed1cdd56114df76d215f9e1ed549aa4c5
[ "MIT" ]
null
null
null
#!/usr/bin/env python import argparse import csv import gzip import os from functools import partial import numpy import pandas from Bio import SeqIO def nice_size(size): # Returns a readably formatted string with the size words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB'] prefix = '' try: size = float(size) if size < 0: size = abs(size) prefix = '-' except Exception: return '??? bytes' for ind, word in enumerate(words): step = 1024 ** (ind + 1) if step > size: size = size / float(1024 ** ind) if word == 'bytes': # No decimals for bytes return "%s%d bytes" % (prefix, size) return "%s%.1f %s" % (prefix, size, word) return '??? bytes' def output_statistics(fastq_files, idxstats_files, metrics_files, output_file, gzipped, dbkey): # Produce an Excel spreadsheet that # contains a row for each sample. columns = ['Reference', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30', 'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total', 'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count'] data_frames = [] for i, fastq_file in enumerate(fastq_files): idxstats_file = idxstats_files[i] metrics_file = metrics_files[i] file_name_base = os.path.basename(fastq_file) # Read fastq_file into a data frame. _open = partial(gzip.open, mode='rt') if gzipped else open with _open(fastq_file) as fh: identifiers = [] seqs = [] letter_annotations = [] for seq_record in SeqIO.parse(fh, "fastq"): identifiers.append(seq_record.id) seqs.append(seq_record.seq) letter_annotations.append(seq_record.letter_annotations["phred_quality"]) # Convert lists to Pandas series. s1 = pandas.Series(identifiers, name='id') s2 = pandas.Series(seqs, name='seq') # Gather Series into a data frame. 
fastq_df = pandas.DataFrame(dict(id=s1, seq=s2)).set_index(['id']) total_reads = int(len(fastq_df.index) / 4) current_sample_df = pandas.DataFrame(index=[file_name_base], columns=columns) # Reference current_sample_df.at[file_name_base, 'Reference'] = dbkey # File Size current_sample_df.at[file_name_base, 'File Size'] = nice_size(os.path.getsize(fastq_file)) # Mean Read Length sampling_size = 10000 if sampling_size > total_reads: sampling_size = total_reads fastq_df = fastq_df.iloc[3::4].sample(sampling_size) dict_mean = {} list_length = [] i = 0 for id, seq, in fastq_df.iterrows(): dict_mean[id] = numpy.mean(letter_annotations[i]) list_length.append(len(seq.array[0])) i += 1 current_sample_df.at[file_name_base, 'Mean Read Length'] = '%.1f' % numpy.mean(list_length) # Mean Read Quality df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave']) current_sample_df.at[file_name_base, 'Mean Read Quality'] = '%.1f' % df_mean['ave'].mean() # Reads Passing Q30 reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30]) reads_passing_q30 = '{:10.2f}'.format(reads_gt_q30 / sampling_size) current_sample_df.at[file_name_base, 'Reads Passing Q30'] = reads_passing_q30 # Total Reads current_sample_df.at[file_name_base, 'Total Reads'] = total_reads # All Mapped Reads all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file) current_sample_df.at[file_name_base, 'All Mapped Reads'] = all_mapped_reads # Unmapped Reads current_sample_df.at[file_name_base, 'Unmapped Reads'] = unmapped_reads # Unmapped Reads Percentage of Total if unmapped_reads > 0: unmapped_reads_percentage = '{:10.2f}'.format(unmapped_reads / total_reads) else: unmapped_reads_percentage = 0 current_sample_df.at[file_name_base, 'Unmapped Reads Percentage of Total'] = unmapped_reads_percentage # Reference with Coverage ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file) current_sample_df.at[file_name_base, 'Reference with Coverage'] = ref_with_coverage 
# Average Depth of Coverage current_sample_df.at[file_name_base, 'Average Depth of Coverage'] = avg_depth_of_coverage # Good SNP Count current_sample_df.at[file_name_base, 'Good SNP Count'] = good_snp_count data_frames.append(current_sample_df) output_df = pandas.concat(data_frames) output_df.to_csv(output_file, sep='\t', quoting=csv.QUOTE_NONE, escapechar='\\') def process_idxstats_file(idxstats_file): all_mapped_reads = 0 unmapped_reads = 0 with open(idxstats_file, "r") as fh: for i, line in enumerate(fh): line = line.rstrip('\r\n') items = line.split("\t") if i == 0: # NC_002945.4 4349904 213570 4047 all_mapped_reads = int(items[2]) elif i == 1: # * 0 0 82774 unmapped_reads = int(items[3]) return all_mapped_reads, unmapped_reads def process_metrics_file(metrics_file): ref_with_coverage = '0%' avg_depth_of_coverage = 0 good_snp_count = 0 with open(metrics_file, "r") as ifh: for i, line in enumerate(ifh): if i == 0: # Skip comments. continue line = line.rstrip('\r\n') items = line.split("\t") if i == 1: # MarkDuplicates 10.338671 98.74% ref_with_coverage = items[3] avg_depth_of_coverage = items[2] elif i == 2: # VCFfilter 611 good_snp_count = items[1] return ref_with_coverage, avg_depth_of_coverage, good_snp_count parser = argparse.ArgumentParser() parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey') parser.add_argument('--gzipped', action='store_true', dest='gzipped', required=False, default=False, help='Input files are gzipped') parser.add_argument('--input_idxstats_dir', action='store', dest='input_idxstats_dir', required=False, default=None, help='Samtools idxstats input directory') parser.add_argument('--input_metrics_dir', action='store', dest='input_metrics_dir', required=False, default=None, help='vSNP add zero coverage metrics input directory') parser.add_argument('--input_reads_dir', action='store', dest='input_reads_dir', required=False, default=None, help='Samples input directory') parser.add_argument('--list_paired', 
action='store_true', dest='list_paired', required=False, default=False, help='Input samples is a list of paired reads') parser.add_argument('--output', action='store', dest='output', help='Output Excel statistics file') parser.add_argument('--read1', action='store', dest='read1', help='Required: single read') parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read') parser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', help='Output of samtools_idxstats') parser.add_argument('--vsnp_azc', action='store', dest='vsnp_azc', help='Output of vsnp_add_zero_coverage') args = parser.parse_args() fastq_files = [] idxstats_files = [] metrics_files = [] # Accumulate inputs. if args.read1 is not None: # The inputs are not dataset collections, so # read1, read2 (possibly) and vsnp_azc will also # not be None. fastq_files.append(args.read1) idxstats_files.append(args.samtools_idxstats) metrics_files.append(args.vsnp_azc) if args.read2 is not None: fastq_files.append(args.read2) idxstats_files.append(args.samtools_idxstats) metrics_files.append(args.vsnp_azc) else: for file_name in sorted(os.listdir(args.input_reads_dir)): fastq_files.append(os.path.join(args.input_reads_dir, file_name)) for file_name in sorted(os.listdir(args.input_idxstats_dir)): idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name)) if args.list_paired: # Add the idxstats file for reverse. idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name)) for file_name in sorted(os.listdir(args.input_metrics_dir)): metrics_files.append(os.path.join(args.input_metrics_dir, file_name)) if args.list_paired: # Add the metrics file for reverse. metrics_files.append(os.path.join(args.input_metrics_dir, file_name)) output_statistics(fastq_files, idxstats_files, metrics_files, args.output, args.gzipped, args.dbkey)
46.128866
169
0.658956
import argparse import csv import gzip import os from functools import partial import numpy import pandas from Bio import SeqIO def nice_size(size): words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB'] prefix = '' try: size = float(size) if size < 0: size = abs(size) prefix = '-' except Exception: return '??? bytes' for ind, word in enumerate(words): step = 1024 ** (ind + 1) if step > size: size = size / float(1024 ** ind) if word == 'bytes': return "%s%d bytes" % (prefix, size) return "%s%.1f %s" % (prefix, size, word) return '??? bytes' def output_statistics(fastq_files, idxstats_files, metrics_files, output_file, gzipped, dbkey): columns = ['Reference', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30', 'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total', 'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count'] data_frames = [] for i, fastq_file in enumerate(fastq_files): idxstats_file = idxstats_files[i] metrics_file = metrics_files[i] file_name_base = os.path.basename(fastq_file) _open = partial(gzip.open, mode='rt') if gzipped else open with _open(fastq_file) as fh: identifiers = [] seqs = [] letter_annotations = [] for seq_record in SeqIO.parse(fh, "fastq"): identifiers.append(seq_record.id) seqs.append(seq_record.seq) letter_annotations.append(seq_record.letter_annotations["phred_quality"]) s1 = pandas.Series(identifiers, name='id') s2 = pandas.Series(seqs, name='seq') fastq_df = pandas.DataFrame(dict(id=s1, seq=s2)).set_index(['id']) total_reads = int(len(fastq_df.index) / 4) current_sample_df = pandas.DataFrame(index=[file_name_base], columns=columns) current_sample_df.at[file_name_base, 'Reference'] = dbkey current_sample_df.at[file_name_base, 'File Size'] = nice_size(os.path.getsize(fastq_file)) sampling_size = 10000 if sampling_size > total_reads: sampling_size = total_reads fastq_df = fastq_df.iloc[3::4].sample(sampling_size) dict_mean = {} list_length = [] i = 0 for id, 
seq, in fastq_df.iterrows(): dict_mean[id] = numpy.mean(letter_annotations[i]) list_length.append(len(seq.array[0])) i += 1 current_sample_df.at[file_name_base, 'Mean Read Length'] = '%.1f' % numpy.mean(list_length) df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave']) current_sample_df.at[file_name_base, 'Mean Read Quality'] = '%.1f' % df_mean['ave'].mean() reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30]) reads_passing_q30 = '{:10.2f}'.format(reads_gt_q30 / sampling_size) current_sample_df.at[file_name_base, 'Reads Passing Q30'] = reads_passing_q30 current_sample_df.at[file_name_base, 'Total Reads'] = total_reads all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file) current_sample_df.at[file_name_base, 'All Mapped Reads'] = all_mapped_reads current_sample_df.at[file_name_base, 'Unmapped Reads'] = unmapped_reads if unmapped_reads > 0: unmapped_reads_percentage = '{:10.2f}'.format(unmapped_reads / total_reads) else: unmapped_reads_percentage = 0 current_sample_df.at[file_name_base, 'Unmapped Reads Percentage of Total'] = unmapped_reads_percentage ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file) current_sample_df.at[file_name_base, 'Reference with Coverage'] = ref_with_coverage current_sample_df.at[file_name_base, 'Average Depth of Coverage'] = avg_depth_of_coverage current_sample_df.at[file_name_base, 'Good SNP Count'] = good_snp_count data_frames.append(current_sample_df) output_df = pandas.concat(data_frames) output_df.to_csv(output_file, sep='\t', quoting=csv.QUOTE_NONE, escapechar='\\') def process_idxstats_file(idxstats_file): all_mapped_reads = 0 unmapped_reads = 0 with open(idxstats_file, "r") as fh: for i, line in enumerate(fh): line = line.rstrip('\r\n') items = line.split("\t") if i == 0: all_mapped_reads = int(items[2]) elif i == 1: unmapped_reads = int(items[3]) return all_mapped_reads, unmapped_reads def process_metrics_file(metrics_file): ref_with_coverage = '0%' 
avg_depth_of_coverage = 0 good_snp_count = 0 with open(metrics_file, "r") as ifh: for i, line in enumerate(ifh): if i == 0: continue line = line.rstrip('\r\n') items = line.split("\t") if i == 1: ref_with_coverage = items[3] avg_depth_of_coverage = items[2] elif i == 2: good_snp_count = items[1] return ref_with_coverage, avg_depth_of_coverage, good_snp_count parser = argparse.ArgumentParser() parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey') parser.add_argument('--gzipped', action='store_true', dest='gzipped', required=False, default=False, help='Input files are gzipped') parser.add_argument('--input_idxstats_dir', action='store', dest='input_idxstats_dir', required=False, default=None, help='Samtools idxstats input directory') parser.add_argument('--input_metrics_dir', action='store', dest='input_metrics_dir', required=False, default=None, help='vSNP add zero coverage metrics input directory') parser.add_argument('--input_reads_dir', action='store', dest='input_reads_dir', required=False, default=None, help='Samples input directory') parser.add_argument('--list_paired', action='store_true', dest='list_paired', required=False, default=False, help='Input samples is a list of paired reads') parser.add_argument('--output', action='store', dest='output', help='Output Excel statistics file') parser.add_argument('--read1', action='store', dest='read1', help='Required: single read') parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read') parser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', help='Output of samtools_idxstats') parser.add_argument('--vsnp_azc', action='store', dest='vsnp_azc', help='Output of vsnp_add_zero_coverage') args = parser.parse_args() fastq_files = [] idxstats_files = [] metrics_files = [] if args.read1 is not None: fastq_files.append(args.read1) idxstats_files.append(args.samtools_idxstats) 
metrics_files.append(args.vsnp_azc) if args.read2 is not None: fastq_files.append(args.read2) idxstats_files.append(args.samtools_idxstats) metrics_files.append(args.vsnp_azc) else: for file_name in sorted(os.listdir(args.input_reads_dir)): fastq_files.append(os.path.join(args.input_reads_dir, file_name)) for file_name in sorted(os.listdir(args.input_idxstats_dir)): idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name)) if args.list_paired: idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name)) for file_name in sorted(os.listdir(args.input_metrics_dir)): metrics_files.append(os.path.join(args.input_metrics_dir, file_name)) if args.list_paired: metrics_files.append(os.path.join(args.input_metrics_dir, file_name)) output_statistics(fastq_files, idxstats_files, metrics_files, args.output, args.gzipped, args.dbkey)
true
true
f716cd5d7134a3961ffe080fd5955660a50ac9e2
4,759
py
Python
augmented_seq2seq/datasets/friends/data.py
jsedoc/nn_chatbot
7b4406687bad2efa14658cb5aa137065cd325073
[ "MIT" ]
2
2017-07-22T15:34:02.000Z
2017-12-07T07:28:56.000Z
augmented_seq2seq/datasets/friends/data.py
jsedoc/nn_chatbot
7b4406687bad2efa14658cb5aa137065cd325073
[ "MIT" ]
null
null
null
augmented_seq2seq/datasets/friends/data.py
jsedoc/nn_chatbot
7b4406687bad2efa14658cb5aa137065cd325073
[ "MIT" ]
3
2017-06-09T10:30:22.000Z
2020-02-25T02:29:58.000Z
FILENAME = 'sequences_full.csv' VOCAB_SIZE = None UNK = 'UNK' POS_TAGS = { 'CC' : '<CC>', 'CD' : '<CD>', 'DT' : '<DT>', 'EX' : '<EX>', 'FW' : '<FW>', 'IN' : '<IN>', 'JJ' : '<JJ>', 'JJR' : '<JJR>', 'JJS' : '<JJS>', 'LS' : '<LS>', 'MD' : '<MD>', 'NN' : '<NN>', 'NNS' : '<NNS>', 'NNP' : '<NNP>', 'NNPS' : '<NNPS>', 'PDT' : '<PDT>', 'POS' : '<POS>', 'PRP' : '<PRP>', 'PRP' : '<PRP>', 'RB' : '<RB>', 'RBR' : '<RBR>', 'RBS' : '<RBS>', 'RP' : '<RP>', 'SYM' : '<SYM>', 'TO' : '<TO>', 'UH' : '<UH>', 'VB' : '<VB>', 'VBD' : '<VBD>', 'VBG' : '<VBG>', 'VBN' : '<VBN>', 'VBP' : '<VBP>', 'VBZ' : '<VBZ>', 'WDT' : '<WDT>', 'WP' : '<WP>', 'WP$' : '<WP$>', 'WRB' : '<WRB>' } # imports : in the order of usage import itertools import nltk import random import sys import pickle ''' read lines from file return [list of lines] ''' def read_lines(filename): return fix_win_encode(open(filename).read()).split('\n')[1:-1] def fix_win_encode(text): return text.replace('\x92', "'").replace('\x97', ' ').replace('\x91', '').replace('_b_','').replace('*','').replace('\x93','') ''' split each row of form "query |respect| response" to [ query, response, respect ] ''' def split_row(lines): q,r,respect = [], [], [] for line in lines: line = line.split('|') r.append(split_and_tag(line[0])) q.append(split_and_tag(line[-1])) respect.append(int(line[1])) return q,r,respect ''' split sentences into words and tags with nltk replace foreign words and numbers into <FW> and <CD> tags ''' def split_and_tag(line): wtags = nltk.pos_tag(nltk.word_tokenize(line.strip())) words = [] for w,t in wtags: if t == 'CD' or t == 'FW': w = t words.append(w) return words ''' read list of words, create index to word, word to index dictionaries return tuple( vocab->(word, count), idx2w, w2idx ) ''' def index_(tokenized_sentences, vocab_size): # get frequency distribution freq_dist = nltk.FreqDist(itertools.chain(*tokenized_sentences)) # get vocabulary of 'vocab_size' most used words vocab = freq_dist.most_common(vocab_size) vocab = [ 
item for item in vocab if item[1] > 1 ] # index2word index2word = ['_'] + ['UNK'] + list(POS_TAGS.keys()) + [ x[0] for x in vocab ] # word2index word2index = dict([(w,i) for i,w in enumerate(index2word)] ) return index2word, word2index, freq_dist ''' There will be no zero padding! ''' def encode(q, r, w2idx): # num of rows data_len = len(q) idx_q, idx_r = [], [] for i in range(data_len): idx_q.append(encode_seq(q[i], w2idx)) idx_r.append(encode_seq(r[i], w2idx)) return idx_q, idx_r ''' replace words with indices in a sequence replace with unknown if word not in lookup return [list of indices] ''' def encode_seq(seq, lookup): indices = [] for word in seq: if word in lookup: indices.append(lookup[word]) else: tag = nltk.pos_tag([word])[-1][-1] if tag in lookup: indices.append(lookup[tag]) else: indices.append(lookup[UNK]) return indices def process_data(): print('\n>> Read lines from file') lines = read_lines(filename=FILENAME) # change to lower case lines = [ line.lower() for line in lines ] print('>> [read_lines] {} lines;\nexamples\n{}'. format(len(lines), lines[121:125])) # split row into query, response and respect q, r, respect = split_row(lines) print('\n>> [split_row] \n{} {} {}'. 
format( q[121:125], r[121:125], respect[121:125])) ############# # NL pipeline #### ## # [1] Spell Check # # [2] POS tagging # indexing -> idx2w, w2idx : en/ta print('\n >> Index words') idx2w, w2idx, freq_dist = index_(q+r, vocab_size=None) idx_q, idx_r = encode(q, r, w2idx) data = { 'q' : idx_q, 'r' : idx_r, 'respect' : respect } # let us now save the necessary dictionaries metadata = { 'w2idx' : w2idx, 'idx2w' : idx2w, 'freq_dist' : freq_dist, 'respect_size' : max(respect) + 1 } # write to disk : data control dictionaries with open('metadata.pkl', 'wb') as f: pickle.dump(metadata, f) with open('data.pkl', 'wb') as f: pickle.dump(data, f) def load_data(PATH=''): # read data control dictionaries with open(PATH + 'metadata.pkl', 'rb') as f: metadata = pickle.load(f) with open(PATH + 'data.pkl', 'rb') as f: data = pickle.load(f) return data, metadata if __name__ == '__main__': process_data()
25.86413
595
0.548645
FILENAME = 'sequences_full.csv' VOCAB_SIZE = None UNK = 'UNK' POS_TAGS = { 'CC' : '<CC>', 'CD' : '<CD>', 'DT' : '<DT>', 'EX' : '<EX>', 'FW' : '<FW>', 'IN' : '<IN>', 'JJ' : '<JJ>', 'JJR' : '<JJR>', 'JJS' : '<JJS>', 'LS' : '<LS>', 'MD' : '<MD>', 'NN' : '<NN>', 'NNS' : '<NNS>', 'NNP' : '<NNP>', 'NNPS' : '<NNPS>', 'PDT' : '<PDT>', 'POS' : '<POS>', 'PRP' : '<PRP>', 'PRP' : '<PRP>', 'RB' : '<RB>', 'RBR' : '<RBR>', 'RBS' : '<RBS>', 'RP' : '<RP>', 'SYM' : '<SYM>', 'TO' : '<TO>', 'UH' : '<UH>', 'VB' : '<VB>', 'VBD' : '<VBD>', 'VBG' : '<VBG>', 'VBN' : '<VBN>', 'VBP' : '<VBP>', 'VBZ' : '<VBZ>', 'WDT' : '<WDT>', 'WP' : '<WP>', 'WP$' : '<WP$>', 'WRB' : '<WRB>' } import itertools import nltk import random import sys import pickle def read_lines(filename): return fix_win_encode(open(filename).read()).split('\n')[1:-1] def fix_win_encode(text): return text.replace('\x92', "'").replace('\x97', ' ').replace('\x91', '').replace('_b_','').replace('*','').replace('\x93','') def split_row(lines): q,r,respect = [], [], [] for line in lines: line = line.split('|') r.append(split_and_tag(line[0])) q.append(split_and_tag(line[-1])) respect.append(int(line[1])) return q,r,respect def split_and_tag(line): wtags = nltk.pos_tag(nltk.word_tokenize(line.strip())) words = [] for w,t in wtags: if t == 'CD' or t == 'FW': w = t words.append(w) return words def index_(tokenized_sentences, vocab_size): # get frequency distribution freq_dist = nltk.FreqDist(itertools.chain(*tokenized_sentences)) # get vocabulary of 'vocab_size' most used words vocab = freq_dist.most_common(vocab_size) vocab = [ item for item in vocab if item[1] > 1 ] # index2word index2word = ['_'] + ['UNK'] + list(POS_TAGS.keys()) + [ x[0] for x in vocab ] # word2index word2index = dict([(w,i) for i,w in enumerate(index2word)] ) return index2word, word2index, freq_dist def encode(q, r, w2idx): # num of rows data_len = len(q) idx_q, idx_r = [], [] for i in range(data_len): idx_q.append(encode_seq(q[i], w2idx)) 
idx_r.append(encode_seq(r[i], w2idx)) return idx_q, idx_r def encode_seq(seq, lookup): indices = [] for word in seq: if word in lookup: indices.append(lookup[word]) else: tag = nltk.pos_tag([word])[-1][-1] if tag in lookup: indices.append(lookup[tag]) else: indices.append(lookup[UNK]) return indices def process_data(): print('\n>> Read lines from file') lines = read_lines(filename=FILENAME) # change to lower case lines = [ line.lower() for line in lines ] print('>> [read_lines] {} lines;\nexamples\n{}'. format(len(lines), lines[121:125])) # split row into query, response and respect q, r, respect = split_row(lines) print('\n>> [split_row] \n{} {} {}'. format( q[121:125], r[121:125], respect[121:125])) ############# # NL pipeline #### ## # [1] Spell Check # # [2] POS tagging # indexing -> idx2w, w2idx : en/ta print('\n >> Index words') idx2w, w2idx, freq_dist = index_(q+r, vocab_size=None) idx_q, idx_r = encode(q, r, w2idx) data = { 'q' : idx_q, 'r' : idx_r, 'respect' : respect } # let us now save the necessary dictionaries metadata = { 'w2idx' : w2idx, 'idx2w' : idx2w, 'freq_dist' : freq_dist, 'respect_size' : max(respect) + 1 } # write to disk : data control dictionaries with open('metadata.pkl', 'wb') as f: pickle.dump(metadata, f) with open('data.pkl', 'wb') as f: pickle.dump(data, f) def load_data(PATH=''): # read data control dictionaries with open(PATH + 'metadata.pkl', 'rb') as f: metadata = pickle.load(f) with open(PATH + 'data.pkl', 'rb') as f: data = pickle.load(f) return data, metadata if __name__ == '__main__': process_data()
true
true
f716cd6b0558dfb762b9277089b16cb9576044b2
1,613
py
Python
venv/lib/python3.6/site-packages/xero_python/payrollau/models/timesheets.py
6enno/FarmXero
881b1e6648e927631b276e66a4c5287e4de2cbc1
[ "MIT" ]
null
null
null
venv/lib/python3.6/site-packages/xero_python/payrollau/models/timesheets.py
6enno/FarmXero
881b1e6648e927631b276e66a4c5287e4de2cbc1
[ "MIT" ]
null
null
null
venv/lib/python3.6/site-packages/xero_python/payrollau/models/timesheets.py
6enno/FarmXero
881b1e6648e927631b276e66a4c5287e4de2cbc1
[ "MIT" ]
null
null
null
# coding: utf-8 """ Xero Payroll AU This is the Xero Payroll API for orgs in Australia region. # noqa: E501 Contact: api@xero.com Generated by: https://openapi-generator.tech """ import re # noqa: F401 from xero_python.models import BaseModel class Timesheets(BaseModel): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = {"timesheets": "list[Timesheet]"} attribute_map = {"timesheets": "Timesheets"} def __init__(self, timesheets=None): # noqa: E501 """Timesheets - a model defined in OpenAPI""" # noqa: E501 self._timesheets = None self.discriminator = None if timesheets is not None: self.timesheets = timesheets @property def timesheets(self): """Gets the timesheets of this Timesheets. # noqa: E501 :return: The timesheets of this Timesheets. # noqa: E501 :rtype: list[Timesheet] """ return self._timesheets @timesheets.setter def timesheets(self, timesheets): """Sets the timesheets of this Timesheets. :param timesheets: The timesheets of this Timesheets. # noqa: E501 :type: list[Timesheet] """ self._timesheets = timesheets
24.815385
76
0.619343
import re from xero_python.models import BaseModel class Timesheets(BaseModel): openapi_types = {"timesheets": "list[Timesheet]"} attribute_map = {"timesheets": "Timesheets"} def __init__(self, timesheets=None): self._timesheets = None self.discriminator = None if timesheets is not None: self.timesheets = timesheets @property def timesheets(self): return self._timesheets @timesheets.setter def timesheets(self, timesheets): self._timesheets = timesheets
true
true
f716ce82d68d18fd209d91f8e1db052c5725c571
6,938
py
Python
assignment2/cs231n/optim.py
Abhijeet8901/CS231n
c8e715028b453899d5069cdb34faf3fc2959c270
[ "MIT" ]
null
null
null
assignment2/cs231n/optim.py
Abhijeet8901/CS231n
c8e715028b453899d5069cdb34faf3fc2959c270
[ "MIT" ]
null
null
null
assignment2/cs231n/optim.py
Abhijeet8901/CS231n
c8e715028b453899d5069cdb34faf3fc2959c270
[ "MIT" ]
null
null
null
import numpy as np """ This file implements various first-order update rules that are commonly used for training neural networks. Each update rule accepts current weights and the gradient of the loss with respect to those weights and produces the next set of weights. Each update rule has the same interface: def update(w, dw, config=None): Inputs: - w: A numpy array giving the current weights. - dw: A numpy array of the same shape as w giving the gradient of the loss with respect to w. - config: A dictionary containing hyperparameter values such as learning rate, momentum, etc. If the update rule requires caching values over many iterations, then config will also hold these cached values. Returns: - next_w: The next point after the update. - config: The config dictionary to be passed to the next iteration of the update rule. NOTE: For most update rules, the default learning rate will probably not perform well; however the default values of the other hyperparameters should work well for a variety of different problems. For efficiency, update rules may perform in-place updates, mutating w and setting next_w equal to w. """ def sgd(w, dw, config=None): """ Performs vanilla stochastic gradient descent. config format: - learning_rate: Scalar learning rate. """ if config is None: config = {} config.setdefault("learning_rate", 1e-2) w -= config["learning_rate"] * dw return w, config def sgd_momentum(w, dw, config=None): """ Performs stochastic gradient descent with momentum. config format: - learning_rate: Scalar learning rate. - momentum: Scalar between 0 and 1 giving the momentum value. Setting momentum = 0 reduces to sgd. - velocity: A numpy array of the same shape as w and dw used to store a moving average of the gradients. 
""" if config is None: config = {} config.setdefault("learning_rate", 1e-2) config.setdefault("momentum", 0.9) v = config.get("velocity", np.zeros_like(w)) next_w=None ########################################################################### # TODO: Implement the momentum update formula. Store the updated value in # # the next_w variable. You should also use and update the velocity v. # ########################################################################### # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** v= config["momentum"]*v - config["learning_rate"]*dw next_w=w+v pass # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ########################################################################### # END OF YOUR CODE # ########################################################################### config["velocity"] = v return next_w, config def rmsprop(w, dw, config=None): """ Uses the RMSProp update rule, which uses a moving average of squared gradient values to set adaptive per-parameter learning rates. config format: - learning_rate: Scalar learning rate. - decay_rate: Scalar between 0 and 1 giving the decay rate for the squared gradient cache. - epsilon: Small scalar used for smoothing to avoid dividing by zero. - cache: Moving average of second moments of gradients. """ if config is None: config = {} config.setdefault("learning_rate", 1e-2) config.setdefault("decay_rate", 0.99) config.setdefault("epsilon", 1e-8) config.setdefault("cache", np.zeros_like(w)) next_w = None ########################################################################### # TODO: Implement the RMSprop update formula, storing the next value of w # # in the next_w variable. Don't forget to update cache value stored in # # config['cache']. 
# ########################################################################### # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** cache=config["cache"] cache=config["decay_rate"]*cache + (1-config["decay_rate"])*dw**2 w+=(-config["learning_rate"]*dw)/(np.sqrt(cache)+config["epsilon"]) next_w=w config["cache"]=cache pass # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ########################################################################### # END OF YOUR CODE # ########################################################################### return next_w, config def adam(w, dw, config=None): """ Uses the Adam update rule, which incorporates moving averages of both the gradient and its square and a bias correction term. config format: - learning_rate: Scalar learning rate. - beta1: Decay rate for moving average of first moment of gradient. - beta2: Decay rate for moving average of second moment of gradient. - epsilon: Small scalar used for smoothing to avoid dividing by zero. - m: Moving average of gradient. - v: Moving average of squared gradient. - t: Iteration number. """ if config is None: config = {} config.setdefault("learning_rate", 1e-3) config.setdefault("beta1", 0.9) config.setdefault("beta2", 0.999) config.setdefault("epsilon", 1e-8) config.setdefault("m", np.zeros_like(w)) config.setdefault("v", np.zeros_like(w)) config.setdefault("t", 0) next_w = None ########################################################################### # TODO: Implement the Adam update formula, storing the next value of w in # # the next_w variable. Don't forget to update the m, v, and t variables # # stored in config. # # # # NOTE: In order to match the reference output, please modify t _before_ # # using it in any calculations. 
# ########################################################################### # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** lr=config["learning_rate"] b1,b2,ep=config["beta1"],config["beta2"],config["epsilon"] m=config["m"] v=config["v"] t=config["t"] t+=1 m=b1*m+(1-b1)*dw mt=m/(1-b1**t) v=b2*v+(1-b2)*dw**2 vt=v/(1-b2**t) w-=(lr*mt)/(np.sqrt(vt)+ep) config["m"],config["v"],config["t"]=m,v,t next_w=w pass # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ########################################################################### # END OF YOUR CODE # ########################################################################### return next_w, config
38.544444
79
0.537763
import numpy as np def sgd(w, dw, config=None): if config is None: config = {} config.setdefault("learning_rate", 1e-2) w -= config["learning_rate"] * dw return w, config def sgd_momentum(w, dw, config=None): if config is None: config = {} config.setdefault("learning_rate", 1e-2) config.setdefault("momentum", 0.9) v = config.get("velocity", np.zeros_like(w)) next_w=None
true
true
f716cea90be05811860d24af0a9d540d7d2e2e6c
4,451
py
Python
code/distributeHI.py
modichirag/21cmhod
0807a7b0b880f4ba5bc7161b843d500ddcece5a7
[ "MIT" ]
null
null
null
code/distributeHI.py
modichirag/21cmhod
0807a7b0b880f4ba5bc7161b843d500ddcece5a7
[ "MIT" ]
null
null
null
code/distributeHI.py
modichirag/21cmhod
0807a7b0b880f4ba5bc7161b843d500ddcece5a7
[ "MIT" ]
null
null
null
import numpy as np import re, os from pmesh.pm import ParticleMesh from nbodykit.lab import BigFileCatalog, BigFileMesh, MultipleSpeciesCatalog, FFTPower from nbodykit import setup_logging from mpi4py import MPI import HImodels # enable logging, we have some clue what's going on. setup_logging('info') #Get model as parameter import argparse parser = argparse.ArgumentParser() parser.add_argument('-s', '--size', help='for small or big box', default='small') parser.add_argument('-m', '--model', help='model name to use') args = parser.parse_args() if args.model == None: print('Specify a model name') sys.exit() #print(args, args.model) model = args.model #'ModelD' boxsize = args.size # # #Global, fixed things scratchyf = '/global/cscratch1/sd/yfeng1/m3127/' scratchcm = '/global/cscratch1/sd/chmodi/m3127/H1mass/' project = '/project/projectdirs/m3127/H1mass/' cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048} alist = [0.1429,0.1538,0.1667,0.1818,0.2000,0.2222,0.2500,0.2857,0.3333] #Parameters, box size, number of mesh cells, simulation, ... if boxsize == 'small': bs, nc, ncsim, sim, prefix = 256, 512, 2560, 'highres/%d-9100-fixed'%2560, 'highres' elif boxsize == 'big': bs, nc, ncsim, sim, prefix = 1024, 1024, 10240, 'highres/%d-9100-fixed'%10240, 'highres' else: print('Box size not understood, should be "big" or "small"') sys.exit() # It's useful to have my rank for printing... 
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc]) rank = pm.comm.rank comm = pm.comm #Which model & configuration to use modeldict = {'ModelA':HImodels.ModelA, 'ModelB':HImodels.ModelB, 'ModelC':HImodels.ModelC} modedict = {'ModelA':'galaxies', 'ModelB':'galaxies', 'ModelC':'halos'} HImodel = modeldict[model] #HImodels.ModelB modelname = model mode = modedict[model] ofolder = '../data/outputs/' def distribution(aa, halocat, cencat, satcat, outfolder, mbins=None): '''Compute the fraction of HI in halos, centrals, satellites''' if rank==0: print('Calculating distribution') if mbins is None: mbins = np.logspace(9, 15, 100) hmass = halocat['Mass'].compute() htotal, hsize, h1total = [], [], [] for im in range(mbins.size-1): mask = (hmass >= mbins[im]) & (hmass < mbins[im+1]) rankweight = (hmass*mask).sum() htotal.append(comm.allreduce(rankweight)) rankweight = (mask).sum() hsize.append(comm.allreduce(rankweight)) h1bin = [] for cat in [halocat['HImass'], cencat['HImass'], cencat['HIsat']]: rankweight = (cat.compute()*mask).sum() h1bin.append(comm.allreduce(rankweight)) h1total.append(h1bin) # if rank==0: tosave = np.zeros((len(hsize), 5)) tosave[:, 1] = hsize tosave[:, 0] = htotal / (tosave[:, 1]) tosave[:, 2:] = h1total/ (tosave[:, 1].reshape(-1, 1)) tosave[np.isnan(tosave)] = 0 header = 'Halo Mass, Number Halos, HI halos, HI centrals, HI satellites' np.savetxt(outfolder + "HI_dist_{:6.4f}.txt".format(aa), tosave, fmt='%0.6e', header=header) if __name__=="__main__": if rank==0: print('Starting') suff='-m1_00p3mh-alpha-0p8-subvol' outfolder = ofolder + suff[1:] if bs == 1024: outfolder = outfolder + "-big" outfolder += "/%s/"%modelname if rank == 0: print(outfolder) #outfolder = ofolder + suff[1:] + "/%s/"%modelname try: os.makedirs(outfolder) except : pass for aa in alist: if rank == 0: print('\n ############## Redshift = %0.2f ############## \n'%(1/aa-1)) halocat = BigFileCatalog(scratchyf + sim+ '/fastpm_%0.4f//'%aa, dataset='LL-0.200') mp = 
halocat.attrs['MassTable'][1]*1e10## halocat['Mass'] = halocat['Length'].compute() * mp cencat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/cencat'%aa+suff) satcat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/satcat'%aa+suff) # HImodelz = HImodel(aa) halocat['HImass'], cencat['HImass'], satcat['HImass'] = HImodelz.assignHI(halocat, cencat, satcat) cencat['HIsat'] = HImodelz.getinsat(satcat['HImass'].compute(), satcat['GlobalID'].compute(), cencat.csize, cencat['Mass'].size, cencat.comm).local mbins = 10**np.arange(9, 15.1, 0.2) distribution(aa, halocat, cencat, satcat, outfolder, mbins=mbins)
34.238462
106
0.625927
import numpy as np import re, os from pmesh.pm import ParticleMesh from nbodykit.lab import BigFileCatalog, BigFileMesh, MultipleSpeciesCatalog, FFTPower from nbodykit import setup_logging from mpi4py import MPI import HImodels setup_logging('info') #Get model as parameter import argparse parser = argparse.ArgumentParser() parser.add_argument('-s', '--size', help='for small or big box', default='small') parser.add_argument('-m', '--model', help='model name to use') args = parser.parse_args() if args.model == None: print('Specify a model name') sys.exit() #print(args, args.model) model = args.model #'ModelD' boxsize = args.size # # #Global, fixed things scratchyf = '/global/cscratch1/sd/yfeng1/m3127/' scratchcm = '/global/cscratch1/sd/chmodi/m3127/H1mass/' project = '/project/projectdirs/m3127/H1mass/' cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048} alist = [0.1429,0.1538,0.1667,0.1818,0.2000,0.2222,0.2500,0.2857,0.3333] #Parameters, box size, number of mesh cells, simulation, ... if boxsize == 'small': bs, nc, ncsim, sim, prefix = 256, 512, 2560, 'highres/%d-9100-fixed'%2560, 'highres' elif boxsize == 'big': bs, nc, ncsim, sim, prefix = 1024, 1024, 10240, 'highres/%d-9100-fixed'%10240, 'highres' else: print('Box size not understood, should be "big" or "small"') sys.exit() # It's useful to have my rank for printing... 
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc]) rank = pm.comm.rank comm = pm.comm modeldict = {'ModelA':HImodels.ModelA, 'ModelB':HImodels.ModelB, 'ModelC':HImodels.ModelC} modedict = {'ModelA':'galaxies', 'ModelB':'galaxies', 'ModelC':'halos'} HImodel = modeldict[model] modelname = model mode = modedict[model] ofolder = '../data/outputs/' def distribution(aa, halocat, cencat, satcat, outfolder, mbins=None): if rank==0: print('Calculating distribution') if mbins is None: mbins = np.logspace(9, 15, 100) hmass = halocat['Mass'].compute() htotal, hsize, h1total = [], [], [] for im in range(mbins.size-1): mask = (hmass >= mbins[im]) & (hmass < mbins[im+1]) rankweight = (hmass*mask).sum() htotal.append(comm.allreduce(rankweight)) rankweight = (mask).sum() hsize.append(comm.allreduce(rankweight)) h1bin = [] for cat in [halocat['HImass'], cencat['HImass'], cencat['HIsat']]: rankweight = (cat.compute()*mask).sum() h1bin.append(comm.allreduce(rankweight)) h1total.append(h1bin) if rank==0: tosave = np.zeros((len(hsize), 5)) tosave[:, 1] = hsize tosave[:, 0] = htotal / (tosave[:, 1]) tosave[:, 2:] = h1total/ (tosave[:, 1].reshape(-1, 1)) tosave[np.isnan(tosave)] = 0 header = 'Halo Mass, Number Halos, HI halos, HI centrals, HI satellites' np.savetxt(outfolder + "HI_dist_{:6.4f}.txt".format(aa), tosave, fmt='%0.6e', header=header) if __name__=="__main__": if rank==0: print('Starting') suff='-m1_00p3mh-alpha-0p8-subvol' outfolder = ofolder + suff[1:] if bs == 1024: outfolder = outfolder + "-big" outfolder += "/%s/"%modelname if rank == 0: print(outfolder) try: os.makedirs(outfolder) except : pass for aa in alist: if rank == 0: print('\n ############## Redshift = %0.2f ############## \n'%(1/aa-1)) halocat = BigFileCatalog(scratchyf + sim+ '/fastpm_%0.4f//'%aa, dataset='LL-0.200') mp = halocat.attrs['MassTable'][1]*1e10 halocat['Mass'] = halocat['Length'].compute() * mp cencat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/cencat'%aa+suff) satcat = BigFileCatalog(scratchcm + 
sim+'/fastpm_%0.4f/satcat'%aa+suff) HImodelz = HImodel(aa) halocat['HImass'], cencat['HImass'], satcat['HImass'] = HImodelz.assignHI(halocat, cencat, satcat) cencat['HIsat'] = HImodelz.getinsat(satcat['HImass'].compute(), satcat['GlobalID'].compute(), cencat.csize, cencat['Mass'].size, cencat.comm).local mbins = 10**np.arange(9, 15.1, 0.2) distribution(aa, halocat, cencat, satcat, outfolder, mbins=mbins)
true
true
f716cec14ad98ba7140fbdbb91cb9e7cbc9274b0
11,751
py
Python
couplet_composer/project.py
anttikivi/couplet-composer
0f6aaf894b35ea60d8047c0072ec146d4e1d2a6f
[ "MIT" ]
null
null
null
couplet_composer/project.py
anttikivi/couplet-composer
0f6aaf894b35ea60d8047c0072ec146d4e1d2a6f
[ "MIT" ]
16
2020-10-29T17:31:47.000Z
2022-03-07T17:01:52.000Z
couplet_composer/project.py
anttikivi/couplet-composer
0f6aaf894b35ea60d8047c0072ec146d4e1d2a6f
[ "MIT" ]
null
null
null
# Copyright (c) 2020 Antti Kivi # Licensed under the MIT License """A module that contains the class that represents the project that the build script acts on. """ import importlib import json import logging import os from typing import Any, List from .support.system import System from .support import environment from .binary_dependency import BinaryDependency from .dependency import Dependency class Project: """A class that that represents the project that the build script acts on. Attributes: project_keys (list): The keys of the different subproject under this project. {key}_version (str): The version number of the subproject which has the key '{key}'. {key}_name (str): The name of the subproject which has the key '{key}'. gl_version (str): The target version number of OpenGL. dependencies (list): A list containing the representation objects of the dependencies of the project. cmake_options (dict): A dictionary of CMake options to pass to the build of the project. """ SHARED_VERSION_KEY = "shared_version" SHARED_USAGE_VALUE = "shared" VERSION_KEY = "version" OPENGL_KEY = "opengl" DEPENDENCIES_KEY = "dependencies" NAME_KEY = "name" COMMIT_KEY = "commit" MODULE_KEY = "module" MODULE_DEFAULT_VALUE = "default" CLASS_KEY = "className" FILES_KEY = "files" TEST_ONLY_KEY = "testOnly" BENCHMARK_ONLY_KEY = "benchmarkOnly" ASSET_KEY = "asset" REPOSITORY_KEY = "repository" TAG_PREFIX_KEY = "tagPrefix" CMAKE_OPTIONS_KEY = "cmakeOptions" BINARY_KEY = "binary" PLATFORMS_KEY = "platforms" def __init__( self, source_root: str, repo: str, script_package: str, platform: System ) -> None: """Initializes the project object. Arguments: source_root (str): The root directory of the invocation in which the project and the build files are. repo (str): The name of the repository directory of the project that is being built. script_package (str): The name of the root Python package of the build script. platform (System): The platform that the build script is invoked on. 
""" if not environment.is_path_source_root(path=source_root, repo=repo): logging.critical( "The root directory for the build script invocation is " "invalid: %s", source_root ) raise ValueError product_json = os.path.join(source_root, repo, "product.json") dependency_data = None try: with open(product_json) as f: json_data = json.load(f) self.project_keys = list() for key in json_data: logging.debug( "Checking if the key '%s' should be added to the " "project keys", key ) if key != self.DEPENDENCIES_KEY \ and key != self.OPENGL_KEY \ and key != self.SHARED_VERSION_KEY \ and key != self.CMAKE_OPTIONS_KEY: self.project_keys.append(key) logging.debug( "Added the key '%s' to the project keys", key ) for key in self.project_keys: logging.debug("Setting the project values for %s", key) setattr( self, "{}_version".format(key), self._get_version_from_project_data( data=json_data, key=key ) ) setattr( self, "{}_name".format(key), json_data[key][self.NAME_KEY] ) self.gl_version = json_data[self.OPENGL_KEY][self.VERSION_KEY] dependency_data = self._get_from_project_data( data=json_data, key=self.DEPENDENCIES_KEY ) self.cmake_options = json_data[self.CMAKE_OPTIONS_KEY] \ if self.CMAKE_OPTIONS_KEY in json_data else None except OSError: logging.critical( "The project value file wasn't found: %s", product_json ) if not dependency_data: raise ValueError self.dependencies: List[Dependency] = list() for key, value in dependency_data.items(): self.dependencies.append(self._create_dependency_object( key=key, data=value, root_package=script_package, platform=platform )) def _get_from_project_data(self, data: object, key: str) -> Any: """Reads and resolves the given entry from the data got from the project data JSON file. Args: data (Object): The data object read from the project data JSON file. key (str): The key for the data. Returns: The number, string, or object read from the project data JSON file. 
""" if key not in data: raise ValueError return data[key] def _get_version_from_project_data(self, data: object, key: str) -> str: """Reads and resolves the correct version from the data got from the project data JSON file. Args: data (Object): The data object read from the project data JSON file. key (str): The key for the project part that the version is resolved for. Returns: A 'str' that contains the resolved version number. """ shared = None if self.SHARED_VERSION_KEY not in data \ else data[self.SHARED_VERSION_KEY] if key not in data: raise ValueError key_data = data[key] if self.VERSION_KEY not in key_data: if shared: return shared else: raise ValueError elif key_data[self.VERSION_KEY] == self.SHARED_USAGE_VALUE: return shared else: return key_data[self.VERSION_KEY] def _create_dependency_object( self, key: str, data: dict, root_package: str, platform: System ) -> Dependency: """Creates the representation object of the given dependency by resolving the correct module and class to use. Args: key (str): The simple identifier of the dependency. data (dict): The dependency data for the given key read from the project data file. root_package (str): The name of the root Python package of the build script. platform (System): The platform that the build script is invoked on. Returns: The constructed dependency object. 
""" commit = None if self.COMMIT_KEY not in data else data[self.COMMIT_KEY] library_files = None if self.FILES_KEY not in data \ else data[self.FILES_KEY] platform_files = None if self.PLATFORMS_KEY in data \ and platform.name in data[self.PLATFORMS_KEY] \ and self.FILES_KEY in data[self.PLATFORMS_KEY][platform.name]: platform_data = data[self.PLATFORMS_KEY][platform.name] platform_files = platform_data[self.FILES_KEY] test_only = False if self.TEST_ONLY_KEY in data: test_only = data[self.TEST_ONLY_KEY] benchmark_only = False if self.BENCHMARK_ONLY_KEY in data: benchmark_only = data[self.BENCHMARK_ONLY_KEY] asset_name = None if self.ASSET_KEY in data: if isinstance(data[self.ASSET_KEY], dict): asset_name = data[self.ASSET_KEY][platform.value] else: asset_name = data[self.ASSET_KEY] repository = data[self.REPOSITORY_KEY] if self.REPOSITORY_KEY in data \ else None tag_prefix = data[self.TAG_PREFIX_KEY] if self.TAG_PREFIX_KEY in data \ else Dependency.DEFAULT_TAG_PREFIX cmake_options = data[self.CMAKE_OPTIONS_KEY] \ if self.CMAKE_OPTIONS_KEY in data else None needs_binary = data[self.BINARY_KEY] if self.BINARY_KEY in data \ else None if self.MODULE_KEY not in data or \ data[self.MODULE_KEY] == self.MODULE_DEFAULT_VALUE: if needs_binary: return BinaryDependency( key=key, name=data[self.NAME_KEY], version=data[self.VERSION_KEY], commit=commit, files=library_files, platform_files=platform_files, test_only=test_only, benchmark_only=benchmark_only, asset_name=asset_name, repository=repository, tag_prefix=tag_prefix, cmake_options=cmake_options ) else: return Dependency( key=key, name=data[self.NAME_KEY], version=data[self.VERSION_KEY], commit=commit, files=library_files, platform_files=platform_files, test_only=test_only, benchmark_only=benchmark_only, asset_name=asset_name, repository=repository, tag_prefix=tag_prefix ) else: if self.CLASS_KEY not in data: raise ValueError # TODO Add explanation or logging. 
package_name = "{}.support.dependencies.{}".format( root_package, data[self.MODULE_KEY] ) module = importlib.import_module(package_name) dependency_class = getattr(module, data[self.CLASS_KEY]) if needs_binary: return dependency_class( key=key, name=data[self.NAME_KEY], version=data[self.VERSION_KEY], commit=commit, files=library_files, platform_files=platform_files, test_only=test_only, benchmark_only=benchmark_only, asset_name=asset_name, repository=repository, tag_prefix=tag_prefix, cmake_options=cmake_options ) else: return dependency_class( key=key, name=data[self.NAME_KEY], version=data[self.VERSION_KEY], commit=commit, files=library_files, platform_files=platform_files, test_only=test_only, benchmark_only=benchmark_only, asset_name=asset_name, repository=repository, tag_prefix=tag_prefix )
33.864553
79
0.537997
import importlib import json import logging import os from typing import Any, List from .support.system import System from .support import environment from .binary_dependency import BinaryDependency from .dependency import Dependency class Project: SHARED_VERSION_KEY = "shared_version" SHARED_USAGE_VALUE = "shared" VERSION_KEY = "version" OPENGL_KEY = "opengl" DEPENDENCIES_KEY = "dependencies" NAME_KEY = "name" COMMIT_KEY = "commit" MODULE_KEY = "module" MODULE_DEFAULT_VALUE = "default" CLASS_KEY = "className" FILES_KEY = "files" TEST_ONLY_KEY = "testOnly" BENCHMARK_ONLY_KEY = "benchmarkOnly" ASSET_KEY = "asset" REPOSITORY_KEY = "repository" TAG_PREFIX_KEY = "tagPrefix" CMAKE_OPTIONS_KEY = "cmakeOptions" BINARY_KEY = "binary" PLATFORMS_KEY = "platforms" def __init__( self, source_root: str, repo: str, script_package: str, platform: System ) -> None: if not environment.is_path_source_root(path=source_root, repo=repo): logging.critical( "The root directory for the build script invocation is " "invalid: %s", source_root ) raise ValueError product_json = os.path.join(source_root, repo, "product.json") dependency_data = None try: with open(product_json) as f: json_data = json.load(f) self.project_keys = list() for key in json_data: logging.debug( "Checking if the key '%s' should be added to the " "project keys", key ) if key != self.DEPENDENCIES_KEY \ and key != self.OPENGL_KEY \ and key != self.SHARED_VERSION_KEY \ and key != self.CMAKE_OPTIONS_KEY: self.project_keys.append(key) logging.debug( "Added the key '%s' to the project keys", key ) for key in self.project_keys: logging.debug("Setting the project values for %s", key) setattr( self, "{}_version".format(key), self._get_version_from_project_data( data=json_data, key=key ) ) setattr( self, "{}_name".format(key), json_data[key][self.NAME_KEY] ) self.gl_version = json_data[self.OPENGL_KEY][self.VERSION_KEY] dependency_data = self._get_from_project_data( data=json_data, key=self.DEPENDENCIES_KEY ) self.cmake_options = 
json_data[self.CMAKE_OPTIONS_KEY] \ if self.CMAKE_OPTIONS_KEY in json_data else None except OSError: logging.critical( "The project value file wasn't found: %s", product_json ) if not dependency_data: raise ValueError self.dependencies: List[Dependency] = list() for key, value in dependency_data.items(): self.dependencies.append(self._create_dependency_object( key=key, data=value, root_package=script_package, platform=platform )) def _get_from_project_data(self, data: object, key: str) -> Any: if key not in data: raise ValueError return data[key] def _get_version_from_project_data(self, data: object, key: str) -> str: shared = None if self.SHARED_VERSION_KEY not in data \ else data[self.SHARED_VERSION_KEY] if key not in data: raise ValueError key_data = data[key] if self.VERSION_KEY not in key_data: if shared: return shared else: raise ValueError elif key_data[self.VERSION_KEY] == self.SHARED_USAGE_VALUE: return shared else: return key_data[self.VERSION_KEY] def _create_dependency_object( self, key: str, data: dict, root_package: str, platform: System ) -> Dependency: commit = None if self.COMMIT_KEY not in data else data[self.COMMIT_KEY] library_files = None if self.FILES_KEY not in data \ else data[self.FILES_KEY] platform_files = None if self.PLATFORMS_KEY in data \ and platform.name in data[self.PLATFORMS_KEY] \ and self.FILES_KEY in data[self.PLATFORMS_KEY][platform.name]: platform_data = data[self.PLATFORMS_KEY][platform.name] platform_files = platform_data[self.FILES_KEY] test_only = False if self.TEST_ONLY_KEY in data: test_only = data[self.TEST_ONLY_KEY] benchmark_only = False if self.BENCHMARK_ONLY_KEY in data: benchmark_only = data[self.BENCHMARK_ONLY_KEY] asset_name = None if self.ASSET_KEY in data: if isinstance(data[self.ASSET_KEY], dict): asset_name = data[self.ASSET_KEY][platform.value] else: asset_name = data[self.ASSET_KEY] repository = data[self.REPOSITORY_KEY] if self.REPOSITORY_KEY in data \ else None tag_prefix = data[self.TAG_PREFIX_KEY] if 
self.TAG_PREFIX_KEY in data \ else Dependency.DEFAULT_TAG_PREFIX cmake_options = data[self.CMAKE_OPTIONS_KEY] \ if self.CMAKE_OPTIONS_KEY in data else None needs_binary = data[self.BINARY_KEY] if self.BINARY_KEY in data \ else None if self.MODULE_KEY not in data or \ data[self.MODULE_KEY] == self.MODULE_DEFAULT_VALUE: if needs_binary: return BinaryDependency( key=key, name=data[self.NAME_KEY], version=data[self.VERSION_KEY], commit=commit, files=library_files, platform_files=platform_files, test_only=test_only, benchmark_only=benchmark_only, asset_name=asset_name, repository=repository, tag_prefix=tag_prefix, cmake_options=cmake_options ) else: return Dependency( key=key, name=data[self.NAME_KEY], version=data[self.VERSION_KEY], commit=commit, files=library_files, platform_files=platform_files, test_only=test_only, benchmark_only=benchmark_only, asset_name=asset_name, repository=repository, tag_prefix=tag_prefix ) else: if self.CLASS_KEY not in data: raise ValueError # TODO Add explanation or logging. package_name = "{}.support.dependencies.{}".format( root_package, data[self.MODULE_KEY] ) module = importlib.import_module(package_name) dependency_class = getattr(module, data[self.CLASS_KEY]) if needs_binary: return dependency_class( key=key, name=data[self.NAME_KEY], version=data[self.VERSION_KEY], commit=commit, files=library_files, platform_files=platform_files, test_only=test_only, benchmark_only=benchmark_only, asset_name=asset_name, repository=repository, tag_prefix=tag_prefix, cmake_options=cmake_options ) else: return dependency_class( key=key, name=data[self.NAME_KEY], version=data[self.VERSION_KEY], commit=commit, files=library_files, platform_files=platform_files, test_only=test_only, benchmark_only=benchmark_only, asset_name=asset_name, repository=repository, tag_prefix=tag_prefix )
true
true
f716cf2c2f40b9fb26a797c10a0e167af41d51ea
1,354
py
Python
petpetgif/petpet.py
Gst0ne/pet-pet-gif
e219a859558df99424625e3dc51b287e5c7674ff
[ "MIT" ]
1
2021-11-06T13:14:01.000Z
2021-11-06T13:14:01.000Z
petpetgif/petpet.py
Gst0ne/pet-pet-gif
e219a859558df99424625e3dc51b287e5c7674ff
[ "MIT" ]
null
null
null
petpetgif/petpet.py
Gst0ne/pet-pet-gif
e219a859558df99424625e3dc51b287e5c7674ff
[ "MIT" ]
1
2021-11-12T08:50:24.000Z
2021-11-12T08:50:24.000Z
from PIL import Image from petpetgif.saveGif import save_transparent_gif from pkg_resources import resource_stream frames = 10 resolution = (128, 128) delay = 20 def make(source, dest): """ :param source: A filename (string), pathlib.Path object or a file object. (This parameter corresponds and is passed to the PIL.Image.open() method.) :param dest: A filename (string), pathlib.Path object or a file object. (This parameter corresponds and is passed to the PIL.Image.save() method.) :return: None """ images = [] base = Image.open(source).convert('RGBA').resize(resolution) for i in range(frames): squeeze = i if i < frames/2 else frames - i width = 0.8 + squeeze * 0.02 height = 0.8 - squeeze * 0.05 offsetX = (1 - width) * 0.5 + 0.1 offsetY = (1 - height) - 0.08 canvas = Image.new('RGBA', size=resolution, color=(0, 0, 0, 0)) canvas.paste(base.resize((round(width * resolution[0]), round(height * resolution[1]))), (round(offsetX * resolution[0]), round(offsetY * resolution[1]))) pet = Image.open(resource_stream(__name__, f"img/pet{i}.gif")).convert('RGBA').resize(resolution) canvas.paste(pet, mask=pet) images.append(canvas) save_transparent_gif(images, durations=30, save_file=dest)
38.685714
162
0.639586
from PIL import Image from petpetgif.saveGif import save_transparent_gif from pkg_resources import resource_stream frames = 10 resolution = (128, 128) delay = 20 def make(source, dest): images = [] base = Image.open(source).convert('RGBA').resize(resolution) for i in range(frames): squeeze = i if i < frames/2 else frames - i width = 0.8 + squeeze * 0.02 height = 0.8 - squeeze * 0.05 offsetX = (1 - width) * 0.5 + 0.1 offsetY = (1 - height) - 0.08 canvas = Image.new('RGBA', size=resolution, color=(0, 0, 0, 0)) canvas.paste(base.resize((round(width * resolution[0]), round(height * resolution[1]))), (round(offsetX * resolution[0]), round(offsetY * resolution[1]))) pet = Image.open(resource_stream(__name__, f"img/pet{i}.gif")).convert('RGBA').resize(resolution) canvas.paste(pet, mask=pet) images.append(canvas) save_transparent_gif(images, durations=30, save_file=dest)
true
true
f716cf6f63fdd848b405bbb16c421fdd80bde9ff
1,873
py
Python
remora/tests/test_tracker.py
Hugoch/remora-python
1bb19200135bb84ee5e6e28fe25057ed096c8e31
[ "MIT" ]
null
null
null
remora/tests/test_tracker.py
Hugoch/remora-python
1bb19200135bb84ee5e6e28fe25057ed096c8e31
[ "MIT" ]
null
null
null
remora/tests/test_tracker.py
Hugoch/remora-python
1bb19200135bb84ee5e6e28fe25057ed096c8e31
[ "MIT" ]
null
null
null
import unittest from remora.tracker import Tracker from remora.collectors import AsyncCollector import requests_mock import json class TestTracker(unittest.TestCase): def test_send_payload(self): url = 'http://127.0.0.1:31311' with requests_mock.mock() as m: req = m.put(url) collector = AsyncCollector('http://127.0.0.1:31311') t = Tracker(collector, namespace='foo', app_id='bar') t.track_application_start() collector.queue.join() res = json.loads(req.last_request.text) assert res['name'] == 'start' assert res['app_id'] == 'bar' assert res['namespace'] == 'foo' def test_send_payload_with_custom_fields(self): url = 'http://127.0.0.1:31311' with requests_mock.mock() as m: req = m.put(url) collector = AsyncCollector('http://127.0.0.1:31311') t = Tracker(collector, namespace='foo', app_id='bar', cpu_count=4, user_type='internal') t.track_application_start() collector.queue.join() res = json.loads(req.last_request.text) assert res['name'] == 'start' assert res['app_id'] == 'bar' assert res['namespace'] == 'foo' assert res['cpu_count'] == 4 assert res['user_type'] == 'internal' def test_duration_decorator(self): url = 'http://127.0.0.1:31311' with requests_mock.mock() as m: req = m.put(url) def test(arg): pass collector = AsyncCollector('http://127.0.0.1:31311') t = Tracker(collector, namespace='foo', app_id='bar') t.track_duration('a_duration')(test)(1) collector.queue.join() res = json.loads(req.last_request.text) assert 'duration' in res
39.020833
100
0.571276
import unittest from remora.tracker import Tracker from remora.collectors import AsyncCollector import requests_mock import json class TestTracker(unittest.TestCase): def test_send_payload(self): url = 'http://127.0.0.1:31311' with requests_mock.mock() as m: req = m.put(url) collector = AsyncCollector('http://127.0.0.1:31311') t = Tracker(collector, namespace='foo', app_id='bar') t.track_application_start() collector.queue.join() res = json.loads(req.last_request.text) assert res['name'] == 'start' assert res['app_id'] == 'bar' assert res['namespace'] == 'foo' def test_send_payload_with_custom_fields(self): url = 'http://127.0.0.1:31311' with requests_mock.mock() as m: req = m.put(url) collector = AsyncCollector('http://127.0.0.1:31311') t = Tracker(collector, namespace='foo', app_id='bar', cpu_count=4, user_type='internal') t.track_application_start() collector.queue.join() res = json.loads(req.last_request.text) assert res['name'] == 'start' assert res['app_id'] == 'bar' assert res['namespace'] == 'foo' assert res['cpu_count'] == 4 assert res['user_type'] == 'internal' def test_duration_decorator(self): url = 'http://127.0.0.1:31311' with requests_mock.mock() as m: req = m.put(url) def test(arg): pass collector = AsyncCollector('http://127.0.0.1:31311') t = Tracker(collector, namespace='foo', app_id='bar') t.track_duration('a_duration')(test)(1) collector.queue.join() res = json.loads(req.last_request.text) assert 'duration' in res
true
true
f716d0756983d74be295f8050487b4e269c28f44
3,562
py
Python
src/client/user_bonus.py
ZackPashkin/toloka-kit
8f650e5d8cdded1949ca633cf78f9b851ce839bb
[ "Apache-2.0" ]
153
2021-02-06T13:41:11.000Z
2022-03-19T17:51:01.000Z
src/client/user_bonus.py
ZackPashkin/toloka-kit
8f650e5d8cdded1949ca633cf78f9b851ce839bb
[ "Apache-2.0" ]
29
2021-01-15T12:54:37.000Z
2022-02-07T07:45:32.000Z
src/client/user_bonus.py
ZackPashkin/toloka-kit
8f650e5d8cdded1949ca633cf78f9b851ce839bb
[ "Apache-2.0" ]
17
2021-01-29T15:20:04.000Z
2022-01-30T07:21:03.000Z
__all__ = [ 'UserBonus', 'UserBonusCreateRequestParameters' ] from attr.validators import optional, instance_of import datetime from decimal import Decimal from typing import Any from .primitives.base import BaseTolokaObject from .primitives.parameter import Parameters from ..util._codegen import attribute class UserBonus(BaseTolokaObject): """Issuing a bonus to a specific performer It's addition to payment for completed tasks. Attributes: user_id: Performer ID to whom the bonus will be issued. amount: The bonus amount in dollars. Can be from 0.01 to 100 dollars per user per time. private_comment: Comments that are only visible to the requester. public_title: Message header for the user. You can provide a title in several languages (the message will come in the user's language). public_message: Message text for the user. You can provide text in several languages (the message will come in the user's language). without_message: Do not send a bonus message to the user. To award a bonus without a message, specify null for public_title and public_message and True for without_message. assignment_id: The answer to the task for which this bonus was issued. id: Internal ID of the issued bonus. Read only. created: Date the bonus was awarded, in UTC. Read only. Example: How to create bonus with message for specific assignment. >>> new_bonus = toloka_client.create_user_bonus( >>> UserBonus( >>> user_id='1', >>> amount='0.50', >>> public_title='Perfect job!', >>> public_message='You are the best performer EVER!' >>> assignment_id='012345' >>> ) >>> ) ... Hoiw to create bonus with message in several languages. >>> new_bonus = toloka_client.create_user_bonus( >>> UserBonus( >>> user_id='1', >>> amount='0.10', >>> public_title= { >>> 'EN': 'Good Job!', >>> 'RU': 'Молодец!', >>> }, >>> public_message: { >>> 'EN': 'Ten tasks completed', >>> 'RU': 'Выполнено 10 заданий', >>> }, >>> ) >>> ) ... 
""" user_id: str amount: Decimal = attribute(validator=optional(instance_of(Decimal))) private_comment: str public_title: Any public_message: Any without_message: bool assignment_id: str # Readonly id: str = attribute(readonly=True) created: datetime.datetime = attribute(readonly=True) class UserBonusCreateRequestParameters(Parameters): """Parameters for creating performer bonuses Used in methods 'create_user_bonus', 'create_user_bonuses' и 'create_user_bonuses_async' of the class TolokaClient, to clarify the behavior when creating bonuses. Attributes: operation_id: Operation ID. If asynchronous creation is used, by this identifier you can later get results of creating bonuses. skip_invalid_items: Validation parameters of objects: * True - Award a bonus if the object with bonus information passed validation. Otherwise, skip the bonus. * False - Default behaviour. Stop the operation and don't award bonuses if at least one object didn't pass validation. """ operation_id: str skip_invalid_items: bool
36.721649
130
0.630264
__all__ = [ 'UserBonus', 'UserBonusCreateRequestParameters' ] from attr.validators import optional, instance_of import datetime from decimal import Decimal from typing import Any from .primitives.base import BaseTolokaObject from .primitives.parameter import Parameters from ..util._codegen import attribute class UserBonus(BaseTolokaObject): user_id: str amount: Decimal = attribute(validator=optional(instance_of(Decimal))) private_comment: str public_title: Any public_message: Any without_message: bool assignment_id: str id: str = attribute(readonly=True) created: datetime.datetime = attribute(readonly=True) class UserBonusCreateRequestParameters(Parameters): operation_id: str skip_invalid_items: bool
true
true
f716d0a144b04a18cf4e4f949f0d63ac16f60306
120,881
py
Python
src/sage/misc/sage_input.py
switzel/sage
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
[ "BSL-1.0" ]
null
null
null
src/sage/misc/sage_input.py
switzel/sage
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
[ "BSL-1.0" ]
null
null
null
src/sage/misc/sage_input.py
switzel/sage
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
[ "BSL-1.0" ]
1
2020-07-24T12:20:37.000Z
2020-07-24T12:20:37.000Z
r""" Sage Input Formatting This module provides the function :func:`sage_input` that takes an arbitrary sage value and produces a sequence of commands that, if typed at the ``sage:`` prompt, will recreate the value. If this is not implemented for a particular value, then an exception is raised instead. This might be useful in understanding a part of Sage, or for debugging. For instance, if you have a value produced in a complicated way in the middle of a debugging session, you could use :func:`sage_input` to find a simple way to produce the same value. We attempt to produce commands that are readable and idiomatic.:: sage: sage_input(3) 3 sage: sage_input((polygen(RR) + RR(pi))^2, verify=True) # Verified R.<x> = RR[] x^2 + 6.2831853071795862*x + 9.869604401089358 With ``verify=True``, :func:`sage_input` also verifies the results, by calling :func:`~sage.misc.sage_eval.sage_eval` on the result and verifying that it is equal to the input.:: sage: sage_input(GF(2)(1), verify=True) # Verified GF(2)(1) We can generate code that works without the preparser, with ``preparse=False``; or we can generate code that will work whether or not the preparser is enabled, with ``preparse=None``. Generating code with ``preparse=False`` may be useful to see how to create a certain value in a Python or Cython source file.:: sage: sage_input(5, verify=True) # Verified 5 sage: sage_input(5, preparse=False) ZZ(5) sage: sage_input(5, preparse=None) ZZ(5) sage: sage_input(5r, verify=True) # Verified 5r sage: sage_input(5r, preparse=False) 5 sage: sage_input(5r, preparse=None) int(5) Adding :func:`sage_input` support to your own classes is straightforward. You need to add a :func:`_sage_input_` method which returns a :class:`SageInputExpression` (henceforth abbreviated as SIE) which will reconstruct this instance of your class. A ``_sage_input_`` method takes two parameters, conventionally named ``sib`` and ``coerced``. 
The first argument is a :class:`SageInputBuilder`; it has methods to build SIEs. The second argument, ``coerced``, is a boolean. This is only useful if your class is a subclass of :class:`Element` (although it is always present). If ``coerced`` is ``False``, then your method must generate an expression which will evaluate to a value of the correct type with the correct parent. If ``coerced`` is ``True``, then your method may generate an expression of a type that has a canonical coercion to your type; and if ``coerced`` is 2, then your method may generate an expression of a type that has a conversion to your type. Let's work through some examples. We'll build a sequence of functions that would be acceptable as ``_sage_input_`` methods for the :class:`~sage.rings.rational.Rational` class. Here's the first and simplest version.:: sage: def qq_sage_input_v1(self, sib, coerced): ....: return sib(self.numerator())/sib(self.denominator()) We see that given a :class:`SageInputBuilder` ``sib``, you can construct a SIE for a value ``v`` simply with ``sib(v)``, and you can construct a SIE for a quotient with the division operator. Of course, the other operators also work, and so do function calls, method calls, subscripts, etc. We'll test with the following code, which you don't need to understand. (It produces a list of 8 results, showing the formatted versions of -5/7 and 3, with the preparser either enabled or disabled and either with or without an automatic coercion to QQ.):: sage: from sage.misc.sage_input import SageInputBuilder sage: def test_qq_formatter(fmt): ....: results = [] ....: for v in [-5/7, QQ(3)]: ....: for pp in [False, True]: ....: for coerced in [False, True]: ....: sib = SageInputBuilder(preparse=pp) ....: results.append(sib.result(fmt(v, sib, coerced))) ....: return results sage: test_qq_formatter(qq_sage_input_v1) [-ZZ(5)/ZZ(7), -ZZ(5)/ZZ(7), -5/7, -5/7, ZZ(3)/ZZ(1), ZZ(3)/ZZ(1), 3/1, 3/1] Let's try for some shorter, perhaps nicer-looking output. 
We'll start by getting rid of the ``ZZ`` in the denominators; even without the preparser, ``-ZZ(5)/7 == -ZZ(5)/ZZ(7)``.:: sage: def qq_sage_input_v2(self, sib, coerced): ....: return sib(self.numerator())/sib.int(self.denominator()) The ``int`` method on :class:`SageInputBuilder` returns a SIE for an integer that is always represented in the simple way, without coercions. (So, depending on the preparser mode, it might read in as an :class:`~sage.rings.integer.Integer`, an ``int``, or a ``long``.):: sage: test_qq_formatter(qq_sage_input_v2) [-ZZ(5)/7, -ZZ(5)/7, -5/7, -5/7, ZZ(3)/1, ZZ(3)/1, 3/1, 3/1] Next let's get rid of the divisions by 1. These are more complicated, since if we're not careful we'll get results in \ZZ instead of \QQ.:: sage: def qq_sage_input_v3(self, sib, coerced): ....: if self.denominator() == 1: ....: if coerced: ....: return sib.int(self.numerator()) ....: else: ....: return sib.name('QQ')(sib.int(self.numerator())) ....: return sib(self.numerator())/sib.int(self.denominator()) We see that the \method{name} method gives an SIE representing a \sage constant or function.:: sage: test_qq_formatter(qq_sage_input_v3) [-ZZ(5)/7, -ZZ(5)/7, -5/7, -5/7, QQ(3), 3, QQ(3), 3] This is the prettiest output we're going to get, but let's make one further refinement. Other :class:`_sage_input_` methods, like the one for polynomials, analyze the structure of SIEs; they work better (give prettier output) if negations are at the outside. 
If the above code were used for rationals, then ``sage_input(polygen(QQ) - 2/3)`` would produce ``x + (-2/3)``; if we change to the following code, then we would get ``x - 2/3`` instead.:: sage: def qq_sage_input_v4(self, sib, coerced): ....: num = self.numerator() ....: neg = (num < 0) ....: if neg: num = -num ....: if self.denominator() == 1: ....: if coerced: ....: v = sib.int(num) ....: else: ....: v = sib.name('QQ')(sib.int(num)) ....: else: ....: v = sib(num)/sib.int(self.denominator()) ....: if neg: v = -v ....: return v sage: test_qq_formatter(qq_sage_input_v4) [-ZZ(5)/7, -ZZ(5)/7, -5/7, -5/7, QQ(3), 3, QQ(3), 3] AUTHORS: - Carl Witty (2008-04): new file - Vincent Delecroix (2015-02): documentation formatting """ ########################################################################## # # Copyright (C) 2008 Carl Witty <Carl.Witty@gmail.com> # 2015 Vincent Delecroix <20100.delecroix@gmail.com> # # Distributed under the terms of the GNU General Public License (GPL) # # http://www.gnu.org/licenses/ # ########################################################################## def sage_input(x, preparse=True, verify=False, allow_locals=False): r""" Return a sequence of commands that can be used to rebuild the object ``x``. INPUT: - ``x`` - the value we want to find an input form for - ``preparse`` - (default ``True``) Whether to generate code that requires the preparser. With ``True``, generated code requires the preparser. With ``False``, generated code requires that the preparser not be used. With ``None``, generated code will work whether or not the preparser is used. - ``verify`` - (default ``False``) If ``True``, then the answer will be evaluated with :func:`sage_eval`, and an exception will be raised if the result is not equal to the original value. (In fact, for ``verify=True``, :func:`sage_input` is effectively run three times, with ``preparse`` set to ``True``, ``False``, and ``None``, and all three results are checked.) 
This is particularly useful for doctests. - ``allow_locals`` - (default ``False``) If ``True``, then values that :func:`sage_input` cannot handle are returned in a dictionary, and the returned code assumes that this dictionary is passed as the ``locals`` parameter of :func:`sage_eval`. (Otherwise, if :func:`sage_input` cannot handle a value, an exception is raised.) EXAMPLES:: sage: sage_input(GF(2)(1)) GF(2)(1) sage: sage_input((GF(2)(0), GF(2)(1)), verify=True) # Verified GF_2 = GF(2) (GF_2(0), GF_2(1)) When the preparser is enabled, we use the \sage generator syntax.:: sage: K.<x> = GF(5)[] sage: sage_input(x^3 + 2*x, verify=True) # Verified R.<x> = GF(5)[] x^3 + 2*x sage: sage_input(x^3 + 2*x, preparse=False) R = GF(5)['x'] x = R.gen() x**3 + 2*x The result of :func:`sage_input` is actually a pair of strings with a special ``__repr__`` method to print nicely.:: sage: r = sage_input(RealField(20)(pi), verify=True) sage: r # Verified RealField(20)(3.1415939) sage: isinstance(r, tuple) True sage: len(r) 2 sage: tuple(r) ('# Verified\n', 'RealField(20)(3.1415939)') We cannot find an input form for a function.:: sage: sage_input((3, lambda x: x)) Traceback (most recent call last): ... ValueError: Can't convert <function <lambda> at 0x...> to sage_input form But we can have :func:`sage_input` continue anyway, and return an input form for the rest of the expression, with ``allow_locals=True``.:: sage: r = sage_input((3, lambda x: x), verify=True, allow_locals=True) sage: r LOCALS: _sil1: <function <lambda> at 0x...> # Verified (3, _sil1) sage: tuple(r) ('# Verified\n', '(3, _sil1)', {'_sil1': <function <lambda> at 0x...>}) """ if not verify: sib = SageInputBuilder(allow_locals=allow_locals, preparse=preparse) return sib.result(sib(x)) # In verify mode, we actually compute and verify the answer with # all three settings of preparse. 
for pp in (True, False, None): sib = SageInputBuilder(allow_locals=allow_locals, preparse=pp) ans = sib.result(sib(x)) verify_si_answer(x, ans, pp) if pp == preparse: ans_l = list(ans) ans_l[0] = '# Verified\n' + ans_l[0] final_answer = SageInputAnswer(*ans_l) return final_answer class SageInputBuilder: r""" An instance of this class is passed to ``_sage_input_`` methods. It keeps track of the current state of the ``_sage_input_`` process, and contains many utility methods for building :class:`SageInputExpression` objects. In normal use, instances of :class:`SageInputBuilder` are created internally by :func:`sage_input`, but it may be useful to create an instance directly for testing or doctesting. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder We can create a :class:`SageInputBuilder`, use it to create some :class:`SageInputExpression` s, and get a result. (As mentioned above, this is only useful for testing or doctesting; normally you would just use :func:`sage_input`.):: sage: sib = SageInputBuilder() sage: sib.result((sib(3) + sib(4)) * (sib(5) + sib(6))) (3 + 4)*(5 + 6) """ def __init__(self, allow_locals=False, preparse=True): r""" Initialize an instance of :class:`SageInputBuilder`. In normal use, instances of :class:`SageInputBuilder` are created internally by :func:`sage_input`, but it may be useful to create an instance directly for testing or doctesting. INPUT: - ``allow_locals`` - (default ``False``) If true, then values that cannot be converted to input form will be stored in a dictionary, which must be passed as the ``locals`` when evaluating the result. - ``preparse`` -- (default ``True``) If true, then the result will assume that the preparser is enabled. If false, then the result will assume that the preparser is disabled. If ``None``, then the result will work whether or not the preparser is enabled. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: SageInputBuilder().preparse() True sage: SageInputBuilder(preparse=False).preparse() False """ self._allow_locals = allow_locals self._preparse = preparse self._cached_types = set() self._cache = {} self._id_cache = {} self._parent_gens = {} self._next_local = 1 self._locals = {} def __call__(self, x, coerced=False): r""" Tries to convert an arbitrary value ``x`` into a :class:`SageInputExpression` (an SIE). We first check to see if an SIE has been cached for ``x``; if so, we return it. If ``x`` is already an SIE, we return it unchanged. If ``x`` has a \method{_sage_input_} method, we call that method. Otherwise, if ``x`` is a value of some Python type that we know how to deal with, we convert it directly. Finally, for values we don't know how to convert, if ``self._allow_locals`` is true, we add it to a ``locals`` dictionary. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.result(sib(sib(3))) 3 sage: sib = SageInputBuilder() sage: sib.result(sib(GF(17)(5))) GF(17)(5) The argument ``coerced=True`` or ``coerced=2`` will get passed to the \method{_sage_input_} method of the argument.:: sage: sib = SageInputBuilder() sage: sib.result(sib(GF(17)(5), True)) 5 sage: sib.result(sib(RealField(200)(1.5), True)) 1.5000000000000000000000000000000000000000000000000000000000000 sage: sib.result(sib(RealField(200)(1.5), 2)) 1.5 Since :func:`sage_input` directly calls this method, all of the following are indirect doctests.:: sage: sage_input(True) True sage: sage_input(-5r, verify=True) # Verified -5r sage: sage_input(7r, preparse=False, verify=True) # Verified 7 sage: sage_input(-11r, preparse=None, verify=True) # Verified -int(11) sage: sage_input(long(-5), verify=True) # Verified -long(5) sage: sage_input(long(-7), preparse=False, verify=True) # Verified -7L sage: sage_input(long(11), preparse=None, verify=True) # Verified long(11) sage: 
sage_input(long(2^70), verify=True) # Verified 1180591620717411303424r sage: sage_input(-long(2^80), preparse=False, verify=True) # Verified -1208925819614629174706176 sage: sage_input(long(2^75), preparse=None, verify=True) # Verified long(37778931862957161709568) sage: sage_input(float(-infinity), preparse=True, verify=True) # Verified -float(infinity) sage: sage_input(float(NaN), preparse=True, verify=True) # Verified float(NaN) sage: sage_input(float(-pi), preparse=True, verify=True) # Verified float(-RR(3.1415926535897931)) sage: sage_input(float(42), preparse=True, verify=True) # Verified float(42) sage: sage_input("Hello, world\n", verify=True) # Verified 'Hello, world\n' sage: sage_input("'", verify=True) # Verified "'" sage: sage_input('"', verify=True) # Verified '"' sage: sage_input(''' "'Hi,' she said." ''', verify=True) # Verified ' "\'Hi,\' she said." ' sage: sage_input('Icky chars: \0\n\t\b\'\"\200\300\234', verify=True) # Verified 'Icky chars: \x00\n\t\x08\'"\x80\xc0\x9c' sage: sage_input(u'unicode with spectral: \u1234\U00012345', verify=True) # Verified u'unicode with spectral: \u1234\U00012345' sage: sage_input((2, 3.5, 'Hi'), verify=True) # Verified (2, 3.5, 'Hi') sage: sage_input(lambda x: x) Traceback (most recent call last): ... ValueError: Can't convert <function <lambda> at 0x...> to sage_input form sage: sage_input(lambda x: x, allow_locals=True, verify=True) LOCALS: _sil1: <function <lambda> at 0x...> # Verified _sil1 """ # We want to look up x in our cache, to see if we've seen it before. # However, we don't want to assume that hashing x is always # efficient, so we only try the lookup if some value of the same # type as x has been cached. 
from sage.structure.all import parent if type(x) in self._cached_types: v = self._cache.get((parent(x), x)) if v is not None: return v v = self._id_cache.get(id(x)) if v is not None: return v[1] if isinstance(x, SageInputExpression): return x if hasattr(x, '_sage_input_'): return x._sage_input_(self, coerced) if x is None: return SIE_literal_stringrep(self, 'None') if isinstance(x, bool): return SIE_literal_stringrep(self, str(x)) if isinstance(x, int) or \ (isinstance(x, long) and isinstance(int(x), long)): # For longs that don't fit in an int, we just use the int # code; it will get extended to long automatically. if self._preparse == True: if x < 0: return -SIE_literal_stringrep(self, str(-x) + 'r') else: return SIE_literal_stringrep(self, str(x) + 'r') elif self._preparse == False: return self.int(x) else: tyname = 'int' if isinstance(x, int) else 'long' if x < 0: return -self.name(tyname)(self.int(-x)) else: return self.name(tyname)(self.int(x)) if isinstance(x, long): # This must be a long that does fit in an int, so we need either # long(x) or an 'L' suffix. # With the current preparser, 1Lr does not work. # 1rL does work; but that's just ugly, so I don't use it. if self._preparse == False: if x < 0: return -SIE_literal_stringrep(self, str(-x) + 'L') else: return SIE_literal_stringrep(self, str(x) + 'L') else: if x < 0: return -self.name('long')(self.int(-x)) else: return self.name('long')(self.int(x)) if isinstance(x, float): # floats could often have prettier output, # but I think they're rare enough in Sage that it's not # worth the effort. 
from sage.all import RR, ZZ, infinity if x == float(infinity): return self.name('float')(self.name('infinity')) if x != x: return self.name('float')(self.name('NaN')) if x == -float(infinity): return -self.name('float')(self.name('infinity')) if self._preparse == False and float(str(x)) == x: if x < 0: return -SIE_literal_stringrep(self, str(-x)) else: return SIE_literal_stringrep(self, str(x)) rrx = RR(x) if rrx in ZZ and abs(rrx) < (1 << 53): return self.name('float')(self.int(ZZ(rrx))) return self.name('float')(RR(x)) if isinstance(x, (str, unicode)): return SIE_literal_stringrep(self, repr(x)) if isinstance(x, tuple): return SIE_tuple(self, [self(_) for _ in x], False) if isinstance(x, list): return SIE_tuple(self, [self(_) for _ in x], True) if isinstance(x, dict): return self.dict(x) if self._allow_locals: loc = self._next_local self._next_local += 1 loc_name = '_sil%d' % loc self._locals[loc_name] = x return SIE_literal_stringrep(self, loc_name) else: raise ValueError("Can't convert {} to sage_input form".format(x)) def preparse(self): r""" Checks the preparse status. It returns ``True`` if the preparser will be enabled, ``False`` if it will be disabled, and ``None`` if the result must work whether or not the preparser is enabled. For example, this is useful in the \method{_sage_input_} methods of :class:`~sage.rings.integer.Integer` and :class:`RealNumber`; but most \method{_sage_input_} methods will not need to examine this. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: SageInputBuilder().preparse() True sage: SageInputBuilder(preparse=False).preparse() False """ return self._preparse def int(self, n): r""" Return a raw SIE from the integer ``n`` As it is raw, it may read back as a Sage Integer, a Python int or a Python long, depending on its size and whether the preparser is enabled. 
INPUT: - ``n`` - a Sage Integer, a Python int or a Python long EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.result(sib.int(-3^50)) -717897987691852588770249 sage: sib = SageInputBuilder() sage: sib.result(sib.int(long(2^65))) 36893488147419103232 sage: sib = SageInputBuilder() sage: sib.result(sib.int(-42r)) -42 """ if n < 0: return -SIE_literal_stringrep(self, -n) else: return SIE_literal_stringrep(self, n) def float_str(self, n): r""" Given a string representing a floating-point number, produces a :class:`SageInputExpression` that formats as that string. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.result(sib.float_str(repr(RR(e)))) 2.71828182845905 """ return SIE_literal_stringrep(self, n) def name(self, n): r""" Given a string representing a Python name, produces a :class:`SageInputExpression` for that name. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.result(sib.name('pi') + sib.name('e')) pi + e """ return SIE_literal_stringrep(self, n) def cache(self, x, sie, name): r""" INPUT: - ``x`` - an arbitrary value - ``sie`` - a :class:`SageInputExpression` - ``name`` - a requested variable name Enters ``x`` and ``sie`` in a cache, so that subsequent calls ``self(x)`` will directly return ``sie``. Also, marks the requested name of this ``sie`` to be ``name``. This should almost always be called as part of the \method{_sage_input_} method of a parent. It may also be called on values of an arbitrary type, which may be useful if the values are both large and likely to be used multiple times in a single expression. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie42 = sib(GF(101)(42)) sage: sib.cache(GF(101)(42), sie42, 'the_ultimate_answer') sage: sib.result(sib(GF(101)(42)) + sib(GF(101)(42))) the_ultimate_answer = GF(101)(42) the_ultimate_answer + the_ultimate_answer Note that we don't assign the result to a variable if the value is only used once.:: sage: sib = SageInputBuilder() sage: sie42 = sib(GF(101)(42)) sage: sib.cache(GF(101)(42), sie42, 'the_ultimate_answer') sage: sib.result(sib(GF(101)(42)) + sib(GF(101)(43))) GF_101 = GF(101) GF_101(42) + GF_101(43) """ from sage.structure.all import parent self._cached_types.add(type(x)) self._cache[(parent(x), x)] = sie sie._sie_preferred_varname = name def id_cache(self, x, sie, name): r""" INPUT: - ``x`` - an arbitrary value - ``sie`` - a :class:`SageInputExpression` - ``name`` - a requested variable name Enters ``x`` and ``sie`` in a cache, so that subsequent calls ``self(x)`` will directly return ``sie``. Also, marks the requested name of this ``sie`` to be ``name``. Differs from the \method{cache} method in that the cache is keyed by ``id(x)`` instead of by ``x``. This may be called on values of an arbitrary type, which may be useful if the values are both large and likely to be used multiple times in a single expression; it should be preferred to \method{cache} if equality on the values is difficult or impossible to compute. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: x = polygen(ZZ) sage: sib = SageInputBuilder() sage: my_42 = 42*x sage: sie42 = sib(my_42) sage: sib.id_cache(my_42, sie42, 'the_ultimate_answer') sage: sib.result(sib(my_42) + sib(my_42)) R.<x> = ZZ[] the_ultimate_answer = 42*x the_ultimate_answer + the_ultimate_answer Since id_cache keys off of object identity ("is"), the following does not trigger the cache.:: sage: sib.result(sib(42*x) + sib(42*x)) 42*x + 42*x Note that we don't assign the result to a variable if the value is only used once.:: sage: sib = SageInputBuilder() sage: my_42 = 42*x sage: sie42 = sib(my_42) sage: sib.id_cache(my_42, sie42, 'the_ultimate_answer') sage: sib.result(sib(my_42) + sib(43*x)) R.<x> = ZZ[] 42*x + 43*x """ # If we just mapped id(x) -> sie, then it's possible that x could # be freed and another value allocated at the same position, # corrupting the cache. But since we store x, that can't happen; # we don't even have to look at x when we read the cache. self._id_cache[id(x)] = (x, sie) sie._sie_preferred_varname = name def import_name(self, module, name, alt_name=None): r""" INPUT: - ``module``, ``name``, ``alt_name`` -- strings Creates an expression that will import a name from a module and then use that name. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: v1 = sib.import_name('sage.foo.bar', 'baz') sage: v2 = sib.import_name('sage.foo.bar', 'ZZ', 'not_the_real_ZZ') sage: sib.result(v1+v2) from sage.foo.bar import baz from sage.foo.bar import ZZ as not_the_real_ZZ baz + not_the_real_ZZ We adjust the names if there is a conflict.:: sage: sib = SageInputBuilder() sage: v1 = sib.import_name('sage.foo', 'poly') sage: v2 = sib.import_name('sage.bar', 'poly') sage: sib.result(v1+v2) from sage.foo import poly as poly1 from sage.bar import poly as poly2 poly1 + poly2 """ return SIE_import_name(self, module, name, alt_name) def assign(self, e, val): r""" Constructs a command that performs the assignment ``e=val``. Can only be used as an argument to the ``command`` method. INPUT: - ``e``, ``val`` -- SageInputExpression EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: circular = sib([None]) sage: sib.command(circular, sib.assign(circular[0], circular)) sage: sib.result(circular) si = [None] si[0] = si si """ e = self(e) val = self(val) return SIE_assign(self, e, val) def command(self, v, cmd): r""" INPUT: - ``v``, ``cmd`` -- SageInputExpression Attaches a command to v, which will be executed before v is used. Multiple commands will be executed in the order added. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: incr_list = sib([]) sage: sib.command(incr_list, incr_list.append(1)) sage: sib.command(incr_list, incr_list.extend([2, 3])) sage: sib.result(incr_list) si = [] si.append(1) si.extend([2, 3]) si """ v = self(v) cmd = self(cmd) v._sie_commands.append(cmd) def dict(self, entries): r""" Given a dictionary, or a list of (key, value) pairs, produces a :class:`SageInputExpression` representing the dictionary. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.result(sib.dict({1:1, 2:5/2, 3:100/3})) {1:1, 2:5/2, 3:100/3} sage: sib.result(sib.dict([('hello', 'sunshine'), ('goodbye', 'rain')])) {'hello':'sunshine', 'goodbye':'rain'} """ if isinstance(entries, dict): entries = list(entries.items()) entries = [(self(key),self(val)) for (key,val) in entries] return SIE_dict(self, entries) def getattr(self, sie, attr): r""" Given a :class:`SageInputExpression` representing ``foo`` and an attribute name bar, produce a :class:`SageInputExpression` representing ``foo.bar``. Normally, you could just use attribute-access syntax, but that doesn't work if bar is some attribute that bypasses __getattr__ (such as if bar is '__getattr__' itself). EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.getattr(ZZ, '__getattr__') {getattr: {atomic:ZZ}.__getattr__} sage: sib.getattr(sib.name('foo'), '__new__') {getattr: {atomic:foo}.__new__} """ return SIE_getattr(self, self(sie), attr) def empty_subscript(self, parent): r""" Given a :class:`SageInputExpression` representing ``foo``, produces a :class:`SageInputExpression` representing ``foo[]``. Since this is not legal Python syntax, it is useful only for producing the \sage generator syntax for a polynomial ring. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.result(sib.empty_subscript(sib(2) + sib(3))) (2 + 3)[] The following calls this method indirectly.:: sage: sage_input(polygen(ZZ['y'])) R.<x> = ZZ['y'][] x """ return SIE_subscript(self, parent, None) def use_variable(self, sie, name): r""" Marks the :class:`SageInputExpression` ``sie`` to use a variable even if it is only referenced once. (If ``sie`` is the final top-level expression, though, it will not use a variable.) 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: e = sib.name('MatrixSpace')(ZZ, 10, 10) sage: sib.use_variable(e, 'MS') sage: sib.result(e.zero_matrix()) MS = MatrixSpace(ZZ, 10, 10) MS.zero_matrix() Without the call to use_variable, we get this instead:: sage: sib = SageInputBuilder() sage: e = sib.name('MatrixSpace')(ZZ, 10, 10) sage: sib.result(e.zero_matrix()) MatrixSpace(ZZ, 10, 10).zero_matrix() And even with the call to use_variable, we don't use a variable here:: sage: sib = SageInputBuilder() sage: e = sib.name('MatrixSpace')(ZZ, 10, 10) sage: sib.use_variable(e, 'MS') sage: sib.result(e) MatrixSpace(ZZ, 10, 10) """ sie._sie_preferred_varname = name sie._sie_request_use_var = True def share(self, sie): r""" Mark the given expression as sharable, so that it will be replaced by a variable if it occurs multiple times in the expression. (Most non-single-token expressions are already sharable.) EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder Without explicitly using .share(), string literals are not shared:: sage: sib = SageInputBuilder() sage: e = sib('hello') sage: sib.result(sib((e, e))) ('hello', 'hello') See the difference if we use .share():: sage: sib = SageInputBuilder() sage: e = sib('hello') sage: sib.share(e) sage: sib.result(sib((e, e))) si = 'hello' (si, si) """ sie._sie_share = True def parent_with_gens(self, parent, sie, gen_names, name, gens_syntax=None): r""" This method is used for parents with generators, to manage the \sage preparser generator syntax (like ``K.<x> = QQ[]``). 
The \method{_sage_input_} method of a parent class with generators should construct a :class:`SageInputExpression` for the parent, and then call this method with the parent itself, the constructed SIE, a sequence containing the names of the generators, and (optionally) another SIE to use if the \sage generator syntax is used; typically this will be the same as the first SIE except omitting a ``names`` parameter. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: def test_setup(use_gens=True, preparse=True): ... sib = SageInputBuilder(preparse=preparse) ... gen_names=('foo', 'bar') ... parent = "some parent" ... normal_sie = sib.name('make_a_parent')(names=gen_names) ... if use_gens: ... gens_sie = sib.name('make_a_parent')() ... else: ... gens_sie = None ... name = 'the_thing' ... result = sib.parent_with_gens(parent, normal_sie, ... gen_names, name, ... gens_syntax=gens_sie) ... return sib, result sage: sib, par_sie = test_setup() sage: sib.result(par_sie) make_a_parent(names=('foo', 'bar')) sage: sib, par_sie = test_setup() sage: sib.result(sib(3) * sib.gen("some parent", 0)) the_thing.<foo,bar> = make_a_parent() 3*foo sage: sib, par_sie = test_setup(preparse=False) sage: sib.result(par_sie) make_a_parent(names=('foo', 'bar')) sage: sib, par_sie = test_setup(preparse=False) sage: sib.result(sib(3) * sib.gen("some parent", 0)) the_thing = make_a_parent(names=('foo', 'bar')) foo,bar = the_thing.gens() ZZ(3)*foo sage: sib, par_sie = test_setup(use_gens=False) sage: sib.result(par_sie) make_a_parent(names=('foo', 'bar')) sage: sib, par_sie = test_setup(use_gens=False) sage: sib.result(sib(3) * sib.gen("some parent", 0)) the_thing = make_a_parent(names=('foo', 'bar')) foo,bar = the_thing.gens() 3*foo sage: sib, par_sie = test_setup() sage: sib.result(par_sie - sib.gen("some parent", 1)) the_thing.<foo,bar> = make_a_parent() the_thing - bar """ v = SIE_gens_constructor(self, sie, gen_names, gens_syntax=gens_syntax) self.cache(parent, v, name) gens 
= [SIE_gen(self, v, n) for n in gen_names] self._parent_gens[parent] = gens v._sie_gens = gens return v def gen(self, parent, n=0): r""" Given a parent, returns a :class:`SageInputExpression` for the `n`-th (default 0) generator of the parent. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.result(sib.gen(ZZ['y'])) R.<y> = ZZ[] y """ if not parent in self._parent_gens: self(parent) if not parent in self._parent_gens: raise ValueError("{} did not register generators for sage_input".format(parent)) gens = self._parent_gens[parent] if n > len(gens): raise ValueError("{} registered only {} generators for sage_input".format(parent, len(gens))) return gens[n] def prod(self, factors, simplify=False): r""" Given a sequence, returns a :class:`SageInputExpression` for the product of the elements. With ``simplify=True``, performs some simplifications first. If any element is formatted as a string ``'0'``, then that element is returned directly. If any element is formatted as a string ``'1'``, then it is removed from the sequence (unless it is the only element in the sequence). And any negations are removed from the elements and moved to the outside of the product. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.result(sib.prod([-1, 0, 1, -2])) -1*0*1*-2 sage: sib = SageInputBuilder() sage: sib.result(sib.prod([-1, 0, 1, 2], simplify=True)) 0 sage: sib = SageInputBuilder() sage: sib.result(sib.prod([-1, 2, -3, -4], simplify=True)) -2*3*4 sage: sib = SageInputBuilder() sage: sib.result(sib.prod([-1, 1, -1, -1], simplify=True)) -1 sage: sib = SageInputBuilder() sage: sib.result(sib.prod([1, 1, 1], simplify=True)) 1 """ neg = False factors = [self(factor) for factor in factors] if simplify: i = 0 while i < len(factors): factor = factors[i] while isinstance(factor, SIE_unary) and factor._sie_op == '-': neg = not neg factor = factor._sie_operand factors[i] = factor if isinstance(factor, SIE_literal_stringrep) and factor._sie_value == '0': factors = [factor] neg = False break if isinstance(factor, SIE_literal_stringrep) and factor._sie_value == '1': factors[i:i+1] = [] else: i += 1 if len(factors) == 0: factors.append(SIE_literal_stringrep(self, '1')) prod = factors[0] for factor in factors[1:]: prod = prod * factor if neg: prod = -prod return prod def sum(self, terms, simplify=False): r""" Given a sequence, returns a :class:`SageInputExpression` for the product of the elements. With ``simplify=True``, performs some simplifications first. If any element is formatted as a string ``'0'``, then it is removed from the sequence (unless it is the only element in the sequence); and any instances of ``a + -b`` are changed to ``a - b``. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.result(sib.sum([-1, 0, 1, 0, -1])) -1 + 0 + 1 + 0 + -1 sage: sib = SageInputBuilder() sage: sib.result(sib.sum([-1, 0, 1, 0, -1], simplify=True)) -1 + 1 - 1 sage: sib = SageInputBuilder() sage: sib.result(sib.sum([0, 0, 0], simplify=True)) 0 """ terms = [self(term) for term in terms] if simplify: i = 0 while i < len(terms): term = terms[i] if isinstance(term, SIE_literal_stringrep) and term._sie_value == '0': terms[i:i+1] = [] else: i += 1 if len(terms) == 0: terms.append(SIE_literal_stringrep(self, '0')) sum = terms[0] for term in terms[1:]: negate = False while simplify and isinstance(term, SIE_unary) and term._sie_op == '-': negate = not negate term = term._sie_operand if negate: sum = sum - term else: sum = sum + term return sum def result(self, e): r""" Given a :class:`SageInputExpression` constructed using ``self``, returns a tuple of a list of commands and an expression (and possibly a dictionary of local variables) suitable for :func:`sage_eval`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: r = sib.result(sib(6) * sib(7)); r 6*7 sage: tuple(r) ('', '6*7') """ sif = SageInputFormatter() # Even if use_variable was called on e, don't automatically # use a variable for it. e._sie_request_use_var = False e._sie_prepare(sif) s = sif.format(e, 0) locals = self._locals if len(locals): return SageInputAnswer(sif._commands, sif.format(e, 0), locals) else: return SageInputAnswer(sif._commands, sif.format(e, 0)) # Python's precedence levels. Hand-transcribed from section 5.14 of # the Python reference manual. 
_prec_lambda = 2 _prec_or = 4 _prec_and = 6 _prec_not = 8 _prec_membership = 10 _prec_identity = 12 _prec_comparison = 14 _prec_bitor = 16 _prec_bitxor = 18 _prec_bitand = 20 _prec_shift = 22 _prec_addsub = 24 _prec_muldiv = 26 _prec_negate = 28 _prec_bitnot = 30 _prec_exponent = 32 _prec_attribute = 34 _prec_subscript = 36 _prec_slicing = 38 _prec_funcall = 40 _prec_atomic = 42 class SageInputExpression(object): r""" Subclasses of this class represent expressions for :func:`sage_input`. \sage classes should define a \method{_sage_input_} method, which will return an instance of :class:`SageInputExpression`, created using methods of :class:`SageInputBuilder`. To the extent possible, operations on :class:`SageInputExpression` objects construct a new :class:`SageInputExpression` representing that operation. That is, if ``a`` is a :class:`SageInputExpression`, then ``a + b`` constructs a :class:`SageInputExpression` representing this sum. This also works for attribute access, function calls, subscripts, etc. Since arbitrary attribute accesses might be used to construct a new attribute-access expression, all internal attributes and methods have names that begin with ``_sie_`` to reduce the chance of collisions. It is expected that instances of this class will not be directly created outside this module; instead, instances will be created using methods of :class:`SageInputBuilder` and :class:`SageInputExpression`. Values of type :class:`SageInputExpression` print in a fairly ugly way, that reveals the internal structure of the expression tree. """ def __init__(self, sib): r""" Initialize a :class:`SageInputExpression`. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) # indirect doctest sage: sie {atomic:3} sage: sie._sie_builder is sib True """ self._sie_refcount = 0 self._sie_builder = sib self._sie_context = None self._sie_preferred_varname = None self._sie_varname = None self._sie_request_use_var = False self._sie_use_var = False self._sie_requested_varname = False self._sie_commands = [] def _sie_is_simple(self): r""" Returns ``True`` if this :class:`SageInputExpression` is simple enough that duplicate uses are not worth caching. Normally this will be true if the expression represents a single token. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.name('QQ')._sie_is_simple() True sage: sib(GF(2))._sie_is_simple() False """ return False def _sie_referenced(self): r""" Returns a list of the immediate subexpressions of this :class:`SageInputExpression`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: len(sib(GF(2))._sie_referenced()) 2 sage: sib(5)._sie_referenced() [] """ return [] def _sie_prepare(self, sif): r""" We traverse the entire expression DAG to prepare for printing. Here, we notice nodes with more than one parent, and mark them to replace with a variable (rather than generating the value multiple times). 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: pair = sib((GF(2), GF(2))) sage: single = sib(GF(2)) sage: single._sie_refcount 0 sage: single._sie_use_var False sage: sib((GF(2), GF(2)))._sie_prepare(sif) sage: single._sie_refcount 2 sage: single._sie_use_var True """ if self._sie_context is not sif: self._sie_context = sif self._sie_refcount = 0 self._sie_refcount += 1 if self._sie_request_use_var: self._sie_require_varname(sif) self._sie_use_var = True if not self._sie_is_simple(): if self._sie_refcount == 2: self._sie_require_varname(sif) self._sie_use_var = True if self._sie_refcount == 1: for r in self._sie_referenced(): r._sie_prepare(sif) for r in self._sie_commands: r._sie_prepare(sif) def _sie_require_varname(self, sif): r""" Mark this :class:`SageInputExpression` as requiring a variable name, and register it with a :class:`SageInputFormatter` (which will allocate a variable name at the end of the preparatory phase). EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib(3) sage: sie._sie_require_varname(sif) sage: sie._sie_requested_varname True """ if not self._sie_requested_varname: sif.register_name(self._sie_preferred_varname) self._sie_requested_varname = True self._sie_generated = False def _sie_get_varname(self, sif): r""" Get the variable name that the :class:`SageInputFormatter` allocated for this :class:`SageInputExpression`. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib(3) sage: sie._sie_require_varname(sif) sage: sie._sie_get_varname(sif) 'si' """ if self._sie_varname is None: self._sie_varname = sif.get_name(self._sie_preferred_varname) return self._sie_varname def _sie_is_negation(self): r""" Test whether a :class:`SageInputExpression` is a negation. Despite the obscure name, this is intended to be a public method. See the documentation for \method{SIE_unary._sie_is_negation} for useful examples. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sie = sib.name('foo') sage: sie._sie_is_negation() False """ return False def __call__(self, *args, **kwargs): r""" Given a :class:`SageInputExpression`, build a new :class:`SageInputExpression` representing a function call node (with ``self`` as the function). EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: sie(4) {call: {atomic:3}({atomic:4})} """ args = [self._sie_builder(_) for _ in args] for k in kwargs: kwargs[k] = self._sie_builder(kwargs[k]) return SIE_call(self._sie_builder, self, args, kwargs) def __getitem__(self, key): r""" Given a :class:`SageInputExpression`, build a new :class:`SageInputExpression` representing a subscript expression (with ``self`` as the value being subscripted). Currently, slices are not supported. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: sie[4] {subscr: {atomic:3}[{atomic:4}]} sage: sie[sib.name('x'), sib.name('y')] {subscr: {atomic:3}[{tuple: ({atomic:x}, {atomic:y})}]} """ skey = self._sie_builder(key) return SIE_subscript(self._sie_builder, self, skey) def __getattr__(self, attr): r""" Given a :class:`SageInputExpression`, build a new :class:`SageInputExpression` representing an attribute access. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib.name('x') sage: sie.foo {getattr: {atomic:x}.foo} sage: sie.foo() {call: {getattr: {atomic:x}.foo}()} """ return SIE_getattr(self._sie_builder, self, attr) def _rich_repr_(self, display_manager, **kwds): """ Disable rich output. This is necessary because otherwise our :meth:`__getattr__` would be called. EXAMPLES:: sage: from sage.repl.rich_output import get_display_manager sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib.name('x') sage: sie._rich_repr_(get_display_manager()) is None True """ return None def __pow__(self, other): r""" Compute an expression tree for ``self ** other``. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: sie ^ 4 {binop:** {atomic:3} {atomic:4}} """ return self._sie_binop('**', other) def __mul__(self, other): r""" Compute an expression tree for ``self * other``. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: sie * 4 {binop:* {atomic:3} {atomic:4}} """ return self._sie_binop('*', other) def __div__(self, other): r""" Compute an expression tree for ``self / other``. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: sie / 4 {binop:/ {atomic:3} {atomic:4}} """ return self._sie_binop('/', other) def __add__(self, other): r""" Compute an expression tree for ``self + other``. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: sie + 4 {binop:+ {atomic:3} {atomic:4}} """ return self._sie_binop('+', other) def __sub__(self, other): r""" Compute an expression tree for ``self - other``. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: sie - 4 {binop:- {atomic:3} {atomic:4}} """ return self._sie_binop('-', other) def _sie_binop(self, op, other): r""" Compute an expression tree for ``self OP other``, where OP is a string representing a binary operator (such as '+' or '**'). EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: v = sib.name('x')._sie_binop('%', sib.name('y')) sage: type(v) <class 'sage.misc.sage_input.SIE_binary'> sage: (v)._sie_op '%' sage: v {binop:% {atomic:x} {atomic:y}} """ return SIE_binary(self._sie_builder, op, self, self._sie_builder(other)) def __neg__(self): r""" Compute an expression tree for ``-self``. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: -sie {unop:- {atomic:3}} """ return self._sie_unop('-') def __invert__(self): r""" Compute an expression tree for ``~self``. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: ~sie {unop:~ {atomic:3}} """ return self._sie_unop('~') def __abs__(self): r""" Compute an expression tree for ``abs(self)``. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: abs(sie) {call: {atomic:abs}({atomic:3})} """ return self._sie_builder.name('abs')(self) def _sie_unop(self, op): r""" Compute an expression tree for ``OP self``, where OP is a string representing a unary operator (such as '-' or '~'). EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: v = sie._sie_unop('~') sage: type(v) <class 'sage.misc.sage_input.SIE_unary'> sage: (v)._sie_op '~' sage: v {unop:~ {atomic:3}} """ return SIE_unary(self._sie_builder, op, self) def _sie_format(self, sif): r""" Return the formatted string value of this expression, and the precedence of the top-level operator in the expression. EXAMPLES: Actually, all of these are examples of the \method{_sie_format} method on subclasses of :class:`SageInputExpression`; :class:`SageInputExpression` itself is an abstract base class (that cannot be instantiated).:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib(3) sage: for v in (sie, sie+7, sie/5): ... v._sie_prepare(sif) ... v._sie_format(sif) ('3', 42) ('3 + 7', 24) ('3/5', 26) sage: v = sib.assign(sib.name('foo').x, 3) sage: v._sie_prepare(sif) sage: v._sie_format(sif) Traceback (most recent call last): ... ValueError: Cannot format SIE_assign as expression """ raise NotImplementedError def _sie_format_statement(self, sif): r""" Return the formatted string value of this expression, when used as a statement. On most :class:`SageInputExpression`s, this forwards directly to the \method{_sie_format} method. However, on :class:`SageInputExpression`s that actually represent statements (such as :class:`SIE_assign`), this method has an implementation and \method{_sie_format} raises an error. 
(This is to prevent accidental use of :class:`SIE_assign` as a value.) EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: v = sib(3) sage: v._sie_prepare(sif) sage: v._sie_format_statement(sif) '3' sage: v = sib.assign(sib.name('foo').x, 3) sage: v._sie_prepare(sif) sage: v._sie_format_statement(sif) 'foo.x = 3' """ result, prec = self._sie_format(sif) return result class SIE_literal(SageInputExpression): r""" An abstract base class for ``literals`` (basically, values which consist of a single token). EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SIE_literal sage: sib = SageInputBuilder() sage: sie = sib(3) sage: sie {atomic:3} sage: isinstance(sie, SIE_literal) True """ def _sie_is_simple(self): r""" Report that :class:`SIE_literal` values are not worth replacing by variables (for ``common subexpression elimination``) even if they occur multiple times in an expression. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib(3) sage: sie._sie_is_simple() True sage: sib.share(sie) sage: sie._sie_is_simple() False sage: sie._sie_share True """ # Perhaps this should actually look at the formatted length of self, # and sometimes return false? If some 50-digit integer occurs multiple # times in an expression, it might be better to do the replacement. return not self._sie_share class SIE_literal_stringrep(SIE_literal): r""" Values in this class are leaves in a :func:`sage_input` expression tree. Typically they represent a single token, and consist of the string representation of that token. They are used for integer, floating-point, and string literals, and for name expressions. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SIE_literal_stringrep sage: sib = SageInputBuilder() sage: isinstance(sib(3), SIE_literal_stringrep) True sage: isinstance(sib(3.14159, True), SIE_literal_stringrep) True sage: isinstance(sib.name('pi'), SIE_literal_stringrep) True sage: isinstance(sib(False), SIE_literal_stringrep) True sage: sib(False) {atomic:False} """ def __init__(self, sib, n): r""" Initialize a :class:`SIE_literal_stringrep` value. INPUT: - ``sib`` - a :class:`SageInputBuilder` - ``n`` - a string; the value to be printed for this expression EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib(3) {atomic:3} sage: sib(3)._sie_value '3' """ super(SIE_literal_stringrep, self).__init__(sib) self._sie_value = str(n) self._sie_share = False def __repr__(self): r""" Returns a string representing this :class:`SIE_literal_stringrep` value. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib(3) {atomic:3} sage: sib("\n") {atomic:'\n'} """ return "{atomic:%s}" % self._sie_value def _sie_format(self, sif): r""" Return the formatted string value of this expression, and an indication that it is ``atomic`` (never needs to be parenthesized). EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib(True) sage: sie._sie_prepare(sif) sage: sie._sie_format(sif) ('True', 42) """ return self._sie_value, _prec_atomic class SIE_call(SageInputExpression): r""" This class represents a function-call node in a :func:`sage_input` expression tree. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib.name('GF') sage: sie(49) {call: {atomic:GF}({atomic:49})} """ def __init__(self, sib, func, args, kwargs): r""" Initialize an instance of :class:`SIE_call`. 
INPUT: - ``sib`` - a :class:`SageInputBuilder` - ``func`` - a :class:`SageInputExpression` representing a function - ``args`` - a list of :class:`SageInputExpression`s representing the positional arguments - ``kwargs`` -- a dictionary mapping strings to :class:`SageInputExpression`s representing the keyword arguments EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib('RealField')(53, rnd='RNDZ') """ super(SIE_call, self).__init__(sib) self._sie_func = func self._sie_args = args self._sie_kwargs = kwargs def __repr__(self): r""" Returns a string representing this :class:`SIE_call` value. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib('RealField')(53, rnd='RNDZ') """ func = repr(self._sie_func) args = [repr(arg) for arg in self._sie_args] kwargs = sorted(k + '=' + repr(v) for k, v in self._sie_kwargs.iteritems()) all_args = ', '.join(args + kwargs) return "{call: %s(%s)}" % (func, all_args) def _sie_referenced(self): r""" Returns a list of the immediate subexpressions of this :class:`SIE_call`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib('RealField')(53, rnd='RNDZ') sage: sie._sie_referenced() [{atomic:53}, {atomic:'RealField'}, {atomic:'RNDZ'}] """ refs = self._sie_args[:] refs.append(self._sie_func) refs.extend(self._sie_kwargs.itervalues()) return refs def _sie_format(self, sif): r""" Return the formatted string value of this expression, and an indication that it is a function call. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib.name('RealField')(53, rnd='RNDZ') sage: sie._sie_prepare(sif) sage: sie._sie_format(sif) ("RealField(53, rnd='RNDZ')", 40) """ func = sif.format(self._sie_func, _prec_attribute) args = [sif.format(arg, 0) for arg in self._sie_args] kwargs = sorted(k + '=' + sif.format(v, 0) for k, v in self._sie_kwargs.iteritems()) all_args = ', '.join(args + kwargs) return ('%s(%s)' % (func, all_args), _prec_funcall) class SIE_subscript(SageInputExpression): r""" This class represents a subscript node in a :func:`sage_input` expression tree. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib.name('QQ')['x,y'] sage: sie {subscr: {atomic:QQ}[{atomic:'x,y'}]} """ def __init__(self, sib, coll, key): r""" Initialize an instance of :class:`SIE_subscript`. INPUT: - ``sib`` -- a :class:`SageInputBuilder` - ``coll`` -- a :class:`SageInputExpression` representing a collection - ``key`` -- a :class:`SageInputExpression` representing the subscript/key As a special case, ``key`` may be ``None``; this represents an empty subscript. This is not legal Python syntax, but it is legal in the \sage preparser in examples like ``K.<x> = QQ[]``. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.name('QQ')['x'] {subscr: {atomic:QQ}[{atomic:'x'}]} sage: sib.name('x')[1,2,3] {subscr: {atomic:x}[{tuple: ({atomic:1}, {atomic:2}, {atomic:3})}]} sage: sib.empty_subscript(sib.name('QQ')) {subscr: {atomic:QQ}[]} """ super(SIE_subscript, self).__init__(sib) self._sie_coll = coll self._sie_key = key def __repr__(self): r""" Returns a string representing this :class:`SIE_subscript` value. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.name('ZZ')['x,y'] {subscr: {atomic:ZZ}[{atomic:'x,y'}]} """ coll = repr(self._sie_coll) if self._sie_key is None: key = '' else: key = repr(self._sie_key) return "{subscr: %s[%s]}" % (coll, key) def _sie_referenced(self): r""" Returns a list of the immediate subexpressions of this :class:`SIE_subscript`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib.name('GF')(5)['x,y'] sage: sie._sie_referenced() [{call: {atomic:GF}({atomic:5})}, {atomic:'x,y'}] """ refs = [self._sie_coll] if self._sie_key is not None: refs.append(self._sie_key) return refs def _sie_format(self, sif): r""" Return the formatted string value of this expression, and an indication that it is a subscript. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib.name('QQ')['x'] sage: sie._sie_prepare(sif) sage: sie._sie_format(sif) ("QQ['x']", 36) """ coll = sif.format(self._sie_coll, _prec_attribute) if self._sie_key is None: key = '' else: key = sif.format(self._sie_key, 0) return '%s[%s]' % (coll, key), _prec_subscript class SIE_getattr(SageInputExpression): r""" This class represents a getattr node in a :func:`sage_input` expression tree. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib.name('CC').gen() sage: sie {call: {getattr: {atomic:CC}.gen}()} """ def __init__(self, sib, obj, attr): r""" Initialize an instance of :class:`SIE_getattr`. 
INPUT: - ``sib`` - a :class:`SageInputBuilder` - ``obj`` - a :class:`SageInputExpression` representing an object - ``attr`` - a string; the attribute name EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.name('QQbar').zeta(5) {call: {getattr: {atomic:QQbar}.zeta}({atomic:5})} """ super(SIE_getattr, self).__init__(sib) self._sie_obj = obj self._sie_attr = attr def __repr__(self): r""" Returns a string representing this :class:`SIE_getattr` value. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.name('AA')(3).sqrt() {call: {getattr: {call: {atomic:AA}({atomic:3})}.sqrt}()} """ obj = repr(self._sie_obj) return "{getattr: %s.%s}" % (obj, self._sie_attr) def _sie_referenced(self): r""" Returns a list of the immediate subexpressions of this :class:`SIE_subscript`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib.name('CDF').gen sage: sie._sie_referenced() [{atomic:CDF}] """ return [self._sie_obj] def _sie_format(self, sif): r""" Return the formatted string value of this expression, and an indication that it is an attribute reference. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib.name('AA').common_polynomial sage: sie._sie_prepare(sif) sage: sie._sie_format(sif) ('AA.common_polynomial', 34) """ obj = sif.format(self._sie_obj, _prec_exponent) return '%s.%s' % (obj, self._sie_attr), _prec_attribute class SIE_tuple(SageInputExpression): r""" This class represents a tuple or list node in a :func:`sage_input` expression tree. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib((1, 'howdy')) {tuple: ({atomic:1}, {atomic:'howdy'})} sage: sib(["lists"]) {list: ({atomic:'lists'})} """ def __init__(self, sib, values, is_list): r""" Initialize an instance of :class:`SIE_tuple`. INPUT: - ``sib`` -- a :class:`SageInputBuilder` - ``values`` -- a list of :class:`SageInputExpression`s representing the elements of this tuple - ``is_list`` -- is True if this class represents a list, False for a tuple EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib((3.5, -2)) {tuple: ({atomic:3.5}, {unop:- {atomic:2}})} sage: sib(["Hello", "world"]) {list: ({atomic:'Hello'}, {atomic:'world'})} """ super(SIE_tuple, self).__init__(sib) self._sie_values = values self._sie_is_list = is_list def __repr__(self): r""" Returns a string representing this :class:`SIE_tuple` value. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib((2,3,5)) {tuple: ({atomic:2}, {atomic:3}, {atomic:5})} sage: sib(["Hello", "world"]) {list: ({atomic:'Hello'}, {atomic:'world'})} """ kind = "list" if self._sie_is_list else "tuple" return "{%s: (%s)}" % \ (kind, ', '.join([repr(v) for v in self._sie_values])) def _sie_referenced(self): r""" Returns a list of the immediate subexpressions of this :class:`SIE_tuple`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib((ZZ, GF(5))) sage: sie._sie_referenced() [{atomic:ZZ}, {call: {atomic:GF}({atomic:5})}] """ return self._sie_values def _sie_format(self, sif): r""" Return the formatted string value of this tuple or list, and an indication that it is atomic (never needs to be parenthesized). 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: for v in ((), (1,), (1,2), [], [1], [1,2]): ... sie = sib(v) ... sie._sie_prepare(sif) ... sie._sie_format(sif) ('()', 42) ('(1,)', 42) ('(1, 2)', 42) ('[]', 42) ('[1]', 42) ('[1, 2]', 42) """ values = [sif.format(val, 0) for val in self._sie_values] if self._sie_is_list: return '[%s]' % ', '.join(values), _prec_atomic else: if len(values) == 1: return '(%s,)' % values[0], _prec_atomic else: return '(%s)' % ', '.join(values), _prec_atomic class SIE_dict(SageInputExpression): r""" This class represents a dict node in a :func:`sage_input` expression tree. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.dict([('TeX', RR(pi)), ('Metafont', RR(e))]) {dict: {{atomic:'TeX'}:{call: {atomic:RR}({atomic:3.1415926535897931})}, {atomic:'Metafont'}:{call: {atomic:RR}({atomic:2.7182818284590451})}}} sage: sib.dict({-40:-40, 0:32, 100:212}) {dict: {{unop:- {atomic:40}}:{unop:- {atomic:40}}, {atomic:0}:{atomic:32}, {atomic:100}:{atomic:212}}} """ def __init__(self, sib, entries): r""" Initialize an instance of :class:`SIE_dict`. INPUT: - ``sib`` -- a :class:`SageInputBuilder` - ``entries`` -- a list of pairs of :class:`SageInputExpression`s representing the entries of this dict EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.dict({'us':'good', 'them':'bad'}) {dict: {{atomic:'them'}:{atomic:'bad'}, {atomic:'us'}:{atomic:'good'}}} sage: sib.dict([(10, 'PS2'), (12, 'PS2'), (13, 'PS3')]) {dict: {{atomic:10}:{atomic:'PS2'}, {atomic:12}:{atomic:'PS2'}, {atomic:13}:{atomic:'PS3'}}} """ super(SIE_dict, self).__init__(sib) self._sie_entries = entries def __repr__(self): r""" Returns a string representing this :class:`SIE_dict` value. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.dict({'keaton':'general', 'chan':'master'}) {dict: {{atomic:'keaton'}:{atomic:'general'}, {atomic:'chan'}:{atomic:'master'}}} """ return "{dict: {%s}}" % \ ', '.join([repr(key) + ':' + repr(val) for key,val in self._sie_entries]) def _sie_referenced(self): r""" Returns a list of the immediate subexpressions of this :class:`SIE_dict`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib.dict({1:'beguilement', 2:'legacy', 3:'passage'}) sage: sie._sie_referenced() [{atomic:1}, {atomic:2}, {atomic:3}, {atomic:'beguilement'}, {atomic:'legacy'}, {atomic:'passage'}] """ return [k for k,v in self._sie_entries] + [v for k,v in self._sie_entries] def _sie_format(self, sif): r""" Return the formatted string value of this dict, and an indication that it is atomic (never needs to be parenthesized). EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib.dict({'carnivores':1, 'thinking':2, 'triumph':3}) sage: sie._sie_prepare(sif) sage: sie._sie_format(sif) ("{'carnivores':1, 'thinking':2, 'triumph':3}", 42) """ return "{%s}" %\ ', '.join(sif.format(k, 0)+':'+sif.format(v, 0) for k,v in self._sie_entries), _prec_atomic class SIE_binary(SageInputExpression): r""" This class represents an arithmetic expression with a binary operator and its two arguments, in a :func:`sage_input` expression tree. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib(3)+5 {binop:+ {atomic:3} {atomic:5}} """ def __init__(self, sib, op, lhs, rhs): r""" Initialize an instance of :class:`SIE_binary`. 
INPUT: - ``sib`` - a :class:`SageInputBuilder` - ``op`` - a string representing a binary operator, such as '*' or '%' - ``lhs`` - a :class:`SageInputExpression` - ``rhs`` - a :class:`SageInputExpression` EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib(3)*5 {binop:* {atomic:3} {atomic:5}} """ super(SIE_binary, self).__init__(sib) self._sie_op = op self._sie_operands = (lhs, rhs) def __repr__(self): r""" Returns a string representing this :class:`SIE_binary` value. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib(7)/9 {binop:/ {atomic:7} {atomic:9}} """ return "{binop:%s %s %s}" % (self._sie_op, repr(self._sie_operands[0]), repr(self._sie_operands[1])) def _sie_referenced(self): r""" Returns a tuple of the immediate subexpressions of this :class:`SIE_binary`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib.name('x') + 5 sage: sie._sie_referenced() ({atomic:x}, {atomic:5}) """ return self._sie_operands def _sie_format(self, sif): r""" Return the formatted string value of this expression, and the precedence of the top-level operator in the expression. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: x = sib.name('x') sage: y = sib.name('y') sage: for v in (x+y, x*y, x**y): ... v._sie_prepare(sif) ... 
v._sie_format(sif) ('x + y', 24) ('x*y', 26) ('x^y', 32) Note that the printing for $x^y$ varies depending on whether the preparser is enabled.:: sage: sibnp = SageInputBuilder(preparse=False) sage: sif = SageInputFormatter() sage: v = x**y sage: v._sie_prepare(sif) sage: v._sie_format(sif) ('x^y', 32) TESTS:: sage: x = sib.name('x') sage: y = sib.name('y') sage: z = sib.name('z') sage: sib.result((x+y)+z) x + y + z sage: sib.result(x+(y+z)) x + (y + z) sage: sib.result((x*y)*z) x*y*z sage: sib.result(x*(y*z)) x*(y*z) sage: sib.result(x+(y*z)) x + y*z sage: sib.result((x+y)*z) (x + y)*z sage: sib.result((x^y)^z) (x^y)^z sage: sib.result(x^(y^z)) x^y^z """ op = self._sie_op fop = op if op == '**': lhs = sif.format(self._sie_operands[0], _prec_exponent+1) rhs = sif.format(self._sie_operands[1], _prec_exponent) if self._sie_builder.preparse(): return '%s^%s' % (lhs, rhs), _prec_exponent else: return '%s**%s' % (lhs, rhs), _prec_exponent if op == '*': prec = _prec_muldiv elif op == '/': prec = _prec_muldiv elif op == '+': fop = ' + ' prec = _prec_addsub elif op == '-': fop = ' - ' prec = _prec_addsub else: raise ValueError('Unhandled op {} in SIE_binary'.format(op)) lhs = sif.format(self._sie_operands[0], prec) rhs = sif.format(self._sie_operands[1], prec+1) return '%s%s%s' % (lhs, fop, rhs), prec class SIE_unary(SageInputExpression): r""" This class represents an arithmetic expression with a unary operator and its argument, in a :func:`sage_input` expression tree. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: -sib(256) {unop:- {atomic:256}} """ def __init__(self, sib, op, operand): r""" Initialize an instance of :class:`SIE_unary`. 
INPUT: - ``sib`` - a :class:`SageInputBuilder` - ``op`` - a string representing a unary operator, such as '-' - ``operand`` -- a :class:`SageInputExpression` EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: -sib(3) {unop:- {atomic:3}} """ super(SIE_unary, self).__init__(sib) self._sie_op = op self._sie_operand = operand def __repr__(self): r""" Returns a string representing this :class:`SIE_unary` value. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: -sib(15) {unop:- {atomic:15}} """ return "{unop:%s %s}" % (self._sie_op, repr(self._sie_operand)) def _sie_referenced(self): r""" Returns a list of the immediate subexpressions of this :class:`SIE_unary`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = -sib.name('x') sage: sie._sie_referenced() [{atomic:x}] """ return [self._sie_operand] def _sie_format(self, sif): r""" Return the formatted string value of this expression, and the precedence of the top-level operator in the expression. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: x = sib.name('x') sage: v = -x sage: v._sie_prepare(sif) sage: v._sie_format(sif) ('-x', 28) sage: v = ~x sage: v._sie_prepare(sif) sage: v._sie_format(sif) ('~x', 30) TESTS:: sage: x = sib.name('x') sage: y = sib.name('y') sage: sib.result((-x)+y) -x + y sage: sib.result(x+(-y)) x + -y sage: sib.result(-(x+y)) -(x + y) sage: sib.result(-(-x)) --x sage: sib.result(x-(-y)) x - -y We assume that -(x*y) is always equal to (-x)*y. Using this assumption, we print -(x*y) as -x*y, which parses as (-x)*y.:: sage: sib.result(-(x*y)) -x*y sage: sib.result((-x)*y) -x*y sage: sib.result(x*(-y)) x*-y """ op = self._sie_op fop = op rprec = None if op == '-': # We print -(a*b) as -a*b, even though that will parse as # (-a)*b. 
prec = _prec_muldiv rprec = _prec_negate elif op == '~': prec = _prec_bitnot else: raise ValueError('Unhandled op {} in SIE_unary'.format(op)) if rprec is None: rprec = prec return '%s%s' % (fop, sif.format(self._sie_operand, prec)), rprec def _sie_is_negation(self): r""" Test whether a :class:`SageInputExpression` is a negation. Despite the obscure name, this is intended to be a public method. This is used in the \method{_sage_input_} method for :class:`ComplexNumber`, so that ``sage_input(CC(-3))`` will produce ``-CC(3)`` instead of ``CC(-3)``. (This is preferred so that you get ``x - CC(3)`` instead of ``x + CC(-3)``.) EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: x = sib.name('x') sage: v = -x sage: def mk_CC(b): ... if b._sie_is_negation(): ... return -sib.name('CC')(b._sie_operand) ... else: ... return sib.name('CC')(b) sage: mk_CC(x) {call: {atomic:CC}({atomic:x})} sage: mk_CC(v) {unop:- {call: {atomic:CC}({atomic:x})}} """ return self._sie_op == '-' class SIE_gens_constructor(SageInputExpression): r""" This class represents an expression that can create a \sage parent with named generators, optionally using the \sage preparser generators syntax (like ``K.<x> = QQ[]``). EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: qq = sib.name('QQ') sage: sib.parent_with_gens("some parent", qq['x'], ... ('x',), 'QQx', ... gens_syntax=sib.empty_subscript(qq)) {constr_parent: {subscr: {atomic:QQ}[{atomic:'x'}]} with gens: ('x',)} """ def __init__(self, sib, constr, gen_names, gens_syntax=None): r""" Initialize an instance of :class:`SIE_gens_constructor`. 
INPUT: - ``sib`` - a :class:`SageInputBuilder` - ``constr`` - a :class:`SageInputExpression` for constructing this parent ``normally`` - ``gen_names`` - a tuple of generator names - ``gens_syntax`` -- an optional :class:`SageInputExpression` for constructing this parent using the \sage preparser generators syntax EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: qq = sib.name('QQ') sage: sib.parent_with_gens("some parent", qq['x'], ... ('x',), 'QQx', ... gens_syntax=sib.empty_subscript(qq)) {constr_parent: {subscr: {atomic:QQ}[{atomic:'x'}]} with gens: ('x',)} """ super(SIE_gens_constructor, self).__init__(sib) self._sie_constr = constr self._sie_gen_names = gen_names self._sie_gens = None # will be overwritten from .parent_with_gens() self._sie_gens_constr = gens_syntax self._sie_assign_gens = False self._sie_generated = False def __repr__(self): r""" Returns a string representing this :class:`SIE_gens_constructor` value. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: qq = sib.name('QQ') sage: sib.parent_with_gens("some parent", qq['x'], ... ('x',), 'QQx', ... gens_syntax=sib.empty_subscript(qq)) {constr_parent: {subscr: {atomic:QQ}[{atomic:'x'}]} with gens: ('x',)} """ return "{constr_parent: %s with gens: %s}" % (repr(self._sie_constr), self._sie_gen_names) def _sie_referenced(self): r""" Returns a list of the immediate subexpressions of this :class:`SIE_gens_constructor`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: qq = sib.name('QQ') sage: gc = sib.parent_with_gens("some parent", qq['x'], ... ('x',), 'QQx', ... gens_syntax=sib.empty_subscript(qq)) sage: gc._sie_referenced() [{subscr: {atomic:QQ}[{atomic:'x'}]}] """ # This is used to determine if some expressions should be replaced # by variables (if the expression has more than one parent in # the expression DAG). 
We assume that all expressions in # self._sie_gens_constr also occur in self._sie_constr. return [self._sie_constr] def _sie_gens_referenced(self, sif): r""" Mark that at least one of the generators in this :class:`SIE_gens_constructor` is used. (This means we will actually construct all of the generators.) EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: qq = sib.name('QQ') sage: gc = sib.parent_with_gens("some parent", qq['x'], ... ('x',), 'QQx', ... gens_syntax=sib.empty_subscript(qq)) sage: gc._sie_assign_gens False sage: gc._sie_gens_referenced(sif) sage: gc._sie_assign_gens True """ self._sie_assign_gens = True self._sie_require_varname(sif) for gen in self._sie_gens: gen._sie_require_varname(sif) def _sie_add_command(self, sif): r""" Build commands to construct this parent and (if necessary) its associated generators. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: qq = sib.name('QQ') sage: gc = sib.parent_with_gens("some parent", qq['x'], ... ('x',), 'QQx', ... gens_syntax=sib.empty_subscript(qq)) sage: gc._sie_gens_referenced(sif) sage: gc._sie_prepare(sif) sage: gc._sie_add_command(sif) sage: sif._commands 'QQx.<x> = QQ[]\n' TESTS: There are several tricky cases here. We prefer the \sage preparser generators syntax:: sage: sage_input(polygen(ZZ)) R.<x> = ZZ[] x But of course we can't use that without the preparser:: sage: sage_input(polygen(ZZ), preparse=False) R = ZZ['x'] x = R.gen() x We also can't use the preparser syntax if there is a conflict between generator names. 
For example, this works:: sage: sage_input((polygen(ZZ), polygen(GF(17), 'y'))) R1.<x> = ZZ[] R2.<y> = GF(17)[] (x, y) but this can't use the preparser syntax.:: sage: sage_input((polygen(ZZ), polygen(GF(17)))) R1 = ZZ['x'] x1 = R1.gen() R2 = GF(17)['x'] x2 = R2.gen() (x1, x2) If we never use the generators, then we don't bother with the preparser syntax.:: sage: sage_input((ZZ['x'], ZZ['x'], GF(17)['y'])) R = ZZ['x'] (R, R, GF(17)['y']) """ if not self._sie_generated: if self._sie_builder.preparse() and \ self._sie_gens_constr is not None and \ all(g._sie_got_preferred(sif) for g in self._sie_gens): s, _ = self._sie_gens_constr._sie_format(sif) sif._commands += '%s.<%s> = %s\n' % (self._sie_get_varname(sif), ','.join(self._sie_gen_names), s) else: s, _ = self._sie_constr._sie_format(sif) sif._commands += '%s = %s\n' % (self._sie_get_varname(sif), s) if self._sie_assign_gens: if len(self._sie_gens) == 1: sif._commands += '%s = %s.gen()\n' % (self._sie_gens[0]._sie_get_varname(sif), self._sie_get_varname(sif)) else: sif._commands += '%s = %s.gens()\n' % (','.join([g._sie_get_varname(sif) for g in self._sie_gens]), self._sie_get_varname(sif)) self._sie_generated = True def _sie_format(self, sif): r""" Return the formatted string value of this parent-construction expression, and its precedence. As a side effect, if the generators of this parent are used, this adds commands to assign the generators to names. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: qq = sib.name('QQ') sage: gc = sib.parent_with_gens("some parent", qq['x'], ... ('x',), 'QQx', ... 
gens_syntax=sib.empty_subscript(qq)) sage: gc._sie_gens_referenced(sif) sage: gc._sie_prepare(sif) sage: gc._sie_format(sif) ('QQx', 42) sage: sif._commands 'QQx.<x> = QQ[]\n' """ if self._sie_assign_gens: self._sie_add_command(sif) return self._sie_get_varname(sif), _prec_atomic return self._sie_constr._sie_format(sif) class SIE_gen(SageInputExpression): r""" This class represents a named generator of a parent with named generators. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.gen(ZZ['x']) {gen:x {constr_parent: {subscr: {atomic:ZZ}[{atomic:'x'}]} with gens: ('x',)}} """ def __init__(self, sib, parent, name): r""" Initializes an instance of :class:`SIE_gen`. INPUT: - ``sib`` - a :class:`SageInputBuilder` - ``parent`` - a :class:`SIE_gens_constructor` - ``name`` - a string with the name of this generator EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.gen(ZZ['x']) # indirect doctest {gen:x {constr_parent: {subscr: {atomic:ZZ}[{atomic:'x'}]} with gens: ('x',)}} """ super(SIE_gen, self).__init__(sib) self._sie_parent = parent self._sie_preferred_varname = name def __repr__(self): r""" Returns a string representing this :class:`SIE_gen` value. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.gen(ZZ['x']) # indirect doctest {gen:x {constr_parent: {subscr: {atomic:ZZ}[{atomic:'x'}]} with gens: ('x',)}} """ return "{gen:%s %s}" % (self._sie_preferred_varname, repr(self._sie_parent)) def _sie_is_simple(self): r""" Report that :class:`SIE_gen` values are single tokens. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.gen(ZZ['x'])._sie_is_simple() True """ return True def _sie_prepare(self, sif): r""" We override the \method{_sie_prepare} method from :class:`SageInputExpression` to additionally mark the parent of this generator that the generator names must be assigned. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib.gen(GF(13)['z']) sage: sie._sie_parent._sie_assign_gens False sage: sie._sie_prepare(sif) sage: sie._sie_parent._sie_assign_gens True """ super(SIE_gen, self)._sie_prepare(sif) self._sie_parent._sie_gens_referenced(sif) def _sie_format(self, sif): r""" Return the formatted string value of this named generator, and an indication that it is atomic. As a side effect, this generates commands to assign the generators of the parent to variables. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib.gen(GF(41)['x']) sage: sie._sie_prepare(sif) sage: sie._sie_format(sif) ('x', 42) sage: sif._commands 'R.<x> = GF(41)[]\n' """ self._sie_parent._sie_add_command(sif) return self._sie_get_varname(sif), _prec_atomic def _sie_got_preferred(self, sif): r""" Check whether the :class:`SageInputFormatter` assigned us a variable name which is the same as the name of the generator name. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter First we verify that if we use two generators with different names, then they get their preferred names.:: sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: v = sib.gen(GF(2)['x']); w = sib.gen(GF(3)['y']) sage: v._sie_prepare(sif); w._sie_prepare(sif) sage: v._sie_got_preferred(sif) True sage: w._sie_got_preferred(sif) True Now, we repeat the experiment, except that the generators now have the same names. In this case, the :class:`SageInputFormatter` will not use the generator name as the variable name, because of this conflict.:: sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: v = sib.gen(GF(2)['x']); w = sib.gen(GF(3)['x']) sage: v._sie_prepare(sif); w._sie_prepare(sif) sage: v._sie_got_preferred(sif) False sage: w._sie_got_preferred(sif) False """ return self._sie_get_varname(sif) == self._sie_preferred_varname class SIE_import_name(SageInputExpression): r""" This class represents a name which has been imported from a module. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.import_name('sage.rings.integer', 'make_integer') {import:sage.rings.integer/make_integer} sage: sib.import_name('sage.foo', 'happy', 'sad') {import:sage.foo/happy as sad} """ def __init__(self, sib, module, name, alt_name=None): r""" Initializes an instance of :class:`SIE_import_name`. 
INPUT: - ``sib`` - a :class:`SageInputBuilder` - ``module`` - a module name - ``name`` - an object name - ``alt_name`` - an alternate object name, or None (the default) to use name EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.import_name('sage.rings.integer', 'make_integer') # indirect doctest {import:sage.rings.integer/make_integer} sage: sib.import_name('sage.foo', 'happy', 'sad') {import:sage.foo/happy as sad} """ super(SIE_import_name, self).__init__(sib) self._sie_formatted = False self._sie_module_name = module self._sie_object_name = name if alt_name is None: self._sie_preferred_varname = name else: self._sie_preferred_varname = alt_name def __repr__(self): r""" Returns a string representing this :class:`SIE_import_name` value. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.import_name('sage.rings.integer', 'make_integer') # indirect doctest {import:sage.rings.integer/make_integer} sage: sib.import_name('sage.foo', 'happy', 'sad') {import:sage.foo/happy as sad} """ return "{import:%s/%s%s}" % (self._sie_module_name, self._sie_object_name, "" if self._sie_object_name == self._sie_preferred_varname else " as %s" % self._sie_preferred_varname) def _sie_is_simple(self): r""" Report that :class:`SIE_import_name` values are single tokens. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.import_name('sage.rings.integer', 'make_integer')._sie_is_simple() True """ return True def _sie_prepare(self, sif): r""" We override the \method{_sie_prepare} method from :class:`SageInputExpression` to request a variable name. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib.import_name('sage.rings.integer', 'make_integer') sage: sie._sie_requested_varname False sage: sie._sie_prepare(sif) sage: sie._sie_requested_varname True """ super(SIE_import_name, self)._sie_prepare(sif) self._sie_require_varname(sif) def _sie_format(self, sif): r""" Return the formatted string value of this import, and an indication that it is atomic. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: v1 = sib.import_name('sage.rings.integer', 'make_integer') sage: v2 = sib.import_name('sage.foo', 'happy', 'sad') sage: sie = v1(v2) sage: sie._sie_prepare(sif) sage: sie._sie_format(sif) ('make_integer(sad)', 40) sage: print sif._commands from sage.rings.integer import make_integer from sage.foo import happy as sad """ name = self._sie_get_varname(sif) if self._sie_formatted: # Only run the import command once return name, _prec_atomic self._sie_formatted = True rename = '' if name != self._sie_object_name: rename = ' as ' + name sif._commands += 'from %s import %s%s\n' % (self._sie_module_name, self._sie_object_name, rename) return name, _prec_atomic class SIE_assign(SageInputExpression): r""" This class represents an assignment command. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.assign(sib.name('foo').x, sib.name('pi')) {assign: {getattr: {atomic:foo}.x} {atomic:pi}} """ def __init__(self, sib, lhs, rhs): r""" Initializes an instance of :class:`SIE_assign`. 
INPUT: - ``sib`` - a :class:`SageInputBuilder` - ``lhs`` - the left-hand side of the assignment - ``rhs`` - the right-hand side of the assignment EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.assign(sib.name('foo').x, sib.name('pi')) {assign: {getattr: {atomic:foo}.x} {atomic:pi}} """ super(SIE_assign, self).__init__(sib) self._sie_lhs = lhs self._sie_rhs = rhs def __repr__(self): r""" Returns a string representing this :class:`SIE_assign` command. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sib.assign(sib.name('foo').x, sib.name('pi')) {assign: {getattr: {atomic:foo}.x} {atomic:pi}} """ return "{assign: %s %s}" % (repr(self._sie_lhs), repr(self._sie_rhs)) def _sie_referenced(self): r""" Returns a list of the immediate subexpressions of this :class:`SIE_assign`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder sage: sib = SageInputBuilder() sage: sie = sib.assign(sib.name('foo').x, sib.name('pi')) sage: sie._sie_referenced() [{getattr: {atomic:foo}.x}, {atomic:pi}] """ return [self._sie_lhs, self._sie_rhs] def _sie_format(self, sif): r""" Return the formatted string value of this :class:`SIE_assign` as an expression. Since an assignment is a statement, not an expression, always raises an error. EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib.assign(sib.name('foo').x, sib.name('pi')) sage: sie._sie_prepare(sif) sage: sie._sie_format(sif) Traceback (most recent call last): ... ValueError: Cannot format SIE_assign as expression """ raise ValueError("Cannot format SIE_assign as expression") def _sie_format_statement(self, sif): r""" Return the formatted string of this :class:`SIE_assign` as a statement. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib.assign(sib.name('foo').x, sib.name('pi')) sage: sie._sie_prepare(sif) sage: sie._sie_format_statement(sif) 'foo.x = pi' """ return '%s = %s' % (sif.format(self._sie_lhs, 0), sif.format(self._sie_rhs, 0)) class SageInputFormatter: r""" An instance of this class is used to keep track of variable names and a sequence of generated commands during the :func:`sage_input` formatting process. """ def __init__(self): r""" Initialize an instance of :class:`SageInputFormatter`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputFormatter sage: sif = SageInputFormatter() """ self._commands = '' self._names = set() self._dup_names = {} def format(self, e, prec): r""" Format a Sage input expression into a string. INPUT: - ``e`` - a :class:`SageInputExpression` - ``prec`` - an integer representing a precedence level First, we check to see if ``e`` should be replaced by a variable. If so, we generate the command to assign the variable, and return the name of the variable. Otherwise, we format the expression by calling its \method{_sie_format} method, and add parentheses if necessary. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputBuilder, SageInputFormatter sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib(GF(5)) Here we ``cheat`` by calling \method{_sie_prepare} twice, to make it use a variable.:: sage: sie._sie_prepare(sif) sage: sie._sie_prepare(sif) sage: sif._commands '' sage: sif.format(sie, 0) 'GF_5' sage: sif._commands 'GF_5 = GF(5)\n' We demonstrate the use of commands, by showing how to construct code that will produce a random matrix:: sage: sib = SageInputBuilder() sage: sif = SageInputFormatter() sage: sie = sib.name('matrix')(sib.name('ZZ'), 10, 10) sage: sib.command(sie, sie.randomize()) sage: sie._sie_prepare(sif) sage: sif._commands '' sage: sif.format(sie, 0) 'si' sage: sif._commands 'si = matrix(ZZ, 10, 10)\nsi.randomize()\n' """ if e._sie_use_var: if not e._sie_generated: s, _ = e._sie_format(self) # In complicated situations, this can get called # recursively... if not e._sie_generated: self._commands += '%s = %s\n' % (e._sie_get_varname(self), s) e._sie_generated = True formatted = e._sie_get_varname(self) else: s, iprec = e._sie_format(self) if iprec < prec: s = '(' + s + ')' formatted = s commands = e._sie_commands e._sie_commands = [] for cmd in commands: s_cmd = cmd._sie_format_statement(self) self._commands += s_cmd + '\n' return formatted def register_name(self, name): r""" Register that some value would like to use a given name. If only one request for a name is received, then we will use the requested name; otherwise, we will add numbers to the end of the name to make it unique. If the input name is ``None``, then it is treated as a name of ``'si'``. 
EXAMPLES:: sage: from sage.misc.sage_input import SageInputFormatter sage: sif = SageInputFormatter() sage: sif._names, sif._dup_names (set(), {}) sage: sif.register_name('x') sage: sif.register_name('y') sage: sif._names, sif._dup_names ({'x', 'y'}, {}) sage: sif.register_name('x') sage: sif._names, sif._dup_names ({'x', 'y'}, {'x': 0}) """ if name is None: name = 'si' if name in self._names: self._dup_names[name] = 0 else: self._names.add(name) def get_name(self, name): r""" Return a name corresponding to a given requested name. If only one request for a name is received, then we will use the requested name; otherwise, we will add numbers to the end of the name to make it unique. If the input name is ``None``, then it is treated as a name of ``'si'``. EXAMPLES:: sage: from sage.misc.sage_input import SageInputFormatter sage: sif = SageInputFormatter() sage: names = ('x', 'x', 'y', 'z') sage: for n in names: sif.register_name(n) sage: for n in names: sif.get_name(n) 'x1' 'x2' 'y' 'z' """ if name is None: name = 'si' if name in self._dup_names: next = self._dup_names[name] + 1 self._dup_names[name] = next return name + str(next) else: return name def verify_same(a, b): r""" Verify that two Sage values are the same. This is an extended equality test; it checks that the values are equal and that their parents are equal. (For values which are not Elements, the types are checked instead.) If the values are the same, we return ``None``; otherwise, we raise an exception. EXAMPLES:: sage: from sage.misc.sage_input import verify_same sage: verify_same(1, 1) sage: verify_same(1, 2) Traceback (most recent call last): ... AssertionError: Expected 1 == 2 sage: verify_same(1, 1r) Traceback (most recent call last): ... AttributeError: 'int' object has no attribute 'parent' sage: verify_same(1r, 1) Traceback (most recent call last): ... assert(type(a) == type(b)) AssertionError sage: verify_same(5, GF(7)(5)) Traceback (most recent call last): ... 
assert(a.parent() == b.parent()) AssertionError """ from sage.structure.element import is_Element if is_Element(a): assert(a.parent() == b.parent()) else: assert(type(a) is type(b)) if isinstance(a, float): # The IEEE floating-point standard recommends that NaN != NaN # Sage doesn't do this for RDF or RR, but Python does for floats. # So we need to consider the cases: a is/is not NaN, b is/is not NaN. if not (a == a): # a is a NaN; so confirm that b is a NaN assert not (b == b) else: # a is not NaN. If b is NaN, then the assertion will fail. assert a == b return from sage.rings.real_mpfi import is_RealIntervalFieldElement from sage.rings.complex_interval import is_ComplexIntervalFieldElement if is_RealIntervalFieldElement(a) or is_ComplexIntervalFieldElement(a): assert(cmp(a, b) == 0), "Expected %s == %s" % (a, b) else: assert(a == b), "Expected %s == %s" % (a, b) def verify_si_answer(x, answer, preparse): r""" Verify that evaluating ``answer`` gives a value equal to ``x`` (with the same parent/type). If ``preparse`` is ``True`` or ``False``, then we evaluate ``answer`` with the preparser enabled or disabled, respectively; if ``preparse`` is ``None``, then we evaluate ``answer`` both with the preparser enabled and disabled and check both results. On success, we return ``None``; on failure, we raise an exception. INPUT: - ``x`` - an arbitrary Sage value - ``answer`` - a string, or a :class:`SageInputAnswer` - ``preparse`` -- ``True``, ``False``, or ``None`` EXAMPLES:: sage: from sage.misc.sage_input import verify_si_answer sage: verify_si_answer(1, '1', True) sage: verify_si_answer(1, '1', False) Traceback (most recent call last): ... 
AttributeError: 'int' object has no attribute 'parent' sage: verify_si_answer(1, 'ZZ(1)', None) """ from sage.misc.sage_eval import sage_eval if preparse is None: verify_same(x, sage_eval(answer, preparse=True)) verify_same(x, sage_eval(answer, preparse=False)) else: verify_same(x, sage_eval(answer, preparse=preparse)) class SageInputAnswer(tuple): r""" This class inherits from tuple, so it acts like a tuple when passed to :func:`sage_eval`; but it prints as a sequence of commands. EXAMPLES:: sage: from sage.misc.sage_input import SageInputAnswer sage: v = SageInputAnswer('x = 22\n', 'x/7'); v x = 22 x/7 sage: isinstance(v, tuple) True sage: v[0] 'x = 22\n' sage: v[1] 'x/7' sage: len(v) 2 sage: v = SageInputAnswer('', 'sin(3.14)', {'sin': math.sin}); v LOCALS: sin: <built-in function sin> sin(3.14) sage: v[0] '' sage: v[1] 'sin(3.14)' sage: v[2] {'sin': <built-in function sin>} """ def __new__(cls, cmds, expr, locals=None): r""" Construct an instance of :class:`SageInputAnswer`. EXAMPLES:: sage: from sage.misc.sage_input import SageInputAnswer sage: v = SageInputAnswer('', 'sin(3.14)', {'sin': math.sin}); v LOCALS: sin: <built-in function sin> sin(3.14) sage: v[0] '' sage: v[1] 'sin(3.14)' sage: v[2] {'sin': <built-in function sin>} """ if locals: return tuple.__new__(cls, (cmds, expr, locals)) else: return tuple.__new__(cls, (cmds, expr)) def __repr__(self): r""" Return a string representation for a :class:`SageInputAnswer`, such that if you evaluate this :class:`SageInputAnswer` at the \sage command line, you get a result in a nice form ready to copy-and-paste. EXAMPLES:: sage: from sage.misc.sage_input import SageInputAnswer sage: v = SageInputAnswer('', 'sin(3.14)', {'sin': math.sin}); v LOCALS: sin: <built-in function sin> sin(3.14) """ if len(self) == 2: return self[0] + self[1] locals = self[2] locals_text = ''.join(' %s: %r\n' % (k, v) for k, v in locals.iteritems()) return 'LOCALS:\n' + locals_text + self[0] + self[1]
33.624757
151
0.56051
But since we store x, that can't happen; self._id_cache[id(x)] = (x, sie) sie._sie_preferred_varname = name def import_name(self, module, name, alt_name=None): return SIE_import_name(self, module, name, alt_name) def assign(self, e, val): e = self(e) val = self(val) return SIE_assign(self, e, val) def command(self, v, cmd): v = self(v) cmd = self(cmd) v._sie_commands.append(cmd) def dict(self, entries): if isinstance(entries, dict): entries = list(entries.items()) entries = [(self(key),self(val)) for (key,val) in entries] return SIE_dict(self, entries) def getattr(self, sie, attr): return SIE_getattr(self, self(sie), attr) def empty_subscript(self, parent): return SIE_subscript(self, parent, None) def use_variable(self, sie, name): sie._sie_preferred_varname = name sie._sie_request_use_var = True def share(self, sie): sie._sie_share = True def parent_with_gens(self, parent, sie, gen_names, name, gens_syntax=None): v = SIE_gens_constructor(self, sie, gen_names, gens_syntax=gens_syntax) self.cache(parent, v, name) gens = [SIE_gen(self, v, n) for n in gen_names] self._parent_gens[parent] = gens v._sie_gens = gens return v def gen(self, parent, n=0): if not parent in self._parent_gens: self(parent) if not parent in self._parent_gens: raise ValueError("{} did not register generators for sage_input".format(parent)) gens = self._parent_gens[parent] if n > len(gens): raise ValueError("{} registered only {} generators for sage_input".format(parent, len(gens))) return gens[n] def prod(self, factors, simplify=False): neg = False factors = [self(factor) for factor in factors] if simplify: i = 0 while i < len(factors): factor = factors[i] while isinstance(factor, SIE_unary) and factor._sie_op == '-': neg = not neg factor = factor._sie_operand factors[i] = factor if isinstance(factor, SIE_literal_stringrep) and factor._sie_value == '0': factors = [factor] neg = False break if isinstance(factor, SIE_literal_stringrep) and factor._sie_value == '1': factors[i:i+1] = [] else: i += 1 
if len(factors) == 0: factors.append(SIE_literal_stringrep(self, '1')) prod = factors[0] for factor in factors[1:]: prod = prod * factor if neg: prod = -prod return prod def sum(self, terms, simplify=False): terms = [self(term) for term in terms] if simplify: i = 0 while i < len(terms): term = terms[i] if isinstance(term, SIE_literal_stringrep) and term._sie_value == '0': terms[i:i+1] = [] else: i += 1 if len(terms) == 0: terms.append(SIE_literal_stringrep(self, '0')) sum = terms[0] for term in terms[1:]: negate = False while simplify and isinstance(term, SIE_unary) and term._sie_op == '-': negate = not negate term = term._sie_operand if negate: sum = sum - term else: sum = sum + term return sum def result(self, e): sif = SageInputFormatter() # Even if use_variable was called on e, don't automatically e._sie_request_use_var = False e._sie_prepare(sif) s = sif.format(e, 0) locals = self._locals if len(locals): return SageInputAnswer(sif._commands, sif.format(e, 0), locals) else: return SageInputAnswer(sif._commands, sif.format(e, 0)) # the Python reference manual. 
_prec_lambda = 2 _prec_or = 4 _prec_and = 6 _prec_not = 8 _prec_membership = 10 _prec_identity = 12 _prec_comparison = 14 _prec_bitor = 16 _prec_bitxor = 18 _prec_bitand = 20 _prec_shift = 22 _prec_addsub = 24 _prec_muldiv = 26 _prec_negate = 28 _prec_bitnot = 30 _prec_exponent = 32 _prec_attribute = 34 _prec_subscript = 36 _prec_slicing = 38 _prec_funcall = 40 _prec_atomic = 42 class SageInputExpression(object): def __init__(self, sib): self._sie_refcount = 0 self._sie_builder = sib self._sie_context = None self._sie_preferred_varname = None self._sie_varname = None self._sie_request_use_var = False self._sie_use_var = False self._sie_requested_varname = False self._sie_commands = [] def _sie_is_simple(self): return False def _sie_referenced(self): return [] def _sie_prepare(self, sif): if self._sie_context is not sif: self._sie_context = sif self._sie_refcount = 0 self._sie_refcount += 1 if self._sie_request_use_var: self._sie_require_varname(sif) self._sie_use_var = True if not self._sie_is_simple(): if self._sie_refcount == 2: self._sie_require_varname(sif) self._sie_use_var = True if self._sie_refcount == 1: for r in self._sie_referenced(): r._sie_prepare(sif) for r in self._sie_commands: r._sie_prepare(sif) def _sie_require_varname(self, sif): if not self._sie_requested_varname: sif.register_name(self._sie_preferred_varname) self._sie_requested_varname = True self._sie_generated = False def _sie_get_varname(self, sif): if self._sie_varname is None: self._sie_varname = sif.get_name(self._sie_preferred_varname) return self._sie_varname def _sie_is_negation(self): return False def __call__(self, *args, **kwargs): args = [self._sie_builder(_) for _ in args] for k in kwargs: kwargs[k] = self._sie_builder(kwargs[k]) return SIE_call(self._sie_builder, self, args, kwargs) def __getitem__(self, key): skey = self._sie_builder(key) return SIE_subscript(self._sie_builder, self, skey) def __getattr__(self, attr): return SIE_getattr(self._sie_builder, self, attr) def 
_rich_repr_(self, display_manager, **kwds): return None def __pow__(self, other): return self._sie_binop('**', other) def __mul__(self, other): return self._sie_binop('*', other) def __div__(self, other): return self._sie_binop('/', other) def __add__(self, other): return self._sie_binop('+', other) def __sub__(self, other): return self._sie_binop('-', other) def _sie_binop(self, op, other): return SIE_binary(self._sie_builder, op, self, self._sie_builder(other)) def __neg__(self): return self._sie_unop('-') def __invert__(self): return self._sie_unop('~') def __abs__(self): return self._sie_builder.name('abs')(self) def _sie_unop(self, op): return SIE_unary(self._sie_builder, op, self) def _sie_format(self, sif): raise NotImplementedError def _sie_format_statement(self, sif): result, prec = self._sie_format(sif) return result class SIE_literal(SageInputExpression): def _sie_is_simple(self): # Perhaps this should actually look at the formatted length of self, # and sometimes return false? If some 50-digit integer occurs multiple # times in an expression, it might be better to do the replacement. 
return not self._sie_share class SIE_literal_stringrep(SIE_literal): def __init__(self, sib, n): super(SIE_literal_stringrep, self).__init__(sib) self._sie_value = str(n) self._sie_share = False def __repr__(self): return "{atomic:%s}" % self._sie_value def _sie_format(self, sif): return self._sie_value, _prec_atomic class SIE_call(SageInputExpression): def __init__(self, sib, func, args, kwargs): super(SIE_call, self).__init__(sib) self._sie_func = func self._sie_args = args self._sie_kwargs = kwargs def __repr__(self): func = repr(self._sie_func) args = [repr(arg) for arg in self._sie_args] kwargs = sorted(k + '=' + repr(v) for k, v in self._sie_kwargs.iteritems()) all_args = ', '.join(args + kwargs) return "{call: %s(%s)}" % (func, all_args) def _sie_referenced(self): refs = self._sie_args[:] refs.append(self._sie_func) refs.extend(self._sie_kwargs.itervalues()) return refs def _sie_format(self, sif): func = sif.format(self._sie_func, _prec_attribute) args = [sif.format(arg, 0) for arg in self._sie_args] kwargs = sorted(k + '=' + sif.format(v, 0) for k, v in self._sie_kwargs.iteritems()) all_args = ', '.join(args + kwargs) return ('%s(%s)' % (func, all_args), _prec_funcall) class SIE_subscript(SageInputExpression): def __init__(self, sib, coll, key): super(SIE_subscript, self).__init__(sib) self._sie_coll = coll self._sie_key = key def __repr__(self): coll = repr(self._sie_coll) if self._sie_key is None: key = '' else: key = repr(self._sie_key) return "{subscr: %s[%s]}" % (coll, key) def _sie_referenced(self): refs = [self._sie_coll] if self._sie_key is not None: refs.append(self._sie_key) return refs def _sie_format(self, sif): coll = sif.format(self._sie_coll, _prec_attribute) if self._sie_key is None: key = '' else: key = sif.format(self._sie_key, 0) return '%s[%s]' % (coll, key), _prec_subscript class SIE_getattr(SageInputExpression): def __init__(self, sib, obj, attr): super(SIE_getattr, self).__init__(sib) self._sie_obj = obj self._sie_attr = attr def 
__repr__(self): obj = repr(self._sie_obj) return "{getattr: %s.%s}" % (obj, self._sie_attr) def _sie_referenced(self): return [self._sie_obj] def _sie_format(self, sif): obj = sif.format(self._sie_obj, _prec_exponent) return '%s.%s' % (obj, self._sie_attr), _prec_attribute class SIE_tuple(SageInputExpression): def __init__(self, sib, values, is_list): super(SIE_tuple, self).__init__(sib) self._sie_values = values self._sie_is_list = is_list def __repr__(self): kind = "list" if self._sie_is_list else "tuple" return "{%s: (%s)}" % \ (kind, ', '.join([repr(v) for v in self._sie_values])) def _sie_referenced(self): return self._sie_values def _sie_format(self, sif): values = [sif.format(val, 0) for val in self._sie_values] if self._sie_is_list: return '[%s]' % ', '.join(values), _prec_atomic else: if len(values) == 1: return '(%s,)' % values[0], _prec_atomic else: return '(%s)' % ', '.join(values), _prec_atomic class SIE_dict(SageInputExpression): def __init__(self, sib, entries): super(SIE_dict, self).__init__(sib) self._sie_entries = entries def __repr__(self): return "{dict: {%s}}" % \ ', '.join([repr(key) + ':' + repr(val) for key,val in self._sie_entries]) def _sie_referenced(self): return [k for k,v in self._sie_entries] + [v for k,v in self._sie_entries] def _sie_format(self, sif): return "{%s}" %\ ', '.join(sif.format(k, 0)+':'+sif.format(v, 0) for k,v in self._sie_entries), _prec_atomic class SIE_binary(SageInputExpression): def __init__(self, sib, op, lhs, rhs): super(SIE_binary, self).__init__(sib) self._sie_op = op self._sie_operands = (lhs, rhs) def __repr__(self): return "{binop:%s %s %s}" % (self._sie_op, repr(self._sie_operands[0]), repr(self._sie_operands[1])) def _sie_referenced(self): return self._sie_operands def _sie_format(self, sif): op = self._sie_op fop = op if op == '**': lhs = sif.format(self._sie_operands[0], _prec_exponent+1) rhs = sif.format(self._sie_operands[1], _prec_exponent) if self._sie_builder.preparse(): return '%s^%s' % (lhs, 
rhs), _prec_exponent else: return '%s**%s' % (lhs, rhs), _prec_exponent if op == '*': prec = _prec_muldiv elif op == '/': prec = _prec_muldiv elif op == '+': fop = ' + ' prec = _prec_addsub elif op == '-': fop = ' - ' prec = _prec_addsub else: raise ValueError('Unhandled op {} in SIE_binary'.format(op)) lhs = sif.format(self._sie_operands[0], prec) rhs = sif.format(self._sie_operands[1], prec+1) return '%s%s%s' % (lhs, fop, rhs), prec class SIE_unary(SageInputExpression): def __init__(self, sib, op, operand): super(SIE_unary, self).__init__(sib) self._sie_op = op self._sie_operand = operand def __repr__(self): return "{unop:%s %s}" % (self._sie_op, repr(self._sie_operand)) def _sie_referenced(self): return [self._sie_operand] def _sie_format(self, sif): op = self._sie_op fop = op rprec = None if op == '-': # We print -(a*b) as -a*b, even though that will parse as # (-a)*b. prec = _prec_muldiv rprec = _prec_negate elif op == '~': prec = _prec_bitnot else: raise ValueError('Unhandled op {} in SIE_unary'.format(op)) if rprec is None: rprec = prec return '%s%s' % (fop, sif.format(self._sie_operand, prec)), rprec def _sie_is_negation(self): return self._sie_op == '-' class SIE_gens_constructor(SageInputExpression): def __init__(self, sib, constr, gen_names, gens_syntax=None): super(SIE_gens_constructor, self).__init__(sib) self._sie_constr = constr self._sie_gen_names = gen_names self._sie_gens = None # will be overwritten from .parent_with_gens() self._sie_gens_constr = gens_syntax self._sie_assign_gens = False self._sie_generated = False def __repr__(self): return "{constr_parent: %s with gens: %s}" % (repr(self._sie_constr), self._sie_gen_names) def _sie_referenced(self): # This is used to determine if some expressions should be replaced # by variables (if the expression has more than one parent in # the expression DAG). We assume that all expressions in # self._sie_gens_constr also occur in self._sie_constr. 
return [self._sie_constr] def _sie_gens_referenced(self, sif): self._sie_assign_gens = True self._sie_require_varname(sif) for gen in self._sie_gens: gen._sie_require_varname(sif) def _sie_add_command(self, sif): if not self._sie_generated: if self._sie_builder.preparse() and \ self._sie_gens_constr is not None and \ all(g._sie_got_preferred(sif) for g in self._sie_gens): s, _ = self._sie_gens_constr._sie_format(sif) sif._commands += '%s.<%s> = %s\n' % (self._sie_get_varname(sif), ','.join(self._sie_gen_names), s) else: s, _ = self._sie_constr._sie_format(sif) sif._commands += '%s = %s\n' % (self._sie_get_varname(sif), s) if self._sie_assign_gens: if len(self._sie_gens) == 1: sif._commands += '%s = %s.gen()\n' % (self._sie_gens[0]._sie_get_varname(sif), self._sie_get_varname(sif)) else: sif._commands += '%s = %s.gens()\n' % (','.join([g._sie_get_varname(sif) for g in self._sie_gens]), self._sie_get_varname(sif)) self._sie_generated = True def _sie_format(self, sif): if self._sie_assign_gens: self._sie_add_command(sif) return self._sie_get_varname(sif), _prec_atomic return self._sie_constr._sie_format(sif) class SIE_gen(SageInputExpression): def __init__(self, sib, parent, name): super(SIE_gen, self).__init__(sib) self._sie_parent = parent self._sie_preferred_varname = name def __repr__(self): return "{gen:%s %s}" % (self._sie_preferred_varname, repr(self._sie_parent)) def _sie_is_simple(self): return True def _sie_prepare(self, sif): super(SIE_gen, self)._sie_prepare(sif) self._sie_parent._sie_gens_referenced(sif) def _sie_format(self, sif): self._sie_parent._sie_add_command(sif) return self._sie_get_varname(sif), _prec_atomic def _sie_got_preferred(self, sif): return self._sie_get_varname(sif) == self._sie_preferred_varname class SIE_import_name(SageInputExpression): def __init__(self, sib, module, name, alt_name=None): super(SIE_import_name, self).__init__(sib) self._sie_formatted = False self._sie_module_name = module self._sie_object_name = name if alt_name is 
None: self._sie_preferred_varname = name else: self._sie_preferred_varname = alt_name def __repr__(self): return "{import:%s/%s%s}" % (self._sie_module_name, self._sie_object_name, "" if self._sie_object_name == self._sie_preferred_varname else " as %s" % self._sie_preferred_varname) def _sie_is_simple(self): return True def _sie_prepare(self, sif): super(SIE_import_name, self)._sie_prepare(sif) self._sie_require_varname(sif) def _sie_format(self, sif): name = self._sie_get_varname(sif) if self._sie_formatted: # Only run the import command once return name, _prec_atomic self._sie_formatted = True rename = '' if name != self._sie_object_name: rename = ' as ' + name sif._commands += 'from %s import %s%s\n' % (self._sie_module_name, self._sie_object_name, rename) return name, _prec_atomic class SIE_assign(SageInputExpression): def __init__(self, sib, lhs, rhs): super(SIE_assign, self).__init__(sib) self._sie_lhs = lhs self._sie_rhs = rhs def __repr__(self): return "{assign: %s %s}" % (repr(self._sie_lhs), repr(self._sie_rhs)) def _sie_referenced(self): return [self._sie_lhs, self._sie_rhs] def _sie_format(self, sif): raise ValueError("Cannot format SIE_assign as expression") def _sie_format_statement(self, sif): return '%s = %s' % (sif.format(self._sie_lhs, 0), sif.format(self._sie_rhs, 0)) class SageInputFormatter: def __init__(self): self._commands = '' self._names = set() self._dup_names = {} def format(self, e, prec): if e._sie_use_var: if not e._sie_generated: s, _ = e._sie_format(self) # In complicated situations, this can get called # recursively... 
if not e._sie_generated: self._commands += '%s = %s\n' % (e._sie_get_varname(self), s) e._sie_generated = True formatted = e._sie_get_varname(self) else: s, iprec = e._sie_format(self) if iprec < prec: s = '(' + s + ')' formatted = s commands = e._sie_commands e._sie_commands = [] for cmd in commands: s_cmd = cmd._sie_format_statement(self) self._commands += s_cmd + '\n' return formatted def register_name(self, name): if name is None: name = 'si' if name in self._names: self._dup_names[name] = 0 else: self._names.add(name) def get_name(self, name): if name is None: name = 'si' if name in self._dup_names: next = self._dup_names[name] + 1 self._dup_names[name] = next return name + str(next) else: return name def verify_same(a, b): from sage.structure.element import is_Element if is_Element(a): assert(a.parent() == b.parent()) else: assert(type(a) is type(b)) if isinstance(a, float): # The IEEE floating-point standard recommends that NaN != NaN # Sage doesn't do this for RDF or RR, but Python does for floats. 
if not (a == a): assert not (b == b) else: assert a == b return from sage.rings.real_mpfi import is_RealIntervalFieldElement from sage.rings.complex_interval import is_ComplexIntervalFieldElement if is_RealIntervalFieldElement(a) or is_ComplexIntervalFieldElement(a): assert(cmp(a, b) == 0), "Expected %s == %s" % (a, b) else: assert(a == b), "Expected %s == %s" % (a, b) def verify_si_answer(x, answer, preparse): from sage.misc.sage_eval import sage_eval if preparse is None: verify_same(x, sage_eval(answer, preparse=True)) verify_same(x, sage_eval(answer, preparse=False)) else: verify_same(x, sage_eval(answer, preparse=preparse)) class SageInputAnswer(tuple): def __new__(cls, cmds, expr, locals=None): if locals: return tuple.__new__(cls, (cmds, expr, locals)) else: return tuple.__new__(cls, (cmds, expr)) def __repr__(self): if len(self) == 2: return self[0] + self[1] locals = self[2] locals_text = ''.join(' %s: %r\n' % (k, v) for k, v in locals.iteritems()) return 'LOCALS:\n' + locals_text + self[0] + self[1]
true
true
f716d301aba4afe90bc5d5c6ec197a440bdc19f2
3,047
py
Python
contrib/testgen/base58.py
Trackerming/bitcoin-sv
fb50a64e3ea0334a86b2c80daf5147c5bc2693c4
[ "MIT" ]
35
2019-02-23T06:21:13.000Z
2021-11-15T11:35:13.000Z
contrib/testgen/base58.py
Chihuataneo/bitcoin-sv
d9b12a23dbf0d2afc5f488fa077d762b302ba873
[ "MIT" ]
60
2019-02-25T18:17:03.000Z
2021-07-13T00:14:00.000Z
contrib/testgen/base58.py
Chihuataneo/bitcoin-sv
d9b12a23dbf0d2afc5f488fa077d762b302ba873
[ "MIT" ]
24
2019-02-20T05:37:02.000Z
2021-10-29T18:42:10.000Z
# Copyright (c) 2012-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Bitcoin base58 encoding and decoding. Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain) ''' import hashlib # for compatibility with following code... class SHA256: new = hashlib.sha256 if str != bytes: # Python 3.x def ord(c): return c def chr(n): return bytes((n,)) __b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' __b58base = len(__b58chars) b58chars = __b58chars def b58encode(v): """ encode v, which is a string of bytes, to base58. """ long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += (256**i) * ord(c) result = '' while long_value >= __b58base: div, mod = divmod(long_value, __b58base) result = __b58chars[mod] + result long_value = div result = __b58chars[long_value] + result # Bitcoin does a little leading-zero-compression: # leading 0-bytes in the input become leading-1s nPad = 0 for c in v: if c == '\0': nPad += 1 else: break return (__b58chars[0] * nPad) + result def b58decode(v, length=None): """ decode v into a string of len bytes """ long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += __b58chars.find(c) * (__b58base**i) result = bytes() while long_value >= 256: div, mod = divmod(long_value, 256) result = chr(mod) + result long_value = div result = chr(long_value) + result nPad = 0 for c in v: if c == __b58chars[0]: nPad += 1 else: break result = chr(0) * nPad + result if length is not None and len(result) != length: return None return result def checksum(v): """Return 32-bit checksum based on SHA256""" return SHA256.new(SHA256.new(v).digest()).digest()[0:4] def b58encode_chk(v): """b58encode a string, with 32-bit checksum""" return b58encode(v + checksum(v)) def b58decode_chk(v): """decode a base58 string, check and remove checksum""" result = b58decode(v) if result is None: return None if 
result[-4:] == checksum(result[:-4]): return result[:-4] else: return None def get_bcaddress_version(strAddress): """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """ addr = b58decode_chk(strAddress) if addr is None or len(addr) != 21: return None version = addr[0] return ord(version) if __name__ == '__main__': # Test case (from http://gitorious.org/bitcoin/python-base58.git) assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0 _ohai = 'o hai'.encode('ascii') _tmp = b58encode(_ohai) assert _tmp == 'DYB3oMS' assert b58decode(_tmp, 5) == _ohai print("Tests passed")
24.772358
97
0.624549
import hashlib class SHA256: new = hashlib.sha256 if str != bytes: def ord(c): return c def chr(n): return bytes((n,)) __b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' __b58base = len(__b58chars) b58chars = __b58chars def b58encode(v): long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += (256**i) * ord(c) result = '' while long_value >= __b58base: div, mod = divmod(long_value, __b58base) result = __b58chars[mod] + result long_value = div result = __b58chars[long_value] + result nPad = 0 for c in v: if c == '\0': nPad += 1 else: break return (__b58chars[0] * nPad) + result def b58decode(v, length=None): long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += __b58chars.find(c) * (__b58base**i) result = bytes() while long_value >= 256: div, mod = divmod(long_value, 256) result = chr(mod) + result long_value = div result = chr(long_value) + result nPad = 0 for c in v: if c == __b58chars[0]: nPad += 1 else: break result = chr(0) * nPad + result if length is not None and len(result) != length: return None return result def checksum(v): return SHA256.new(SHA256.new(v).digest()).digest()[0:4] def b58encode_chk(v): return b58encode(v + checksum(v)) def b58decode_chk(v): result = b58decode(v) if result is None: return None if result[-4:] == checksum(result[:-4]): return result[:-4] else: return None def get_bcaddress_version(strAddress): addr = b58decode_chk(strAddress) if addr is None or len(addr) != 21: return None version = addr[0] return ord(version) if __name__ == '__main__': assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0 _ohai = 'o hai'.encode('ascii') _tmp = b58encode(_ohai) assert _tmp == 'DYB3oMS' assert b58decode(_tmp, 5) == _ohai print("Tests passed")
true
true
f716d420b906d891f1c046d6c7abb726027eaa2b
4,749
py
Python
ml-services/od-yolov3-tf2/yolov3_tf2/utils.py
jveverka/data-lab
c2a43fd2c34520a9d490f29feff3035bdc70c0d6
[ "Apache-2.0" ]
null
null
null
ml-services/od-yolov3-tf2/yolov3_tf2/utils.py
jveverka/data-lab
c2a43fd2c34520a9d490f29feff3035bdc70c0d6
[ "Apache-2.0" ]
6
2019-12-07T09:53:26.000Z
2020-05-21T19:52:27.000Z
ml-services/od-yolov3-tf2/yolov3_tf2/utils.py
jveverka/data-lab
c2a43fd2c34520a9d490f29feff3035bdc70c0d6
[ "Apache-2.0" ]
null
null
null
from absl import logging import numpy as np import tensorflow as tf import cv2 YOLOV3_LAYER_LIST = [ 'yolo_darknet', 'yolo_conv_0', 'yolo_output_0', 'yolo_conv_1', 'yolo_output_1', 'yolo_conv_2', 'yolo_output_2', ] YOLOV3_TINY_LAYER_LIST = [ 'yolo_darknet', 'yolo_conv_0', 'yolo_output_0', 'yolo_conv_1', 'yolo_output_1', ] def load_darknet_weights(model, weights_file, tiny=False): wf = open(weights_file, 'rb') major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5) if tiny: layers = YOLOV3_TINY_LAYER_LIST else: layers = YOLOV3_LAYER_LIST for layer_name in layers: sub_model = model.get_layer(layer_name) for i, layer in enumerate(sub_model.layers): if not layer.name.startswith('conv2d'): continue batch_norm = None if i + 1 < len(sub_model.layers) and \ sub_model.layers[i + 1].name.startswith('batch_norm'): batch_norm = sub_model.layers[i + 1] logging.info("{}/{} {}".format( sub_model.name, layer.name, 'bn' if batch_norm else 'bias')) filters = layer.filters size = layer.kernel_size[0] in_dim = layer.input_shape[-1] if batch_norm is None: conv_bias = np.fromfile(wf, dtype=np.float32, count=filters) else: # darknet [beta, gamma, mean, variance] bn_weights = np.fromfile( wf, dtype=np.float32, count=4 * filters) # tf [gamma, beta, mean, variance] bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]] # darknet shape (out_dim, in_dim, height, width) conv_shape = (filters, in_dim, size, size) conv_weights = np.fromfile( wf, dtype=np.float32, count=np.product(conv_shape)) # tf shape (height, width, in_dim, out_dim) conv_weights = conv_weights.reshape( conv_shape).transpose([2, 3, 1, 0]) if batch_norm is None: layer.set_weights([conv_weights, conv_bias]) else: layer.set_weights([conv_weights]) batch_norm.set_weights(bn_weights) assert len(wf.read()) == 0, 'failed to read all data' wf.close() def broadcast_iou(box_1, box_2): # box_1: (..., (x1, y1, x2, y2)) # box_2: (N, (x1, y1, x2, y2)) # broadcast boxes box_1 = tf.expand_dims(box_1, -2) box_2 = 
tf.expand_dims(box_2, 0) # new_shape: (..., N, (x1, y1, x2, y2)) new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2)) box_1 = tf.broadcast_to(box_1, new_shape) box_2 = tf.broadcast_to(box_2, new_shape) int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) - tf.maximum(box_1[..., 0], box_2[..., 0]), 0) int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) - tf.maximum(box_1[..., 1], box_2[..., 1]), 0) int_area = int_w * int_h box_1_area = (box_1[..., 2] - box_1[..., 0]) * \ (box_1[..., 3] - box_1[..., 1]) box_2_area = (box_2[..., 2] - box_2[..., 0]) * \ (box_2[..., 3] - box_2[..., 1]) return int_area / (box_1_area + box_2_area - int_area) def draw_outputs(img, outputs, class_names): boxes, objectness, classes, nums = outputs boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0] wh = np.flip(img.shape[0:2]) for i in range(nums): x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32)) x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32)) img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2) img = cv2.putText(img, '{} {:.4f}'.format( class_names[int(classes[i])], objectness[i]), x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2) return img def draw_labels(x, y, class_names): img = x.numpy() boxes, classes = tf.split(y, (4, 1), axis=-1) classes = classes[..., 0] wh = np.flip(img.shape[0:2]) for i in range(len(boxes)): x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32)) x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32)) img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2) img = cv2.putText(img, class_names[classes[i]], x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2) return img def freeze_all(model, frozen=True): model.trainable = not frozen if isinstance(model, tf.keras.Model): for l in model.layers: freeze_all(l, frozen)
35.177778
83
0.561592
from absl import logging import numpy as np import tensorflow as tf import cv2 YOLOV3_LAYER_LIST = [ 'yolo_darknet', 'yolo_conv_0', 'yolo_output_0', 'yolo_conv_1', 'yolo_output_1', 'yolo_conv_2', 'yolo_output_2', ] YOLOV3_TINY_LAYER_LIST = [ 'yolo_darknet', 'yolo_conv_0', 'yolo_output_0', 'yolo_conv_1', 'yolo_output_1', ] def load_darknet_weights(model, weights_file, tiny=False): wf = open(weights_file, 'rb') major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5) if tiny: layers = YOLOV3_TINY_LAYER_LIST else: layers = YOLOV3_LAYER_LIST for layer_name in layers: sub_model = model.get_layer(layer_name) for i, layer in enumerate(sub_model.layers): if not layer.name.startswith('conv2d'): continue batch_norm = None if i + 1 < len(sub_model.layers) and \ sub_model.layers[i + 1].name.startswith('batch_norm'): batch_norm = sub_model.layers[i + 1] logging.info("{}/{} {}".format( sub_model.name, layer.name, 'bn' if batch_norm else 'bias')) filters = layer.filters size = layer.kernel_size[0] in_dim = layer.input_shape[-1] if batch_norm is None: conv_bias = np.fromfile(wf, dtype=np.float32, count=filters) else: bn_weights = np.fromfile( wf, dtype=np.float32, count=4 * filters) bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]] conv_shape = (filters, in_dim, size, size) conv_weights = np.fromfile( wf, dtype=np.float32, count=np.product(conv_shape)) conv_weights = conv_weights.reshape( conv_shape).transpose([2, 3, 1, 0]) if batch_norm is None: layer.set_weights([conv_weights, conv_bias]) else: layer.set_weights([conv_weights]) batch_norm.set_weights(bn_weights) assert len(wf.read()) == 0, 'failed to read all data' wf.close() def broadcast_iou(box_1, box_2): box_1 = tf.expand_dims(box_1, -2) box_2 = tf.expand_dims(box_2, 0) new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2)) box_1 = tf.broadcast_to(box_1, new_shape) box_2 = tf.broadcast_to(box_2, new_shape) int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) - 
tf.maximum(box_1[..., 0], box_2[..., 0]), 0) int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) - tf.maximum(box_1[..., 1], box_2[..., 1]), 0) int_area = int_w * int_h box_1_area = (box_1[..., 2] - box_1[..., 0]) * \ (box_1[..., 3] - box_1[..., 1]) box_2_area = (box_2[..., 2] - box_2[..., 0]) * \ (box_2[..., 3] - box_2[..., 1]) return int_area / (box_1_area + box_2_area - int_area) def draw_outputs(img, outputs, class_names): boxes, objectness, classes, nums = outputs boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0] wh = np.flip(img.shape[0:2]) for i in range(nums): x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32)) x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32)) img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2) img = cv2.putText(img, '{} {:.4f}'.format( class_names[int(classes[i])], objectness[i]), x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2) return img def draw_labels(x, y, class_names): img = x.numpy() boxes, classes = tf.split(y, (4, 1), axis=-1) classes = classes[..., 0] wh = np.flip(img.shape[0:2]) for i in range(len(boxes)): x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32)) x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32)) img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2) img = cv2.putText(img, class_names[classes[i]], x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2) return img def freeze_all(model, frozen=True): model.trainable = not frozen if isinstance(model, tf.keras.Model): for l in model.layers: freeze_all(l, frozen)
true
true
f716d445c1bcac531c48494bd19770afcbb198fa
1,377
py
Python
dags/tuto.py
setuk/docker-airflow
8741ac32094893e1cd56b8bd411d240f60453eb7
[ "Apache-2.0" ]
null
null
null
dags/tuto.py
setuk/docker-airflow
8741ac32094893e1cd56b8bd411d240f60453eb7
[ "Apache-2.0" ]
null
null
null
dags/tuto.py
setuk/docker-airflow
8741ac32094893e1cd56b8bd411d240f60453eb7
[ "Apache-2.0" ]
null
null
null
""" Code that goes along with the Airflow located at: http://airflow.readthedocs.org/en/latest/tutorial.html """ from airflow import DAG from airflow.operators.bash_operator import BashOperator from datetime import datetime, timedelta default_args = { "owner": "airflow", "depends_on_past": False, "start_date": datetime(2020, 11, 19), "email": ["airflow@airflow.com"], "email_on_failure": False, "email_on_retry": False, "retries": 1, "retry_delay": timedelta(minutes=5), # 'queue': 'bash_queue', # 'pool': 'backfill', # 'priority_weight': 10, # 'end_date': datetime(2016, 1, 1), } # dag = DAG("tutorial", default_args=default_args, schedule_interval=timedelta(1)) dag = DAG("tutorial", default_args=default_args, schedule_interval=None) # t1, t2 and t3 are examples of tasks created by instantiating operators t1 = BashOperator(task_id="print_date", bash_command="date", dag=dag) t2 = BashOperator(task_id="sleep", bash_command="sleep 5", retries=3, dag=dag) templated_command = """ {% for i in range(5) %} echo "{{ ds }}" echo "{{ macros.ds_add(ds, 7)}}" echo "{{ params.my_param }}" {% endfor %} """ t3 = BashOperator( task_id="templated", bash_command=templated_command, params={"my_param": "Parameter I passed in"}, dag=dag, ) t2.set_upstream(t1) t3.set_upstream(t1)
27.54
82
0.67175
from airflow import DAG from airflow.operators.bash_operator import BashOperator from datetime import datetime, timedelta default_args = { "owner": "airflow", "depends_on_past": False, "start_date": datetime(2020, 11, 19), "email": ["airflow@airflow.com"], "email_on_failure": False, "email_on_retry": False, "retries": 1, "retry_delay": timedelta(minutes=5), } dag = DAG("tutorial", default_args=default_args, schedule_interval=None) t1 = BashOperator(task_id="print_date", bash_command="date", dag=dag) t2 = BashOperator(task_id="sleep", bash_command="sleep 5", retries=3, dag=dag) templated_command = """ {% for i in range(5) %} echo "{{ ds }}" echo "{{ macros.ds_add(ds, 7)}}" echo "{{ params.my_param }}" {% endfor %} """ t3 = BashOperator( task_id="templated", bash_command=templated_command, params={"my_param": "Parameter I passed in"}, dag=dag, ) t2.set_upstream(t1) t3.set_upstream(t1)
true
true
f716d497d29ec78b5afc1dd07eb0f92340ba179b
8,724
py
Python
benchmark/cloud/aws/kylin.py
ChenYi015/Raven
e732e03f8dd118ed805a143fc6916f0e5fc53c2c
[ "Apache-2.0" ]
1
2022-03-03T05:54:25.000Z
2022-03-03T05:54:25.000Z
benchmark/cloud/aws/kylin.py
ChenYi015/Raven
e732e03f8dd118ed805a143fc6916f0e5fc53c2c
[ "Apache-2.0" ]
null
null
null
benchmark/cloud/aws/kylin.py
ChenYi015/Raven
e732e03f8dd118ed805a143fc6916f0e5fc53c2c
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 Raven Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import threading import time from typing import List from benchmark.cloud.aws.aws import Ec2Instance, AmazonWebService from benchmark.tools import get_random_id logger = logging.getLogger() class KylinMode: ALL = 'all' JOB = 'job' QUERY = 'query' class KylinMaster(Ec2Instance): def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '', ec2_instance_type: str): path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin', 'kylin-master-cloudformation-template.yaml') with open(path, encoding='utf-8') as file: template = file.read() super().__init__( name='KylinMaster', aws=aws, region=region, stack_name='Raven-Kylin-Master-Stack', template=template, ec2_key_name=ec2_key_name, ec2_instance_type=ec2_instance_type ) @property def spark_master_url(self): return self.aws.get_stack_output_by_key(stack_name=self.stack_name, output_key='SparkMasterUrl') def __str__(self): return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})' def launch(self): logger.info('Kylin master is launching...') super().launch() logger.info('Kylin master has launched.') def terminate(self): logger.info('Kylin master is terminating...') super().terminate() logger.info('Kylin master has terminated.') class KylinWorker(Ec2Instance): def __init__(self, *, aws: AmazonWebService = None, region: str = '', 
ec2_key_name: str = '', ec2_instance_type: str, worker_id: int = 1): path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin', 'kylin-worker-cloudformation-template.yaml') with open(path, encoding='utf-8') as file: template = file.read() super().__init__( name='KylinWorker', aws=aws, region=region, stack_name=f'Raven-Kylin-Worker{worker_id}-Stack', template=template, ec2_key_name=ec2_key_name, ec2_instance_type=ec2_instance_type, KylinWorkerId=worker_id, ) self._worker_id = worker_id self._spark_master_private_ip = '' @property def worker_id(self): return self._worker_id @property def spark_master_private_ip(self): return self._spark_master_private_ip @spark_master_private_ip.setter def spark_master_private_ip(self, private_ip: str): self._spark_master_private_ip = private_ip self.kwargs['SparkMasterPrivateIp'] = private_ip def __str__(self): return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})' def launch(self): logger.info(f'Kylin worker {self._worker_id} is launching...') super().launch() logger.info(f'Kylin worker {self._worker_id} has launched.') def terminate(self): logger.info(f'Kylin worker {self._worker_id} is terminating...') super().terminate() logger.info(f'Kylin worker {self._worker_id} has terminated.') class KylinCluster: def __init__(self, *, aws: AmazonWebService, master_instance_type: str = 't2.small', worker_num: int = 0, worker_instance_type: str = 't2.small'): self._aws = aws self._master_instance_type = master_instance_type self._worker_instance_type = worker_instance_type self._master = KylinMaster(aws=aws, ec2_instance_type=self._master_instance_type) self._workers: List[KylinWorker] = [ KylinWorker(aws=aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id) for worker_id in range(0, worker_num)] self._cluster_id = get_random_id(16) @property def master(self): return self._master @property def workers(self): return self._workers def __str__(self): return 
f'KylinCluster(Master={self.master}, #Worker={len(self.workers)})' def launch(self): logger.info('Kylin cluster is launching...') self.master.launch() threads: List[threading.Thread] = [] for worker in self.workers: worker.spark_master_private_ip = self.master.private_ip thread = threading.Thread(target=worker.launch) thread.start() threads.append(thread) for thread in threads: thread.join() logger.info('Kylin cluster has launched.') def terminate(self): logger.info('Kylin cluster is terminating...') threads: List[threading.Thread] = [] for worker in self.workers: thread = threading.Thread(target=worker.terminate) thread.start() threads.append(thread) for thread in threads: thread.join() self.master.terminate() logger.info('Kylin cluster has terminated.') def install_cloud_watch_agent(self): logger.debug('Kylin cluster is installing cloudwatch agent...') threads: List[threading.Thread] = [threading.Thread(target=self.master.install_cloudwatch_agent)] for worker in self.workers: threads.append(threading.Thread(target=worker.install_cloudwatch_agent)) for thread in threads: thread.start() for thread in threads: thread.join() logger.debug('Kylin cluster has finished installing cloudwatch agent.') def collect_cluster_info(self, output_dir: str = None): """Collect kylin cluster information. 
:param output_dir: :return: """ if not output_dir: output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}') os.makedirs(output_dir, exist_ok=True) info = { 'Master': self.master.to_dict(), 'Workers': [worker.to_dict() for worker in self.workers] } with open(os.path.join(output_dir, f'cluster-info_{time.strftime("%Y-%m-%d_%H-%M-%S")}.json'), mode='w', encoding='utf-8') as file: json.dump(info, file, indent=2) def collect_metrics(self, output_dir: str = None): logger.debug('Kylin cluster is pulling metrics cloudwatch agent...') if not output_dir: output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}') os.makedirs(output_dir, exist_ok=True) threads: List[threading.Thread] = [ threading.Thread(target=self.master.collect_metrics, kwargs={'output_dir': output_dir})] for worker in self.workers: threads.append(threading.Thread(target=worker.collect_metrics, kwargs={'output_dir': output_dir})) for thread in threads: thread.start() for thread in threads: thread.join() logger.debug('Kylin cluster has finished pulling metrics cloudwatch agent...') def scale(self, worker_num: int): logger.info('Kylin cluster is scaling...') n = len(self.workers) threads: List[threading.Thread] = [] if worker_num < n: for worker_id in range(worker_num, n): thread = threading.Thread(target=self.workers[worker_id].terminate) thread.start() threads.append(thread) elif worker_num > n: for worker_id in range(n, worker_num): worker = KylinWorker(aws=self._aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id) worker.spark_master_private_ip = self.master.private_ip self.workers.append(worker) thread = threading.Thread(target=worker.launch) thread.start() threads.append(thread) for thread in threads: thread.join() logger.info('Kylin cluster has finished scaling.')
36.810127
118
0.634457
import json import logging import os import threading import time from typing import List from benchmark.cloud.aws.aws import Ec2Instance, AmazonWebService from benchmark.tools import get_random_id logger = logging.getLogger() class KylinMode: ALL = 'all' JOB = 'job' QUERY = 'query' class KylinMaster(Ec2Instance): def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '', ec2_instance_type: str): path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin', 'kylin-master-cloudformation-template.yaml') with open(path, encoding='utf-8') as file: template = file.read() super().__init__( name='KylinMaster', aws=aws, region=region, stack_name='Raven-Kylin-Master-Stack', template=template, ec2_key_name=ec2_key_name, ec2_instance_type=ec2_instance_type ) @property def spark_master_url(self): return self.aws.get_stack_output_by_key(stack_name=self.stack_name, output_key='SparkMasterUrl') def __str__(self): return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})' def launch(self): logger.info('Kylin master is launching...') super().launch() logger.info('Kylin master has launched.') def terminate(self): logger.info('Kylin master is terminating...') super().terminate() logger.info('Kylin master has terminated.') class KylinWorker(Ec2Instance): def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '', ec2_instance_type: str, worker_id: int = 1): path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin', 'kylin-worker-cloudformation-template.yaml') with open(path, encoding='utf-8') as file: template = file.read() super().__init__( name='KylinWorker', aws=aws, region=region, stack_name=f'Raven-Kylin-Worker{worker_id}-Stack', template=template, ec2_key_name=ec2_key_name, ec2_instance_type=ec2_instance_type, KylinWorkerId=worker_id, ) self._worker_id = worker_id self._spark_master_private_ip = '' @property def worker_id(self): return self._worker_id 
@property def spark_master_private_ip(self): return self._spark_master_private_ip @spark_master_private_ip.setter def spark_master_private_ip(self, private_ip: str): self._spark_master_private_ip = private_ip self.kwargs['SparkMasterPrivateIp'] = private_ip def __str__(self): return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})' def launch(self): logger.info(f'Kylin worker {self._worker_id} is launching...') super().launch() logger.info(f'Kylin worker {self._worker_id} has launched.') def terminate(self): logger.info(f'Kylin worker {self._worker_id} is terminating...') super().terminate() logger.info(f'Kylin worker {self._worker_id} has terminated.') class KylinCluster: def __init__(self, *, aws: AmazonWebService, master_instance_type: str = 't2.small', worker_num: int = 0, worker_instance_type: str = 't2.small'): self._aws = aws self._master_instance_type = master_instance_type self._worker_instance_type = worker_instance_type self._master = KylinMaster(aws=aws, ec2_instance_type=self._master_instance_type) self._workers: List[KylinWorker] = [ KylinWorker(aws=aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id) for worker_id in range(0, worker_num)] self._cluster_id = get_random_id(16) @property def master(self): return self._master @property def workers(self): return self._workers def __str__(self): return f'KylinCluster(Master={self.master}, #Worker={len(self.workers)})' def launch(self): logger.info('Kylin cluster is launching...') self.master.launch() threads: List[threading.Thread] = [] for worker in self.workers: worker.spark_master_private_ip = self.master.private_ip thread = threading.Thread(target=worker.launch) thread.start() threads.append(thread) for thread in threads: thread.join() logger.info('Kylin cluster has launched.') def terminate(self): logger.info('Kylin cluster is terminating...') threads: List[threading.Thread] = [] for worker in self.workers: thread = threading.Thread(target=worker.terminate) 
thread.start() threads.append(thread) for thread in threads: thread.join() self.master.terminate() logger.info('Kylin cluster has terminated.') def install_cloud_watch_agent(self): logger.debug('Kylin cluster is installing cloudwatch agent...') threads: List[threading.Thread] = [threading.Thread(target=self.master.install_cloudwatch_agent)] for worker in self.workers: threads.append(threading.Thread(target=worker.install_cloudwatch_agent)) for thread in threads: thread.start() for thread in threads: thread.join() logger.debug('Kylin cluster has finished installing cloudwatch agent.') def collect_cluster_info(self, output_dir: str = None): if not output_dir: output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}') os.makedirs(output_dir, exist_ok=True) info = { 'Master': self.master.to_dict(), 'Workers': [worker.to_dict() for worker in self.workers] } with open(os.path.join(output_dir, f'cluster-info_{time.strftime("%Y-%m-%d_%H-%M-%S")}.json'), mode='w', encoding='utf-8') as file: json.dump(info, file, indent=2) def collect_metrics(self, output_dir: str = None): logger.debug('Kylin cluster is pulling metrics cloudwatch agent...') if not output_dir: output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}') os.makedirs(output_dir, exist_ok=True) threads: List[threading.Thread] = [ threading.Thread(target=self.master.collect_metrics, kwargs={'output_dir': output_dir})] for worker in self.workers: threads.append(threading.Thread(target=worker.collect_metrics, kwargs={'output_dir': output_dir})) for thread in threads: thread.start() for thread in threads: thread.join() logger.debug('Kylin cluster has finished pulling metrics cloudwatch agent...') def scale(self, worker_num: int): logger.info('Kylin cluster is scaling...') n = len(self.workers) threads: List[threading.Thread] = [] if worker_num < n: for worker_id in range(worker_num, n): thread = 
threading.Thread(target=self.workers[worker_id].terminate) thread.start() threads.append(thread) elif worker_num > n: for worker_id in range(n, worker_num): worker = KylinWorker(aws=self._aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id) worker.spark_master_private_ip = self.master.private_ip self.workers.append(worker) thread = threading.Thread(target=worker.launch) thread.start() threads.append(thread) for thread in threads: thread.join() logger.info('Kylin cluster has finished scaling.')
true
true
f716d4d8c9c6fc2d044b19e6dd1de0835c5c8e87
285
py
Python
Blog/sitemaps.py
myselfajp/MyFirstPage
c22b2fbe6ddca6f0af199f51ec7f12894458a91b
[ "MIT" ]
null
null
null
Blog/sitemaps.py
myselfajp/MyFirstPage
c22b2fbe6ddca6f0af199f51ec7f12894458a91b
[ "MIT" ]
null
null
null
Blog/sitemaps.py
myselfajp/MyFirstPage
c22b2fbe6ddca6f0af199f51ec7f12894458a91b
[ "MIT" ]
null
null
null
from django.contrib.sitemaps import Sitemap from Blog.models import Post class BlogSitemap(Sitemap): changefreq = "weekly" priority = 0.5 def items(self): return Post.objects.filter(status=True) def lastmod(self, obj): return obj.published_date
20.357143
47
0.687719
from django.contrib.sitemaps import Sitemap from Blog.models import Post class BlogSitemap(Sitemap): changefreq = "weekly" priority = 0.5 def items(self): return Post.objects.filter(status=True) def lastmod(self, obj): return obj.published_date
true
true
f716d5594aa167180b878069286e9c1308907fdf
9,503
py
Python
tests/infra/jsonrpc.py
rschust/CCF
2ad5f162cd73c645070f26461d8d053b45f63c3e
[ "Apache-2.0" ]
null
null
null
tests/infra/jsonrpc.py
rschust/CCF
2ad5f162cd73c645070f26461d8d053b45f63c3e
[ "Apache-2.0" ]
null
null
null
tests/infra/jsonrpc.py
rschust/CCF
2ad5f162cd73c645070f26461d8d053b45f63c3e
[ "Apache-2.0" ]
null
null
null
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache 2.0 License. import socket import ssl import msgpack import struct import select import contextlib import json import logging import time import os from enum import IntEnum from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import asymmetric from loguru import logger as LOG # Values defined in node/rpc/jsonrpc.h class ErrorCode(IntEnum): PARSE_ERROR = -32700 INVALID_REQUEST = -32600 METHOD_NOT_FOUND = -32601 INVALID_PARAMS = -32602 INTERNAL_ERROR = -32603 NODE_NOT_FOUND = -32604 INVALID_CLIENT_SIGNATURE = -32605 INVALID_CALLER_ID = -32606 CODE_ID_NOT_FOUND = -32607 CODE_ID_RETIRED = -32608 RPC_NOT_FORWARDED = -32609 SERVER_ERROR_START = -32000 TX_NOT_LEADER = -32001 TX_REPLICATED = -32002 TX_ROLLED_BACK = -32003 TX_FAILED_TO_COMMIT = -32004 TX_FAILED_TO_REPLICATE = -32005 SCRIPT_ERROR = -32006 INSUFFICIENT_RIGHTS = -32007 DENIED = -32008 TX_LEADER_UNKNOWN = -32009 RPC_NOT_SIGNED = -32010 SERVER_ERROR_END = -32099 def truncate(string, max_len=256): if len(string) > 256: return string[: 256 - 3] + "..." 
else: return string class Request: def __init__(self, id, method, params, jsonrpc="2.0"): self.id = id self.method = method self.params = params self.jsonrpc = jsonrpc def to_dict(self): return { "id": self.id, "method": self.method, "jsonrpc": self.jsonrpc, "params": self.params, } def to_msgpack(self): return msgpack.packb(self.to_dict(), use_bin_type=True) def to_json(self): return json.dumps(self.to_dict()).encode() class Response: def __init__( self, id, result=None, error=None, commit=None, term=None, global_commit=None, jsonrpc="2.0", ): self.id = id self.result = result self.error = error self.jsonrpc = jsonrpc self.commit = commit self.term = term self.global_commit = global_commit self._attrs = set(locals()) - {"self"} def to_dict(self): d = {"id": self.id, "jsonrpc": self.jsonrpc} if self.result is not None: d["result"] = self.result else: d["error"] = self.error return d def _from_parsed(self, parsed): def decode(sl, is_key=False): if is_key and hasattr(sl, "decode"): return sl.decode() if hasattr(sl, "items"): return {decode(k, is_key=True): decode(v) for k, v in sl.items()} elif isinstance(sl, list): return [decode(e) for e in sl] else: return sl parsed_s = { decode(attr, is_key=True): decode(value) for attr, value in parsed.items() } unexpected = parsed_s.keys() - self._attrs if unexpected: raise ValueError("Unexpected keys in response: {}".format(unexpected)) for attr, value in parsed_s.items(): setattr(self, attr, value) def from_msgpack(self, data): parsed = msgpack.unpackb(data) self._from_parsed(parsed) def from_json(self, data): parsed = json.loads(data.decode()) self._from_parsed(parsed) class FramedTLSClient: def __init__(self, host, port, server_hostname, cert=None, key=None, cafile=None): self.host = host self.port = port self.server_hostname = server_hostname self.cert = cert self.key = key self.cafile = cafile self.context = None self.sock = None self.conn = None def connect(self): if self.cafile: self.context = 
ssl.create_default_context(cafile=self.cafile) # Auto detect EC curve to use based on server CA ca_bytes = open(self.cafile, "rb").read() ca_curve = ( x509.load_pem_x509_certificate(ca_bytes, default_backend()) .public_key() .curve ) if isinstance(ca_curve, asymmetric.ec.SECP256K1): self.context.set_ecdh_curve("secp256k1") else: self.context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) if self.cert and self.key: self.context.load_cert_chain(certfile=self.cert, keyfile=self.key) self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.conn = self.context.wrap_socket( self.sock, server_side=False, server_hostname=self.server_hostname ) self.conn.connect((self.host, self.port)) def send(self, msg): frame = struct.pack("<I", len(msg)) + msg self.conn.sendall(frame) def _read(self): size, = struct.unpack("<I", self.conn.recv(4)) data = self.conn.recv(size) while len(data) < size: data += self.conn.recv(size - len(data)) return data def read(self): for _ in range(5000): r, _, _ = select.select([self.conn], [], [], 0) if r: return self._read() else: time.sleep(0.01) def disconnect(self): self.conn.close() class Stream: def __init__(self, jsonrpc="2.0", format="msgpack"): self.jsonrpc = jsonrpc self.seqno = 0 self.pending = {} self.format = format def request(self, method, params): r = Request(self.seqno, method, params, self.jsonrpc) self.seqno += 1 return r def response(self, id): return self.pending.pop(id, None) def update(self, msg): r = Response(0) getattr(r, "from_{}".format(self.format))(msg) self.pending[r.id] = r class RPCLogger: def log_request(self, request, name, description): LOG.info( truncate( "{} #{} {} {}{}".format( name, request.id, request.method, request.params, description ) ) ) def log_response(self, response): LOG.debug( truncate( "#{} {}".format( response.id, { k: v for k, v in (response.__dict__ or {}).items() if not k.startswith("_") }, ) ) ) class RPCFileLogger(RPCLogger): def __init__(self, path): self.path = path def 
log_request(self, request, name, description): with open(self.path, "a") as f: f.write(">> Request:" + os.linesep) json.dump(request.to_dict(), f, indent=2) f.write(os.linesep) def log_response(self, response): with open(self.path, "a") as f: f.write("<< Response:" + os.linesep) json.dump(response.to_dict(), f, indent=2) f.write(os.linesep) class FramedTLSJSONRPCClient: def __init__( self, host, port, server_hostname, cert=None, key=None, cafile=None, version="2.0", format="msgpack", description=None, ): self.client = FramedTLSClient(host, port, server_hostname, cert, key, cafile) self.stream = Stream(version, format=format) self.format = format self.name = "[{}:{}]".format(host, port) self.description = description self.rpc_loggers = (RPCLogger(),) def connect(self): return self.client.connect() def disconnect(self): return self.client.disconnect() def request(self, method, params): r = self.stream.request(method, params) self.client.send(getattr(r, "to_{}".format(self.format))()) description = "" if self.description: description = " ({})".format(self.description) for logger in self.rpc_loggers: logger.log_request(r, self.name, description) return r.id def tick(self): msg = self.client.read() self.stream.update(msg) def response(self, id): self.tick() r = self.stream.response(id) for logger in self.rpc_loggers: logger.log_response(r) return r def do(self, method, params, expected_result=None, expected_error_code=None): id = self.request(method, params) r = self.response(id) if expected_result is not None: assert expected_result == r.result if expected_error_code is not None: assert expected_error_code.value == r.error["code"] return r def rpc(self, method, params): id = self.request(method, params) return self.response(id) @contextlib.contextmanager def client( host, port, server_hostname="users", cert=None, key=None, cafile=None, version="2.0", format="msgpack", description=None, log_file=None, ): c = FramedTLSJSONRPCClient( host, port, server_hostname, cert, 
key, cafile, version, format, description ) if log_file is not None: c.rpc_loggers += (RPCFileLogger(log_file),) c.connect() try: yield c finally: c.disconnect()
27.78655
86
0.573398
import socket import ssl import msgpack import struct import select import contextlib import json import logging import time import os from enum import IntEnum from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import asymmetric from loguru import logger as LOG class ErrorCode(IntEnum): PARSE_ERROR = -32700 INVALID_REQUEST = -32600 METHOD_NOT_FOUND = -32601 INVALID_PARAMS = -32602 INTERNAL_ERROR = -32603 NODE_NOT_FOUND = -32604 INVALID_CLIENT_SIGNATURE = -32605 INVALID_CALLER_ID = -32606 CODE_ID_NOT_FOUND = -32607 CODE_ID_RETIRED = -32608 RPC_NOT_FORWARDED = -32609 SERVER_ERROR_START = -32000 TX_NOT_LEADER = -32001 TX_REPLICATED = -32002 TX_ROLLED_BACK = -32003 TX_FAILED_TO_COMMIT = -32004 TX_FAILED_TO_REPLICATE = -32005 SCRIPT_ERROR = -32006 INSUFFICIENT_RIGHTS = -32007 DENIED = -32008 TX_LEADER_UNKNOWN = -32009 RPC_NOT_SIGNED = -32010 SERVER_ERROR_END = -32099 def truncate(string, max_len=256): if len(string) > 256: return string[: 256 - 3] + "..." 
else: return string class Request: def __init__(self, id, method, params, jsonrpc="2.0"): self.id = id self.method = method self.params = params self.jsonrpc = jsonrpc def to_dict(self): return { "id": self.id, "method": self.method, "jsonrpc": self.jsonrpc, "params": self.params, } def to_msgpack(self): return msgpack.packb(self.to_dict(), use_bin_type=True) def to_json(self): return json.dumps(self.to_dict()).encode() class Response: def __init__( self, id, result=None, error=None, commit=None, term=None, global_commit=None, jsonrpc="2.0", ): self.id = id self.result = result self.error = error self.jsonrpc = jsonrpc self.commit = commit self.term = term self.global_commit = global_commit self._attrs = set(locals()) - {"self"} def to_dict(self): d = {"id": self.id, "jsonrpc": self.jsonrpc} if self.result is not None: d["result"] = self.result else: d["error"] = self.error return d def _from_parsed(self, parsed): def decode(sl, is_key=False): if is_key and hasattr(sl, "decode"): return sl.decode() if hasattr(sl, "items"): return {decode(k, is_key=True): decode(v) for k, v in sl.items()} elif isinstance(sl, list): return [decode(e) for e in sl] else: return sl parsed_s = { decode(attr, is_key=True): decode(value) for attr, value in parsed.items() } unexpected = parsed_s.keys() - self._attrs if unexpected: raise ValueError("Unexpected keys in response: {}".format(unexpected)) for attr, value in parsed_s.items(): setattr(self, attr, value) def from_msgpack(self, data): parsed = msgpack.unpackb(data) self._from_parsed(parsed) def from_json(self, data): parsed = json.loads(data.decode()) self._from_parsed(parsed) class FramedTLSClient: def __init__(self, host, port, server_hostname, cert=None, key=None, cafile=None): self.host = host self.port = port self.server_hostname = server_hostname self.cert = cert self.key = key self.cafile = cafile self.context = None self.sock = None self.conn = None def connect(self): if self.cafile: self.context = 
ssl.create_default_context(cafile=self.cafile) ca_bytes = open(self.cafile, "rb").read() ca_curve = ( x509.load_pem_x509_certificate(ca_bytes, default_backend()) .public_key() .curve ) if isinstance(ca_curve, asymmetric.ec.SECP256K1): self.context.set_ecdh_curve("secp256k1") else: self.context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) if self.cert and self.key: self.context.load_cert_chain(certfile=self.cert, keyfile=self.key) self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.conn = self.context.wrap_socket( self.sock, server_side=False, server_hostname=self.server_hostname ) self.conn.connect((self.host, self.port)) def send(self, msg): frame = struct.pack("<I", len(msg)) + msg self.conn.sendall(frame) def _read(self): size, = struct.unpack("<I", self.conn.recv(4)) data = self.conn.recv(size) while len(data) < size: data += self.conn.recv(size - len(data)) return data def read(self): for _ in range(5000): r, _, _ = select.select([self.conn], [], [], 0) if r: return self._read() else: time.sleep(0.01) def disconnect(self): self.conn.close() class Stream: def __init__(self, jsonrpc="2.0", format="msgpack"): self.jsonrpc = jsonrpc self.seqno = 0 self.pending = {} self.format = format def request(self, method, params): r = Request(self.seqno, method, params, self.jsonrpc) self.seqno += 1 return r def response(self, id): return self.pending.pop(id, None) def update(self, msg): r = Response(0) getattr(r, "from_{}".format(self.format))(msg) self.pending[r.id] = r class RPCLogger: def log_request(self, request, name, description): LOG.info( truncate( "{} #{} {} {}{}".format( name, request.id, request.method, request.params, description ) ) ) def log_response(self, response): LOG.debug( truncate( "#{} {}".format( response.id, { k: v for k, v in (response.__dict__ or {}).items() if not k.startswith("_") }, ) ) ) class RPCFileLogger(RPCLogger): def __init__(self, path): self.path = path def log_request(self, request, name, description): with 
open(self.path, "a") as f: f.write(">> Request:" + os.linesep) json.dump(request.to_dict(), f, indent=2) f.write(os.linesep) def log_response(self, response): with open(self.path, "a") as f: f.write("<< Response:" + os.linesep) json.dump(response.to_dict(), f, indent=2) f.write(os.linesep) class FramedTLSJSONRPCClient: def __init__( self, host, port, server_hostname, cert=None, key=None, cafile=None, version="2.0", format="msgpack", description=None, ): self.client = FramedTLSClient(host, port, server_hostname, cert, key, cafile) self.stream = Stream(version, format=format) self.format = format self.name = "[{}:{}]".format(host, port) self.description = description self.rpc_loggers = (RPCLogger(),) def connect(self): return self.client.connect() def disconnect(self): return self.client.disconnect() def request(self, method, params): r = self.stream.request(method, params) self.client.send(getattr(r, "to_{}".format(self.format))()) description = "" if self.description: description = " ({})".format(self.description) for logger in self.rpc_loggers: logger.log_request(r, self.name, description) return r.id def tick(self): msg = self.client.read() self.stream.update(msg) def response(self, id): self.tick() r = self.stream.response(id) for logger in self.rpc_loggers: logger.log_response(r) return r def do(self, method, params, expected_result=None, expected_error_code=None): id = self.request(method, params) r = self.response(id) if expected_result is not None: assert expected_result == r.result if expected_error_code is not None: assert expected_error_code.value == r.error["code"] return r def rpc(self, method, params): id = self.request(method, params) return self.response(id) @contextlib.contextmanager def client( host, port, server_hostname="users", cert=None, key=None, cafile=None, version="2.0", format="msgpack", description=None, log_file=None, ): c = FramedTLSJSONRPCClient( host, port, server_hostname, cert, key, cafile, version, format, description ) if log_file 
is not None: c.rpc_loggers += (RPCFileLogger(log_file),) c.connect() try: yield c finally: c.disconnect()
true
true
f716d560b71358551851abec5503a1ab0331080f
8,345
py
Python
tests/providers/test_automotive.py
MarcelRobeer/faker
016ef66c6852ed7d5f198b54dc620bd784ce58c2
[ "MIT" ]
null
null
null
tests/providers/test_automotive.py
MarcelRobeer/faker
016ef66c6852ed7d5f198b54dc620bd784ce58c2
[ "MIT" ]
null
null
null
tests/providers/test_automotive.py
MarcelRobeer/faker
016ef66c6852ed7d5f198b54dc620bd784ce58c2
[ "MIT" ]
null
null
null
import re from typing import Pattern from faker.providers.automotive.de_DE import Provider as DeDeAutomotiveProvider from faker.providers.automotive.es_ES import Provider as EsEsAutomotiveProvider from faker.providers.automotive.ro_RO import Provider as RoRoAutomotiveProvider from faker.providers.automotive.ru_RU import Provider as RuRuAutomotiveProvider from faker.providers.automotive.sk_SK import Provider as SkSkAutomotiveProvider from faker.providers.automotive.tr_TR import Provider as TrTrAutomotiveProvider class _SimpleAutomotiveTestMixin: """Use this test mixin for simple license plate validation""" def perform_extra_checks(self, license_plate, match): pass def test_license_plate(self, faker, num_samples): for _ in range(num_samples): license_plate = faker.license_plate() match = self.license_plate_pattern.fullmatch(license_plate) assert match self.perform_extra_checks(license_plate, match) class TestSkSk(_SimpleAutomotiveTestMixin): """Test sk_SK automotive provider methods""" license_plate_pattern: Pattern = re.compile(r'(?P<prefix>[A-Z]{2})\d{3}[A-Z]{2}') def perform_extra_checks(self, license_plate, match): assert match.group('prefix') in SkSkAutomotiveProvider.license_plate_prefix class TestPtBr(_SimpleAutomotiveTestMixin): """Test pt_BR automotive provider methods""" license_plate_pattern: Pattern = re.compile(r'[A-Z]{3}-\d{4}') class TestPtPt(_SimpleAutomotiveTestMixin): """Test pt_PT automotive provider methods""" license_plate_pattern: Pattern = re.compile( r'\d{2}-\d{2}-[A-Z]{2}|' r'\d{2}-[A-Z]{2}-\d{2}|' r'[A-Z]{2}-\d{2}-\d{2}|' r'[A-Z]{2}-\d{2}-[A-Z]{2}', ) class TestHeIl(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile(r'(\d{3}-\d{2}-\d{3})|(\d{2}-\d{3}-\d{2})') class TestHuHu(_SimpleAutomotiveTestMixin): """Test hu_HU automotive provider methods""" license_plate_pattern: Pattern = re.compile(r'[A-Z]{3}-\d{3}') class TestDeDe(_SimpleAutomotiveTestMixin): """Test de_DE automotive provider methods""" license_plate_pattern: 
Pattern = re.compile( r'(?P<prefix>[A-Z\u00D6\u00DC]{1,3})-[A-Z]{1,2}-[1-9]{1,4}', re.UNICODE, ) def perform_extra_checks(self, license_plate, match): assert match.group('prefix') in DeDeAutomotiveProvider.license_plate_prefix class TestSvSe(_SimpleAutomotiveTestMixin): """Test sv_SE automotive provider methods""" license_plate_pattern: Pattern = re.compile(r'[A-Z]{3} \d{2}[\dA-Z]') class TestPlPl: def test_License_plate(self, faker, num_samples): pattern: Pattern = re.compile(r'{patterns}'.format(patterns='|'.join(faker.license_plate_regex_formats()))) for _ in range(num_samples): plate = faker.license_plate() assert pattern.fullmatch(plate) class TestEnPh(_SimpleAutomotiveTestMixin): """Test en_PH automotive provider methods""" license_plate_pattern: Pattern = re.compile(r'[A-Z]{2}\d{4,5}|[A-Z]{3}\d{3,4}') motorcycle_pattern: Pattern = re.compile(r'[A-Z]{2}\d{4,5}') automobile_pattern: Pattern = re.compile(r'[A-Z]{3}\d{3,4}') def test_motorcycle_plate(self, faker, num_samples): for _ in range(num_samples): assert self.motorcycle_pattern.match(faker.motorcycle_license_plate()) def test_automobile_plate(self, faker, num_samples): for _ in range(num_samples): assert self.automobile_pattern.match(faker.automobile_license_plate()) def test_protocol_plate(self, faker, num_samples): for _ in range(num_samples): protocol_plate = faker.protocol_license_plate() assert int(protocol_plate) != 15 and 1 <= int(protocol_plate) <= 17 class TestFilPh(TestEnPh): """Test fil_PH automotive provider methods""" pass class TestTlPh(TestEnPh): """Test tl_PH automotive provider methods""" pass class TestRuRu(_SimpleAutomotiveTestMixin): """Test ru_RU automotive provider methods""" _plate_letters = ''.join(RuRuAutomotiveProvider.license_plate_letters) license_plate_pattern: Pattern = re.compile( r'(?:' r'(?P<private_plate_prefix>[{0}]\d\d\d[{0}][{0}])|' r'(?P<public_transport_plate_prefix>[{0}][{0}]\d\d\d)|' r'(?P<trailer_plate_prefix>[{0}][{0}]\d\d\d\d)|' 
r'(?P<police_plate_prefix>[{0}]\d\d\d\d)|' r'(?P<military_plate_prefix>\d\d\d\d[{0}][{0}])|' r'(?P<plate_number_special>00\dCD\d|00\dD\d\d\d|00\dT\d\d\d)' r') (?P<plate_suffix>.*)'.format(_plate_letters), ) def perform_extra_checks(self, license_plate, match): plate_suffix = match.group('plate_suffix') assert plate_suffix in RuRuAutomotiveProvider.license_plate_suffix def test_vehicle_category(self, faker, num_samples): for _ in range(num_samples): vehicle_category = faker.vehicle_category() assert isinstance(vehicle_category, str) assert vehicle_category in RuRuAutomotiveProvider.vehicle_categories class TestFrFr(_SimpleAutomotiveTestMixin): """Test fr_FR automotive provider methods""" license_plate_pattern: Pattern = re.compile(r'\d{3}-[A-Z]{3}-\d{2}|[A-Z]{2}-\d{3}-[A-Z]{2}') class TestNoNo(_SimpleAutomotiveTestMixin): """Test no_NO automotive provider methods""" license_plate_pattern: Pattern = re.compile(r'[A-Z]{2} \d{5}') class TestEsEs: """Test es_ES automotive provider methods""" new_format_pattern: Pattern = re.compile(r'\d{4}\s[A-Z]{3}') old_format_pattern: Pattern = re.compile(r'(?P<province_prefix>[A-Z]{1,2})\s\d{4}\s[A-Z]{2}') def test_plate_new_format(self, faker, num_samples): for _ in range(num_samples): plate = faker.license_plate_unified() assert isinstance(plate, str) assert self.new_format_pattern.match(plate) def test_plate_old_format(self, faker, num_samples): for _ in range(num_samples): plate = faker.license_plate_by_province() assert isinstance(plate, str) match = self.old_format_pattern.match(plate) assert match assert match.group('province_prefix') in EsEsAutomotiveProvider.province_prefix def test_plate_old_format_explicit_province_prefix(self, faker, num_samples): for _ in range(num_samples): plate = faker.license_plate_by_province(province_prefix="CA") assert isinstance(plate, str) assert self.old_format_pattern.match(plate) assert plate[:2] == "CA" def test_plate_format(self, faker, num_samples): for _ in range(num_samples): plate = 
faker.license_plate() assert isinstance(plate, str) assert self.new_format_pattern.match(plate) or self.old_format_pattern.match(plate) class TestThTh(_SimpleAutomotiveTestMixin): """Test th_TH automotive provider methods""" license_plate_pattern: Pattern = re.compile( r'(\d [ก-ฮ]{2} \d{1,4})|' # car r'([ก-ฮ]{2} \d{1,4})|' # car r'([ก-ฮ]{3} \d{1,3})|' # motorcycle r'(\d{2}-\d{4})', # truck ) class TestTrTr(_SimpleAutomotiveTestMixin): """Test tr_TR automotive provider methods""" license_plate_pattern: Pattern = re.compile( r'\d{2} [A-Z] \d{4}|' r'\d{2} [A-Z] \d{5}|' r'\d{2} [A-Z]{2} \d{3}|' r'\d{2} [A-Z]{2} \d{4}|' r'\d{2} [A-Z]{3} \d{2}|' r'\d{2} [A-Z]{3} \d{3}', ) def perform_extra_checks(self, license_plate, match): [city_code, letters, _] = license_plate.split(' ') assert int(city_code) in range(1, 82) assert all(letter in TrTrAutomotiveProvider.ascii_uppercase_turkish for letter in letters) class TestRoRo(_SimpleAutomotiveTestMixin): """Test ro_RO automotive provider methods""" license_plate_pattern: Pattern = re.compile( r'(?P<prefix>[A-Z]{1,2})-\d{2,3}-[A-Z]{3}') def perform_extra_checks(self, license_plate, match): assert match.group('prefix') in RoRoAutomotiveProvider.license_plate_prefix class TestElGr(_SimpleAutomotiveTestMixin): """Test el_GR automotive provider methods""" license_plate_pattern = re.compile(r'^(?P<prefix>[A-Z]{2,3}) \d{4}$')
37.59009
115
0.670461
import re from typing import Pattern from faker.providers.automotive.de_DE import Provider as DeDeAutomotiveProvider from faker.providers.automotive.es_ES import Provider as EsEsAutomotiveProvider from faker.providers.automotive.ro_RO import Provider as RoRoAutomotiveProvider from faker.providers.automotive.ru_RU import Provider as RuRuAutomotiveProvider from faker.providers.automotive.sk_SK import Provider as SkSkAutomotiveProvider from faker.providers.automotive.tr_TR import Provider as TrTrAutomotiveProvider class _SimpleAutomotiveTestMixin: def perform_extra_checks(self, license_plate, match): pass def test_license_plate(self, faker, num_samples): for _ in range(num_samples): license_plate = faker.license_plate() match = self.license_plate_pattern.fullmatch(license_plate) assert match self.perform_extra_checks(license_plate, match) class TestSkSk(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile(r'(?P<prefix>[A-Z]{2})\d{3}[A-Z]{2}') def perform_extra_checks(self, license_plate, match): assert match.group('prefix') in SkSkAutomotiveProvider.license_plate_prefix class TestPtBr(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile(r'[A-Z]{3}-\d{4}') class TestPtPt(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile( r'\d{2}-\d{2}-[A-Z]{2}|' r'\d{2}-[A-Z]{2}-\d{2}|' r'[A-Z]{2}-\d{2}-\d{2}|' r'[A-Z]{2}-\d{2}-[A-Z]{2}', ) class TestHeIl(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile(r'(\d{3}-\d{2}-\d{3})|(\d{2}-\d{3}-\d{2})') class TestHuHu(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile(r'[A-Z]{3}-\d{3}') class TestDeDe(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile( r'(?P<prefix>[A-Z\u00D6\u00DC]{1,3})-[A-Z]{1,2}-[1-9]{1,4}', re.UNICODE, ) def perform_extra_checks(self, license_plate, match): assert match.group('prefix') in DeDeAutomotiveProvider.license_plate_prefix class TestSvSe(_SimpleAutomotiveTestMixin): 
license_plate_pattern: Pattern = re.compile(r'[A-Z]{3} \d{2}[\dA-Z]') class TestPlPl: def test_License_plate(self, faker, num_samples): pattern: Pattern = re.compile(r'{patterns}'.format(patterns='|'.join(faker.license_plate_regex_formats()))) for _ in range(num_samples): plate = faker.license_plate() assert pattern.fullmatch(plate) class TestEnPh(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile(r'[A-Z]{2}\d{4,5}|[A-Z]{3}\d{3,4}') motorcycle_pattern: Pattern = re.compile(r'[A-Z]{2}\d{4,5}') automobile_pattern: Pattern = re.compile(r'[A-Z]{3}\d{3,4}') def test_motorcycle_plate(self, faker, num_samples): for _ in range(num_samples): assert self.motorcycle_pattern.match(faker.motorcycle_license_plate()) def test_automobile_plate(self, faker, num_samples): for _ in range(num_samples): assert self.automobile_pattern.match(faker.automobile_license_plate()) def test_protocol_plate(self, faker, num_samples): for _ in range(num_samples): protocol_plate = faker.protocol_license_plate() assert int(protocol_plate) != 15 and 1 <= int(protocol_plate) <= 17 class TestFilPh(TestEnPh): pass class TestTlPh(TestEnPh): pass class TestRuRu(_SimpleAutomotiveTestMixin): _plate_letters = ''.join(RuRuAutomotiveProvider.license_plate_letters) license_plate_pattern: Pattern = re.compile( r'(?:' r'(?P<private_plate_prefix>[{0}]\d\d\d[{0}][{0}])|' r'(?P<public_transport_plate_prefix>[{0}][{0}]\d\d\d)|' r'(?P<trailer_plate_prefix>[{0}][{0}]\d\d\d\d)|' r'(?P<police_plate_prefix>[{0}]\d\d\d\d)|' r'(?P<military_plate_prefix>\d\d\d\d[{0}][{0}])|' r'(?P<plate_number_special>00\dCD\d|00\dD\d\d\d|00\dT\d\d\d)' r') (?P<plate_suffix>.*)'.format(_plate_letters), ) def perform_extra_checks(self, license_plate, match): plate_suffix = match.group('plate_suffix') assert plate_suffix in RuRuAutomotiveProvider.license_plate_suffix def test_vehicle_category(self, faker, num_samples): for _ in range(num_samples): vehicle_category = faker.vehicle_category() assert 
isinstance(vehicle_category, str) assert vehicle_category in RuRuAutomotiveProvider.vehicle_categories class TestFrFr(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile(r'\d{3}-[A-Z]{3}-\d{2}|[A-Z]{2}-\d{3}-[A-Z]{2}') class TestNoNo(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile(r'[A-Z]{2} \d{5}') class TestEsEs: new_format_pattern: Pattern = re.compile(r'\d{4}\s[A-Z]{3}') old_format_pattern: Pattern = re.compile(r'(?P<province_prefix>[A-Z]{1,2})\s\d{4}\s[A-Z]{2}') def test_plate_new_format(self, faker, num_samples): for _ in range(num_samples): plate = faker.license_plate_unified() assert isinstance(plate, str) assert self.new_format_pattern.match(plate) def test_plate_old_format(self, faker, num_samples): for _ in range(num_samples): plate = faker.license_plate_by_province() assert isinstance(plate, str) match = self.old_format_pattern.match(plate) assert match assert match.group('province_prefix') in EsEsAutomotiveProvider.province_prefix def test_plate_old_format_explicit_province_prefix(self, faker, num_samples): for _ in range(num_samples): plate = faker.license_plate_by_province(province_prefix="CA") assert isinstance(plate, str) assert self.old_format_pattern.match(plate) assert plate[:2] == "CA" def test_plate_format(self, faker, num_samples): for _ in range(num_samples): plate = faker.license_plate() assert isinstance(plate, str) assert self.new_format_pattern.match(plate) or self.old_format_pattern.match(plate) class TestThTh(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile( r'(\d [ก-ฮ]{2} \d{1,4})|' r'([ก-ฮ]{2} \d{1,4})|' r'([ก-ฮ]{3} \d{1,3})|' r'(\d{2}-\d{4})', ) class TestTrTr(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile( r'\d{2} [A-Z] \d{4}|' r'\d{2} [A-Z] \d{5}|' r'\d{2} [A-Z]{2} \d{3}|' r'\d{2} [A-Z]{2} \d{4}|' r'\d{2} [A-Z]{3} \d{2}|' r'\d{2} [A-Z]{3} \d{3}', ) def perform_extra_checks(self, license_plate, match): [city_code, letters, _] = 
license_plate.split(' ') assert int(city_code) in range(1, 82) assert all(letter in TrTrAutomotiveProvider.ascii_uppercase_turkish for letter in letters) class TestRoRo(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile( r'(?P<prefix>[A-Z]{1,2})-\d{2,3}-[A-Z]{3}') def perform_extra_checks(self, license_plate, match): assert match.group('prefix') in RoRoAutomotiveProvider.license_plate_prefix class TestElGr(_SimpleAutomotiveTestMixin): license_plate_pattern = re.compile(r'^(?P<prefix>[A-Z]{2,3}) \d{4}$')
true
true
f716d625081f9ebcae5efd23c12ecb9272c56c04
887
py
Python
qubo_nn/plots/gen_tsne_gen4.py
instance01/qubo-nn
6f8058565f4b6ab4a8300501fc2f67cdaeed482f
[ "MIT" ]
9
2021-09-17T09:40:59.000Z
2022-03-29T13:41:25.000Z
qubo_nn/plots/gen_tsne_gen4.py
instance01/qubo-nn
6f8058565f4b6ab4a8300501fc2f67cdaeed482f
[ "MIT" ]
null
null
null
qubo_nn/plots/gen_tsne_gen4.py
instance01/qubo-nn
6f8058565f4b6ab4a8300501fc2f67cdaeed482f
[ "MIT" ]
4
2022-03-06T19:26:19.000Z
2022-03-29T13:41:37.000Z
import pickle import numpy as np from MulticoreTSNE import MulticoreTSNE as TSNE from qubo_nn.data import LMDBDataLoader from qubo_nn.config import Config cfg_id = '27_gen4' cfg = Config('../').get_cfg(cfg_id) cfg["use_big"] = False lmdb_loader = LMDBDataLoader(cfg, reverse=False, base_path='../') X = [] y = [] for i, data in enumerate(lmdb_loader.train_data_loader): if i > 43: # 44 batches á 500 = 22k (from total of 440k), so 5% break X.extend(data[0].tolist()) y.extend(data[1].tolist()) X = np.array(X) X = X.reshape(-1, 64**2) print(X.shape) for i in [10, 20, 30, 50, 70, 100, 200, 500, 1000]: tsne = TSNE( n_jobs=10, n_iter=5000, perplexity=i, # perplexity=500., # Best. verbose=1 ) Y = tsne.fit_transform(X) with open('tsne_gen4_data%d.pickle' % i, 'wb+') as f: pickle.dump((Y, y), f)
23.972973
68
0.622322
import pickle import numpy as np from MulticoreTSNE import MulticoreTSNE as TSNE from qubo_nn.data import LMDBDataLoader from qubo_nn.config import Config cfg_id = '27_gen4' cfg = Config('../').get_cfg(cfg_id) cfg["use_big"] = False lmdb_loader = LMDBDataLoader(cfg, reverse=False, base_path='../') X = [] y = [] for i, data in enumerate(lmdb_loader.train_data_loader): if i > 43: break X.extend(data[0].tolist()) y.extend(data[1].tolist()) X = np.array(X) X = X.reshape(-1, 64**2) print(X.shape) for i in [10, 20, 30, 50, 70, 100, 200, 500, 1000]: tsne = TSNE( n_jobs=10, n_iter=5000, perplexity=i, verbose=1 ) Y = tsne.fit_transform(X) with open('tsne_gen4_data%d.pickle' % i, 'wb+') as f: pickle.dump((Y, y), f)
true
true
f716d781ce1344228dd00f5ca854221451fb21f6
1,862
py
Python
tests/extras/test_tooltips.py
Akuli/tkinder
c360fbfe086ca09cdd856a8636de05b24e1b7093
[ "MIT" ]
23
2019-01-15T00:07:30.000Z
2022-01-18T06:19:18.000Z
tests/extras/test_tooltips.py
Akuli/tkinder
c360fbfe086ca09cdd856a8636de05b24e1b7093
[ "MIT" ]
12
2019-01-13T19:51:52.000Z
2021-05-17T17:55:51.000Z
tests/extras/test_tooltips.py
Akuli/pythotk
c360fbfe086ca09cdd856a8636de05b24e1b7093
[ "MIT" ]
7
2019-01-13T19:48:26.000Z
2021-04-21T13:30:21.000Z
import time import types import pytest import teek from teek.extras import tooltips def run_event_loop(for_how_long): # this is dumb start = time.time() while time.time() < start + for_how_long: teek.update() @pytest.mark.slow def test_set_tooltip(): window = teek.Window() assert not hasattr(window, '_tooltip_manager') tooltips.set_tooltip(window, None) assert not hasattr(window, '_tooltip_manager') tooltips.set_tooltip(window, 'Boo') assert window._tooltip_manager.text == 'Boo' tooltips.set_tooltip(window, None) assert window._tooltip_manager.text is None tooltips.set_tooltip(window, 'lol') assert window._tooltip_manager.text == 'lol' N = types.SimpleNamespace # because pep8 line length assert not window._tooltip_manager.got_mouse window._tooltip_manager.enter(N(widget=window, rootx=123, rooty=456)) assert window._tooltip_manager.got_mouse assert window._tooltip_manager.mousex == 123 assert window._tooltip_manager.mousey == 456 window._tooltip_manager.motion(N(rootx=789, rooty=101112)) assert window._tooltip_manager.got_mouse assert window._tooltip_manager.mousex == 789 assert window._tooltip_manager.mousey == 101112 run_event_loop(1.1) assert window._tooltip_manager.tipwindow is not None assert window._tooltip_manager.got_mouse window._tooltip_manager.leave(N(widget=window)) assert not window._tooltip_manager.got_mouse assert window._tooltip_manager.tipwindow is None # what happens if the window gets destroyed before it's supposed to show? window._tooltip_manager.enter(N(widget=window, rootx=1, rooty=2)) window._tooltip_manager.leave(N(widget=window)) assert window._tooltip_manager.tipwindow is None run_event_loop(1.1) assert window._tooltip_manager.tipwindow is None
31.559322
77
0.749194
import time import types import pytest import teek from teek.extras import tooltips def run_event_loop(for_how_long): start = time.time() while time.time() < start + for_how_long: teek.update() @pytest.mark.slow def test_set_tooltip(): window = teek.Window() assert not hasattr(window, '_tooltip_manager') tooltips.set_tooltip(window, None) assert not hasattr(window, '_tooltip_manager') tooltips.set_tooltip(window, 'Boo') assert window._tooltip_manager.text == 'Boo' tooltips.set_tooltip(window, None) assert window._tooltip_manager.text is None tooltips.set_tooltip(window, 'lol') assert window._tooltip_manager.text == 'lol' N = types.SimpleNamespace assert not window._tooltip_manager.got_mouse window._tooltip_manager.enter(N(widget=window, rootx=123, rooty=456)) assert window._tooltip_manager.got_mouse assert window._tooltip_manager.mousex == 123 assert window._tooltip_manager.mousey == 456 window._tooltip_manager.motion(N(rootx=789, rooty=101112)) assert window._tooltip_manager.got_mouse assert window._tooltip_manager.mousex == 789 assert window._tooltip_manager.mousey == 101112 run_event_loop(1.1) assert window._tooltip_manager.tipwindow is not None assert window._tooltip_manager.got_mouse window._tooltip_manager.leave(N(widget=window)) assert not window._tooltip_manager.got_mouse assert window._tooltip_manager.tipwindow is None window._tooltip_manager.enter(N(widget=window, rootx=1, rooty=2)) window._tooltip_manager.leave(N(widget=window)) assert window._tooltip_manager.tipwindow is None run_event_loop(1.1) assert window._tooltip_manager.tipwindow is None
true
true
f716d8668d7f3e71327a13ddba27d41f18e2ef20
49
py
Python
ecommerce/shipping.py
broach44/beginning-python
54fb51ce666e263e7a76c37bb39cb6df636886ca
[ "MIT" ]
null
null
null
ecommerce/shipping.py
broach44/beginning-python
54fb51ce666e263e7a76c37bb39cb6df636886ca
[ "MIT" ]
null
null
null
ecommerce/shipping.py
broach44/beginning-python
54fb51ce666e263e7a76c37bb39cb6df636886ca
[ "MIT" ]
null
null
null
def calc_shipping(): print("calc shipping")
12.25
26
0.673469
def calc_shipping(): print("calc shipping")
true
true
f716d9590903bcc9299b90cc70855136916fd55d
1,613
py
Python
old_versions/TCA_2_2/TCA_V_2_2_1/code/TCASpacePartitioning.py
OSADP/TCA
25bc1c1db00393cc6b8c6764610bf381494dfcb9
[ "Apache-2.0" ]
1
2021-05-22T00:06:09.000Z
2021-05-22T00:06:09.000Z
old_versions/TCA_2_2/TCA_V_2_2_1/code/TCASpacePartitioning.py
OSADP/TCA
25bc1c1db00393cc6b8c6764610bf381494dfcb9
[ "Apache-2.0" ]
null
null
null
old_versions/TCA_2_2/TCA_V_2_2_1/code/TCASpacePartitioning.py
OSADP/TCA
25bc1c1db00393cc6b8c6764610bf381494dfcb9
[ "Apache-2.0" ]
null
null
null
#standard import unittest import math # from collections import OrderedDict from random import uniform #external import pandas as pd from scipy.spatial import KDTree def Find_RSE_range(df, RSEs, minrange): sub_df = df[['vehicle_ID', 'location_x', 'location_y']] tree = KDTree(sub_df[['location_x', 'location_y']].values) rse_points = list(RSEs.RSEListLocations.values()) locs_index = tree.query_ball_point(rse_points, r=minrange) #link RSE back to vehicles rse_vehicles = {} for c, RSE in enumerate(RSEs.RSEListLocations.keys()): if len(locs_index[c]) > 0: vlist = sub_df.iloc[locs_index[c]]['vehicle_ID'].tolist() rse_vehicles[RSE] = vlist else: rse_vehicles[RSE] = [] return rse_vehicles class BufferContentCheck(unittest.TestCase): def setUp(self): pass def test_whole(self): minrange = 4.00 num_vehicles = 10000 num_RSE = 30 # Vehicles_loc = {x:(uniform(0, 200), uniform(0, 200)) for x in range(num_vehicles)} # df = pd.DataFrame({ # 'Vid' : ['V' + str(x) for x in Vehicles_loc.keys()], # 'x' : [Vehicles_loc[x][0] for x in Vehicles_loc], # 'y' : [Vehicles_loc[x][1] for x in Vehicles_loc], # }) # df = df.set_index(['Vid'], drop=False) # RSEs = OrderedDict({'RSE' + str(x):(uniform(0, 200), uniform(0, 200)) for x in range(num_RSE)}) # rse_info = Find_RSE_range(df, RSEs, minrange) if __name__ == '__main__': unittest.main()
26.442623
105
0.594544
import unittest import math from random import uniform import pandas as pd from scipy.spatial import KDTree def Find_RSE_range(df, RSEs, minrange): sub_df = df[['vehicle_ID', 'location_x', 'location_y']] tree = KDTree(sub_df[['location_x', 'location_y']].values) rse_points = list(RSEs.RSEListLocations.values()) locs_index = tree.query_ball_point(rse_points, r=minrange) rse_vehicles = {} for c, RSE in enumerate(RSEs.RSEListLocations.keys()): if len(locs_index[c]) > 0: vlist = sub_df.iloc[locs_index[c]]['vehicle_ID'].tolist() rse_vehicles[RSE] = vlist else: rse_vehicles[RSE] = [] return rse_vehicles class BufferContentCheck(unittest.TestCase): def setUp(self): pass def test_whole(self): minrange = 4.00 num_vehicles = 10000 num_RSE = 30 if __name__ == '__main__': unittest.main()
true
true
f716da328dd870f9e5d396ba489f4d3b821fa89f
720
py
Python
bin/make_halo_cnf_data.py
muntazirabidi/boss-sbi
fae016eb10b64153391499276d238ccdf660df88
[ "MIT" ]
1
2022-03-15T18:13:02.000Z
2022-03-15T18:13:02.000Z
bin/make_halo_cnf_data.py
muntazirabidi/boss-sbi
fae016eb10b64153391499276d238ccdf660df88
[ "MIT" ]
11
2020-12-16T18:26:31.000Z
2021-04-02T14:58:37.000Z
bin/make_halo_cnf_data.py
muntazirabidi/boss-sbi
fae016eb10b64153391499276d238ccdf660df88
[ "MIT" ]
2
2021-03-29T17:33:54.000Z
2021-04-01T16:07:07.000Z
import os import numpy as np from simbig import halos as Halos np.random.seed(918234) theta_x_pairs = [] for i in range(1000): # read in halo catalog halos = Halos.Quijote_LHC_HR(i, z=0.5) # impose random halo mass limit as a proxy for baryonic effect Mlim = np.random.uniform(12.5, 13.0) theta_cosmo = Halos.Quijote_LHC_cosmo(i) # observable: I'm goign to use Nhalo as a proxy for some observable Nhalos = np.sum(np.array(halos['Mass']) > Mlim) # (parameter, data) pair theta_x = np.concatenate([theta_cosmo, [Mlim], [Nhalos]]) theta_x_pairs.append(theta_x) np.save(os.path.join(os.environ['QUIJOTE_DIR'], 'chang', 'halo_cnf_data.npy'), np.array(theta_x_pairs))
28.8
103
0.6875
import os import numpy as np from simbig import halos as Halos np.random.seed(918234) theta_x_pairs = [] for i in range(1000): halos = Halos.Quijote_LHC_HR(i, z=0.5) Mlim = np.random.uniform(12.5, 13.0) theta_cosmo = Halos.Quijote_LHC_cosmo(i) Nhalos = np.sum(np.array(halos['Mass']) > Mlim) # (parameter, data) pair theta_x = np.concatenate([theta_cosmo, [Mlim], [Nhalos]]) theta_x_pairs.append(theta_x) np.save(os.path.join(os.environ['QUIJOTE_DIR'], 'chang', 'halo_cnf_data.npy'), np.array(theta_x_pairs))
true
true
f716da8fd6c32ba467ca558698e188b418c2559d
10,502
py
Python
scripts/build-ios.py
ArtronicsGame/mobile-sdk
492afb38fbf372d2e76534b8f92e433b7cfb69b5
[ "BSD-3-Clause" ]
null
null
null
scripts/build-ios.py
ArtronicsGame/mobile-sdk
492afb38fbf372d2e76534b8f92e433b7cfb69b5
[ "BSD-3-Clause" ]
null
null
null
scripts/build-ios.py
ArtronicsGame/mobile-sdk
492afb38fbf372d2e76534b8f92e433b7cfb69b5
[ "BSD-3-Clause" ]
null
null
null
import os import sys import re import shutil import argparse import string from build.sdk_build_utils import * IOS_ARCHS = ['i386', 'x86_64', 'armv7', 'arm64'] def updateUmbrellaHeader(filename, args): with open(filename, 'r') as f: lines = f.readlines() for i in range(0, len(lines)): match = re.search('^\s*#import\s+"(.*)".*', lines[i].rstrip('\n')) if match: lines[i] = '#import <CartoMobileSDK/%s>\n' % match.group(1) for i in range(0, len(lines)): if re.search('^\s*#define\s+.*$', lines[i].rstrip('\n')): break lines = lines[:i+1] + ['\n'] + ['#define %s\n' % define for define in args.defines.split(';') if define] + lines[i+1:] with open(filename, 'w') as f: f.writelines(lines) def updatePrivateHeader(filename, args): with open(filename, 'r') as f: lines = f.readlines() for i in range(0, len(lines)): match = re.search('^\s*#include\s+"(.*)".*', lines[i].rstrip('\n')) if match: lines[i] = '#include <CartoMobileSDK/%s>\n' % match.group(1) match = re.search('^\s*#import\s+"(.*)".*', lines[i].rstrip('\n')) if match: lines[i] = '#import <CartoMobileSDK/%s>\n' % match.group(1) with open(filename, 'w') as f: f.writelines(lines) def buildModuleMap(filename, publicHeaders, privateHeaders): with open(filename, 'w') as f: f.write('framework module CartoMobileSDK {\n') f.write(' umbrella header "CartoMobileSDK.h"\n') for header in publicHeaders: f.write(' header "%s"\n' % header) f.write(' export *\n') f.write(' module * { export * }\n') f.write(' explicit module Private {\n') f.write(' requires cplusplus\n') for header in privateHeaders: f.write(' header "%s"\n' % header) f.write(' }\n') f.write('}\n') def buildIOSLib(args, arch): platform = 'OS' if arch.startswith('arm') else 'SIMULATOR' version = getVersion(args.buildnumber) if args.configuration == 'Release' else 'Devel' baseDir = getBaseDir() buildDir = getBuildDir('ios', '%s-%s' % (platform, arch)) defines = ["-D%s" % define for define in args.defines.split(';') if define] options = ["-D%s" % option for option in 
args.cmakeoptions.split(';') if option] if not cmake(args, buildDir, options + [ '-G', 'Xcode', '-DCMAKE_SYSTEM_NAME=iOS', '-DWRAPPER_DIR=%s' % ('%s/generated/ios-objc/proxies' % baseDir), '-DINCLUDE_OBJC:BOOL=ON', '-DSINGLE_LIBRARY:BOOL=ON', '-DENABLE_BITCODE:BOOL=%s' % ('OFF' if args.stripbitcode else 'ON'), '-DSHARED_LIBRARY:BOOL=%s' % ('ON' if args.sharedlib else 'OFF'), '-DCMAKE_OSX_ARCHITECTURES=%s' % arch, '-DCMAKE_OSX_SYSROOT=iphone%s' % platform.lower(), '-DCMAKE_OSX_DEPLOYMENT_TARGET=%s' % ('9.0' if args.metalangle else '7.0'), '-DCMAKE_BUILD_TYPE=%s' % args.configuration, "-DSDK_CPP_DEFINES=%s" % " ".join(defines), "-DSDK_VERSION='%s'" % version, "-DSDK_PLATFORM='iOS'", '%s/scripts/build' % baseDir ]): return False return cmake(args, buildDir, [ '--build', '.', '--config', args.configuration ]) def buildIOSFramework(args, archs): shutil.rmtree(getDistDir('ios'), True) platformArchs = [('OS' if arch.startswith('arm') else 'SIMULATOR', arch) for arch in archs] baseDir = getBaseDir() distDir = getDistDir('ios') if args.sharedlib: outputDir = '%s/CartoMobileSDK.framework' % distDir else: outputDir = '%s/CartoMobileSDK.framework/Versions/A' % distDir makedirs(outputDir) libFilePaths = [] for platform, arch in platformArchs: libFilePath = "%s/%s-%s/libcarto_mobile_sdk.%s" % (getBuildDir('ios', '%s-%s' % (platform, arch)), args.configuration, 'iphoneos' if arch.startswith("arm") else 'iphonesimulator', 'dylib' if args.sharedlib else 'a') if args.metalangle: mergedLibFilePath = '%s_merged.%s' % tuple(libFilePath.rsplit('.', 1)) angleLibFilePath = "%s/libs-external/angle-metal/%s/libangle.a" % (baseDir, arch) if not execute('libtool', baseDir, '-o', mergedLibFilePath, libFilePath, angleLibFilePath ): return False libFilePath = mergedLibFilePath libFilePaths.append(libFilePath) if not execute('lipo', baseDir, '-output', '%s/CartoMobileSDK' % outputDir, '-create', *libFilePaths ): return False if args.sharedlib: if not execute('install_name_tool', outputDir, '-id', 
'@rpath/CartoMobileSDK.framework/CartoMobileSDK', 'CartoMobileSDK' ): return False if not copyfile('%s/scripts/ios/Info.plist' % baseDir, '%s/Info.plist' % outputDir): return False makedirs('%s/Headers' % outputDir) if not args.sharedlib: if not makesymlink('%s/CartoMobileSDK.framework/Versions' % distDir, 'A', 'Current'): return False if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/Modules', 'Modules'): return False if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/Headers', 'Headers'): return False if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/PrivateHeaders', 'PrivateHeaders'): return False if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/CartoMobileSDK', 'CartoMobileSDK'): return False publicHeaders = [] privateHeaders = [] headerDirTemplates = ['%s/all/native', '%s/ios/native', '%s/ios/objc', '%s/generated/ios-objc/proxies', '%s/libs-external/cglib'] if args.metalangle: headerDirTemplates.append('%s/libs-external/angle-metal/include') for headerDirTemplate in headerDirTemplates: headerDir = headerDirTemplate % baseDir if not os.path.exists(headerDir): continue currentDir = os.getcwd() os.chdir(headerDir) for dirpath, dirnames, filenames in os.walk('.'): for filename in filenames: if filename.endswith('.h'): destDir = '%s/Headers/%s' % (outputDir, dirpath) if headerDirTemplate.find('objc') == -1: destDir = '%s/PrivateHeaders/%s' % (outputDir, dirpath) privateHeaders.append(os.path.normpath(os.path.join(dirpath, filename))) elif filename != 'CartoMobileSDK.h': publicHeaders.append(os.path.normpath(os.path.join(dirpath, filename))) if not (makedirs(destDir) and copyfile(os.path.join(dirpath, filename), '%s/%s' % (destDir, filename))): os.chdir(currentDir) return False if filename == 'CartoMobileSDK.h': updateUmbrellaHeader('%s/%s' % (destDir, filename), args) else: updatePrivateHeader('%s/%s' % (destDir, filename), args) os.chdir(currentDir) makedirs('%s/Modules' % 
outputDir) buildModuleMap('%s/Modules/module.modulemap' % outputDir, publicHeaders, privateHeaders) print("Output available in:\n%s" % distDir) return True def buildIOSCocoapod(args, buildpackage): baseDir = getBaseDir() distDir = getDistDir('ios') version = args.buildversion distName = 'sdk4-ios-%s.zip' % version iosversion = '9.0' if args.metalangle else '7.0' frameworks = (["IOSurface"] if args.metalangle else ["OpenGLES", "GLKit"]) + ["UIKit", "CoreGraphics", "CoreText", "CFNetwork", "Foundation", "CartoMobileSDK"] with open('%s/scripts/ios-cocoapod/CartoMobileSDK.podspec.template' % baseDir, 'r') as f: cocoapodFile = string.Template(f.read()).safe_substitute({ 'baseDir': baseDir, 'distDir': distDir, 'distName': distName, 'version': version, 'iosversion': iosversion, 'frameworks': ', '.join('"%s"' % framework for framework in frameworks) }) with open('%s/CartoMobileSDK.podspec' % distDir, 'w') as f: f.write(cocoapodFile) if buildpackage: try: os.remove('%s/%s' % (distDir, distName)) except: pass if not execute('zip', distDir, '-y', '-r', distName, 'CartoMobileSDK.framework'): return False print("Output available in:\n%s\n\nTo publish, use:\ncd %s\naws s3 cp %s s3://nutifront/sdk_snapshots/%s --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers\npod trunk push\n" % (distDir, distDir, distName, distName)) return True parser = argparse.ArgumentParser() parser.add_argument('--profile', dest='profile', default=getDefaultProfileId(), type=validProfile, help='Build profile') parser.add_argument('--ios-arch', dest='iosarch', default=[], choices=IOS_ARCHS + ['all'], action='append', help='iOS target architectures') parser.add_argument('--defines', dest='defines', default='', help='Defines for compilation') parser.add_argument('--cmake', dest='cmake', default='cmake', help='CMake executable') parser.add_argument('--cmake-options', dest='cmakeoptions', default='', help='CMake options') parser.add_argument('--configuration', dest='configuration', 
default='Release', choices=['Release', 'RelWithDebInfo', 'Debug'], help='Configuration') parser.add_argument('--build-number', dest='buildnumber', default='', help='Build sequence number, goes to version str') parser.add_argument('--build-version', dest='buildversion', default='%s-devel' % SDK_VERSION, help='Build version, goes to distributions') parser.add_argument('--build-cocoapod', dest='buildcocoapod', default=False, action='store_true', help='Build CocoaPod') parser.add_argument('--build-cocoapod-package', dest='buildcocoapodpackage', default=False, action='store_true', help='Build CocoaPod') parser.add_argument('--metalangle', dest='metalangle', default=False, action='store_true', help='Use MetalANGLE instead of Apple GL') parser.add_argument('--strip-bitcode', dest='stripbitcode', default=False, action='store_true', help='Strip bitcode from the built framework') parser.add_argument('--shared-framework', dest='sharedlib', default=False, action='store_true', help='Build shared framework instead of static') args = parser.parse_args() if 'all' in args.iosarch or args.iosarch == []: args.iosarch = IOS_ARCHS args.defines += ';' + getProfile(args.profile).get('defines', '') if args.metalangle: args.defines += ';' + '_CARTO_USE_METALANGLE' args.cmakeoptions += ';' + getProfile(args.profile).get('cmake-options', '') if not checkExecutable(args.cmake, '--help'): print('Failed to find CMake executable. Use --cmake to specify its location') sys.exit(-1) for arch in args.iosarch: if not buildIOSLib(args, arch): sys.exit(-1) if not buildIOSFramework(args, args.iosarch): sys.exit(-1) if args.buildcocoapod or args.buildcocoapodpackage: if not buildIOSCocoapod(args, args.buildcocoapodpackage): sys.exit(-1)
45.463203
245
0.668444
import os import sys import re import shutil import argparse import string from build.sdk_build_utils import * IOS_ARCHS = ['i386', 'x86_64', 'armv7', 'arm64'] def updateUmbrellaHeader(filename, args): with open(filename, 'r') as f: lines = f.readlines() for i in range(0, len(lines)): match = re.search('^\s*#import\s+"(.*)".*', lines[i].rstrip('\n')) if match: lines[i] = '#import <CartoMobileSDK/%s>\n' % match.group(1) for i in range(0, len(lines)): if re.search('^\s*#define\s+.*$', lines[i].rstrip('\n')): break lines = lines[:i+1] + ['\n'] + ['#define %s\n' % define for define in args.defines.split(';') if define] + lines[i+1:] with open(filename, 'w') as f: f.writelines(lines) def updatePrivateHeader(filename, args): with open(filename, 'r') as f: lines = f.readlines() for i in range(0, len(lines)): match = re.search('^\s*#include\s+"(.*)".*', lines[i].rstrip('\n')) if match: lines[i] = '#include <CartoMobileSDK/%s>\n' % match.group(1) match = re.search('^\s*#import\s+"(.*)".*', lines[i].rstrip('\n')) if match: lines[i] = '#import <CartoMobileSDK/%s>\n' % match.group(1) with open(filename, 'w') as f: f.writelines(lines) def buildModuleMap(filename, publicHeaders, privateHeaders): with open(filename, 'w') as f: f.write('framework module CartoMobileSDK {\n') f.write(' umbrella header "CartoMobileSDK.h"\n') for header in publicHeaders: f.write(' header "%s"\n' % header) f.write(' export *\n') f.write(' module * { export * }\n') f.write(' explicit module Private {\n') f.write(' requires cplusplus\n') for header in privateHeaders: f.write(' header "%s"\n' % header) f.write(' }\n') f.write('}\n') def buildIOSLib(args, arch): platform = 'OS' if arch.startswith('arm') else 'SIMULATOR' version = getVersion(args.buildnumber) if args.configuration == 'Release' else 'Devel' baseDir = getBaseDir() buildDir = getBuildDir('ios', '%s-%s' % (platform, arch)) defines = ["-D%s" % define for define in args.defines.split(';') if define] options = ["-D%s" % option for option in 
args.cmakeoptions.split(';') if option] if not cmake(args, buildDir, options + [ '-G', 'Xcode', '-DCMAKE_SYSTEM_NAME=iOS', '-DWRAPPER_DIR=%s' % ('%s/generated/ios-objc/proxies' % baseDir), '-DINCLUDE_OBJC:BOOL=ON', '-DSINGLE_LIBRARY:BOOL=ON', '-DENABLE_BITCODE:BOOL=%s' % ('OFF' if args.stripbitcode else 'ON'), '-DSHARED_LIBRARY:BOOL=%s' % ('ON' if args.sharedlib else 'OFF'), '-DCMAKE_OSX_ARCHITECTURES=%s' % arch, '-DCMAKE_OSX_SYSROOT=iphone%s' % platform.lower(), '-DCMAKE_OSX_DEPLOYMENT_TARGET=%s' % ('9.0' if args.metalangle else '7.0'), '-DCMAKE_BUILD_TYPE=%s' % args.configuration, "-DSDK_CPP_DEFINES=%s" % " ".join(defines), "-DSDK_VERSION='%s'" % version, "-DSDK_PLATFORM='iOS'", '%s/scripts/build' % baseDir ]): return False return cmake(args, buildDir, [ '--build', '.', '--config', args.configuration ]) def buildIOSFramework(args, archs): shutil.rmtree(getDistDir('ios'), True) platformArchs = [('OS' if arch.startswith('arm') else 'SIMULATOR', arch) for arch in archs] baseDir = getBaseDir() distDir = getDistDir('ios') if args.sharedlib: outputDir = '%s/CartoMobileSDK.framework' % distDir else: outputDir = '%s/CartoMobileSDK.framework/Versions/A' % distDir makedirs(outputDir) libFilePaths = [] for platform, arch in platformArchs: libFilePath = "%s/%s-%s/libcarto_mobile_sdk.%s" % (getBuildDir('ios', '%s-%s' % (platform, arch)), args.configuration, 'iphoneos' if arch.startswith("arm") else 'iphonesimulator', 'dylib' if args.sharedlib else 'a') if args.metalangle: mergedLibFilePath = '%s_merged.%s' % tuple(libFilePath.rsplit('.', 1)) angleLibFilePath = "%s/libs-external/angle-metal/%s/libangle.a" % (baseDir, arch) if not execute('libtool', baseDir, '-o', mergedLibFilePath, libFilePath, angleLibFilePath ): return False libFilePath = mergedLibFilePath libFilePaths.append(libFilePath) if not execute('lipo', baseDir, '-output', '%s/CartoMobileSDK' % outputDir, '-create', *libFilePaths ): return False if args.sharedlib: if not execute('install_name_tool', outputDir, '-id', 
'@rpath/CartoMobileSDK.framework/CartoMobileSDK', 'CartoMobileSDK' ): return False if not copyfile('%s/scripts/ios/Info.plist' % baseDir, '%s/Info.plist' % outputDir): return False makedirs('%s/Headers' % outputDir) if not args.sharedlib: if not makesymlink('%s/CartoMobileSDK.framework/Versions' % distDir, 'A', 'Current'): return False if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/Modules', 'Modules'): return False if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/Headers', 'Headers'): return False if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/PrivateHeaders', 'PrivateHeaders'): return False if not makesymlink('%s/CartoMobileSDK.framework' % distDir, 'Versions/A/CartoMobileSDK', 'CartoMobileSDK'): return False publicHeaders = [] privateHeaders = [] headerDirTemplates = ['%s/all/native', '%s/ios/native', '%s/ios/objc', '%s/generated/ios-objc/proxies', '%s/libs-external/cglib'] if args.metalangle: headerDirTemplates.append('%s/libs-external/angle-metal/include') for headerDirTemplate in headerDirTemplates: headerDir = headerDirTemplate % baseDir if not os.path.exists(headerDir): continue currentDir = os.getcwd() os.chdir(headerDir) for dirpath, dirnames, filenames in os.walk('.'): for filename in filenames: if filename.endswith('.h'): destDir = '%s/Headers/%s' % (outputDir, dirpath) if headerDirTemplate.find('objc') == -1: destDir = '%s/PrivateHeaders/%s' % (outputDir, dirpath) privateHeaders.append(os.path.normpath(os.path.join(dirpath, filename))) elif filename != 'CartoMobileSDK.h': publicHeaders.append(os.path.normpath(os.path.join(dirpath, filename))) if not (makedirs(destDir) and copyfile(os.path.join(dirpath, filename), '%s/%s' % (destDir, filename))): os.chdir(currentDir) return False if filename == 'CartoMobileSDK.h': updateUmbrellaHeader('%s/%s' % (destDir, filename), args) else: updatePrivateHeader('%s/%s' % (destDir, filename), args) os.chdir(currentDir) makedirs('%s/Modules' % 
outputDir) buildModuleMap('%s/Modules/module.modulemap' % outputDir, publicHeaders, privateHeaders) print("Output available in:\n%s" % distDir) return True def buildIOSCocoapod(args, buildpackage): baseDir = getBaseDir() distDir = getDistDir('ios') version = args.buildversion distName = 'sdk4-ios-%s.zip' % version iosversion = '9.0' if args.metalangle else '7.0' frameworks = (["IOSurface"] if args.metalangle else ["OpenGLES", "GLKit"]) + ["UIKit", "CoreGraphics", "CoreText", "CFNetwork", "Foundation", "CartoMobileSDK"] with open('%s/scripts/ios-cocoapod/CartoMobileSDK.podspec.template' % baseDir, 'r') as f: cocoapodFile = string.Template(f.read()).safe_substitute({ 'baseDir': baseDir, 'distDir': distDir, 'distName': distName, 'version': version, 'iosversion': iosversion, 'frameworks': ', '.join('"%s"' % framework for framework in frameworks) }) with open('%s/CartoMobileSDK.podspec' % distDir, 'w') as f: f.write(cocoapodFile) if buildpackage: try: os.remove('%s/%s' % (distDir, distName)) except: pass if not execute('zip', distDir, '-y', '-r', distName, 'CartoMobileSDK.framework'): return False print("Output available in:\n%s\n\nTo publish, use:\ncd %s\naws s3 cp %s s3://nutifront/sdk_snapshots/%s --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers\npod trunk push\n" % (distDir, distDir, distName, distName)) return True parser = argparse.ArgumentParser() parser.add_argument('--profile', dest='profile', default=getDefaultProfileId(), type=validProfile, help='Build profile') parser.add_argument('--ios-arch', dest='iosarch', default=[], choices=IOS_ARCHS + ['all'], action='append', help='iOS target architectures') parser.add_argument('--defines', dest='defines', default='', help='Defines for compilation') parser.add_argument('--cmake', dest='cmake', default='cmake', help='CMake executable') parser.add_argument('--cmake-options', dest='cmakeoptions', default='', help='CMake options') parser.add_argument('--configuration', dest='configuration', 
default='Release', choices=['Release', 'RelWithDebInfo', 'Debug'], help='Configuration') parser.add_argument('--build-number', dest='buildnumber', default='', help='Build sequence number, goes to version str') parser.add_argument('--build-version', dest='buildversion', default='%s-devel' % SDK_VERSION, help='Build version, goes to distributions') parser.add_argument('--build-cocoapod', dest='buildcocoapod', default=False, action='store_true', help='Build CocoaPod') parser.add_argument('--build-cocoapod-package', dest='buildcocoapodpackage', default=False, action='store_true', help='Build CocoaPod') parser.add_argument('--metalangle', dest='metalangle', default=False, action='store_true', help='Use MetalANGLE instead of Apple GL') parser.add_argument('--strip-bitcode', dest='stripbitcode', default=False, action='store_true', help='Strip bitcode from the built framework') parser.add_argument('--shared-framework', dest='sharedlib', default=False, action='store_true', help='Build shared framework instead of static') args = parser.parse_args() if 'all' in args.iosarch or args.iosarch == []: args.iosarch = IOS_ARCHS args.defines += ';' + getProfile(args.profile).get('defines', '') if args.metalangle: args.defines += ';' + '_CARTO_USE_METALANGLE' args.cmakeoptions += ';' + getProfile(args.profile).get('cmake-options', '') if not checkExecutable(args.cmake, '--help'): print('Failed to find CMake executable. Use --cmake to specify its location') sys.exit(-1) for arch in args.iosarch: if not buildIOSLib(args, arch): sys.exit(-1) if not buildIOSFramework(args, args.iosarch): sys.exit(-1) if args.buildcocoapod or args.buildcocoapodpackage: if not buildIOSCocoapod(args, args.buildcocoapodpackage): sys.exit(-1)
true
true
f716db30ad2b5a1b0baa72fd74c8fbb701037f01
1,189
py
Python
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/normalize_salient_map.py
TolyaTalamanov/open_model_zoo
1697e60712df4ca72635a2080a197b9d3bc24129
[ "Apache-2.0" ]
2,201
2018-10-15T14:37:19.000Z
2020-07-16T02:05:51.000Z
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/normalize_salient_map.py
Pandinosaurus/open_model_zoo
2543996541346418919c5cddfb71e33e2cdef080
[ "Apache-2.0" ]
759
2018-10-18T07:43:55.000Z
2020-07-16T01:23:12.000Z
tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/normalize_salient_map.py
Pandinosaurus/open_model_zoo
2543996541346418919c5cddfb71e33e2cdef080
[ "Apache-2.0" ]
808
2018-10-16T14:03:49.000Z
2020-07-15T11:41:45.000Z
""" Copyright (c) 2018-2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import cv2 import numpy as np from .postprocessor import Postprocessor class SalientMapNormalizer(Postprocessor): __provider__ = 'normalize_salience_map' def process_image(self, annotation, prediction): for ann in annotation: gt_mask = ann.mask if len(gt_mask.shape) == 3 and gt_mask.shape[-1] == 3: gt_mask = cv2.cvtColor(gt_mask, cv2.COLOR_BGR2GRAY) gt_mask = gt_mask / 255 gt_mask[gt_mask >= 0.5] = 1 gt_mask[gt_mask < 0.5] = 0 ann.mask = gt_mask.astype(np.uint8) return annotation, prediction
34.970588
72
0.702271
import cv2 import numpy as np from .postprocessor import Postprocessor class SalientMapNormalizer(Postprocessor): __provider__ = 'normalize_salience_map' def process_image(self, annotation, prediction): for ann in annotation: gt_mask = ann.mask if len(gt_mask.shape) == 3 and gt_mask.shape[-1] == 3: gt_mask = cv2.cvtColor(gt_mask, cv2.COLOR_BGR2GRAY) gt_mask = gt_mask / 255 gt_mask[gt_mask >= 0.5] = 1 gt_mask[gt_mask < 0.5] = 0 ann.mask = gt_mask.astype(np.uint8) return annotation, prediction
true
true
f716dbad5d5deabbd30519182541764ab0c17a2f
10,478
py
Python
cynetworkx/algorithms/isomorphism/temporalisomorphvf2.py
Viech/cynetworkx
01a37859c67b752392e9e783c949084964eef2cf
[ "BSD-3-Clause" ]
12
2019-07-23T08:07:53.000Z
2022-03-09T06:13:16.000Z
cynetworkx/algorithms/isomorphism/temporalisomorphvf2.py
Viech/cynetworkx
01a37859c67b752392e9e783c949084964eef2cf
[ "BSD-3-Clause" ]
7
2019-08-30T07:00:00.000Z
2021-12-30T08:02:56.000Z
cynetworkx/algorithms/isomorphism/temporalisomorphvf2.py
Viech/cynetworkx
01a37859c67b752392e9e783c949084964eef2cf
[ "BSD-3-Clause" ]
5
2020-10-10T03:40:32.000Z
2021-11-23T12:28:53.000Z
# -*- coding: utf-8 -*- """ ***************************** Time-respecting VF2 Algorithm ***************************** An extension of the VF2 algorithm for time-respecting graph ismorphism testing in temporal graphs. A temporal graph is one in which edges contain a datetime attribute, denoting when interaction occurred between the incident nodes. A time-respecting subgraph of a temporal graph is a subgraph such that all interactions incident to a node occurred within a time threshold, delta, of each other. A directed time-respecting subgraph has the added constraint that incoming interactions to a node must precede outgoing interactions from the same node - this enforces a sense of directed flow. Introduction ------------ The TimeRespectingGraphMatcher and TimeRespectingDiGraphMatcher extend the GraphMatcher and DiGraphMatcher classes, respectively, to include temporal constraints on matches. This is achieved through a semantic check, via the semantic_feasibility() function. As well as including G1 (the graph in which to seek embeddings) and G2 (the subgraph structure of interest), the name of the temporal attribute on the edges and the time threshold, delta, must be supplied as arguments to the matching constructors. A delta of zero is the strictest temporal constraint on the match - only embeddings in which all interactions occur at the same time will be returned. A delta of one day will allow embeddings in which adjacent interactions occur up to a day apart. Examples -------- Examples will be provided when the datetime type has been incorporated. Temporal Subgraph Isomorphism ----------------------------- A brief discussion of the somewhat diverse current literature will be included here. References ---------- [1] Redmond, U. and Cunningham, P. Temporal subgraph isomorphism. In: The 2013 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM). Niagara Falls, Canada; 2013: pages 1451 - 1452. 
[65] For a discussion of the literature on temporal networks: [3] P. Holme and J. Saramaki. Temporal networks. Physics Reports, 519(3):97–125, 2012. Notes ----- Handles directed and undirected graphs and graphs with parallel edges. """ from __future__ import absolute_import import cynetworkx as nx from datetime import datetime, timedelta from .isomorphvf2 import GraphMatcher, DiGraphMatcher __all__ = ['TimeRespectingGraphMatcher', 'TimeRespectingDiGraphMatcher'] class TimeRespectingGraphMatcher(GraphMatcher): def __init__(self, G1, G2, temporal_attribute_name, delta): """Initialize TimeRespectingGraphMatcher. G1 and G2 should be nx.Graph or nx.MultiGraph instances. Examples -------- To create a TimeRespectingGraphMatcher which checks for syntactic and semantic feasibility: >>> from cynetworkx.algorithms import isomorphism >>> G1 = nx.Graph(nx.path_graph(4, create_using=nx.Graph())) >>> G2 = nx.Graph(nx.path_graph(4, create_using=nx.Graph())) >>> GM = isomorphism.TimeRespectingGraphMatcher(G1, G2, 'date', timedelta(days=1)) """ self.temporal_attribute_name = temporal_attribute_name self.delta = delta super(TimeRespectingGraphMatcher, self).__init__(G1, G2) def one_hop(self, Gx, Gx_node, neighbors): """ Edges one hop out from a node in the mapping should be time-respecting with respect to each other. """ dates = [] for n in neighbors: if type(Gx) == type(nx.Graph()): # Graph G[u][v] returns the data dictionary. dates.append(Gx[Gx_node][n][self.temporal_attribute_name]) else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. for edge in Gx[Gx_node][n].values(): # Iterates all edges between node pair. dates.append(edge[self.temporal_attribute_name]) if any(x is None for x in dates): raise ValueError('Datetime not supplied for at least one edge.') return not dates or max(dates) - min(dates) <= self.delta def two_hop(self, Gx, core_x, Gx_node, neighbors): """ Paths of length 2 from Gx_node should be time-respecting. 
""" return all(self.one_hop(Gx, v, [n for n in Gx[v] if n in core_x] + [Gx_node]) for v in neighbors) def semantic_feasibility(self, G1_node, G2_node): """Returns True if adding (G1_node, G2_node) is semantically feasible. Any subclass which redefines semantic_feasibility() must maintain the self.tests if needed, to keep the match() method functional. Implementations should consider multigraphs. """ neighbors = [n for n in self.G1[G1_node] if n in self.core_1] if not self.one_hop(self.G1, G1_node, neighbors): # Fail fast on first node. return False if not self.two_hop(self.G1, self.core_1, G1_node, neighbors): return False # Otherwise, this node is semantically feasible! return True class TimeRespectingDiGraphMatcher(DiGraphMatcher): def __init__(self, G1, G2, temporal_attribute_name, delta): """Initialize TimeRespectingDiGraphMatcher. G1 and G2 should be nx.DiGraph or nx.MultiDiGraph instances. Examples -------- To create a TimeRespectingDiGraphMatcher which checks for syntactic and semantic feasibility: >>> from cynetworkx.algorithms import isomorphism >>> G1 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) >>> G2 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) >>> GM = isomorphism.TimeRespectingDiGraphMatcher(G1, G2, 'date', timedelta(days=1)) """ self.temporal_attribute_name = temporal_attribute_name self.delta = delta super(TimeRespectingDiGraphMatcher, self).__init__(G1, G2) def get_pred_dates(self, Gx, Gx_node, core_x, pred): """ Get the dates of edges from predecessors. """ pred_dates = [] if type(Gx) == type(nx.DiGraph()): # Graph G[u][v] returns the data dictionary. for n in pred: pred_dates.append(Gx[n][Gx_node][self.temporal_attribute_name]) else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. for n in pred: for edge in Gx[n][Gx_node].values(): # Iterates all edge data between node pair. 
pred_dates.append(edge[self.temporal_attribute_name]) return pred_dates def get_succ_dates(self, Gx, Gx_node, core_x, succ): """ Get the dates of edges to successors. """ succ_dates = [] if type(Gx) == type(nx.DiGraph()): # Graph G[u][v] returns the data dictionary. for n in succ: succ_dates.append(Gx[Gx_node][n][self.temporal_attribute_name]) else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. for n in succ: for edge in Gx[Gx_node][n].values(): # Iterates all edge data between node pair. succ_dates.append(edge[self.temporal_attribute_name]) return succ_dates def one_hop(self, Gx, Gx_node, core_x, pred, succ): """ The ego node. """ pred_dates = self.get_pred_dates(Gx, Gx_node, core_x, pred) succ_dates = self.get_succ_dates(Gx, Gx_node, core_x, succ) return self.test_one(pred_dates, succ_dates) and self.test_two(pred_dates, succ_dates) def two_hop_pred(self, Gx, Gx_node, core_x, pred): """ The predeccessors of the ego node. """ return all(self.one_hop(Gx, p, core_x, self.preds(Gx, core_x, p), self.succs(Gx, core_x, p, Gx_node)) for p in pred) def two_hop_succ(self, Gx, Gx_node, core_x, succ): """ The successors of the ego node. """ return all(self.one_hop(Gx, s, core_x, self.preds(Gx, core_x, s, Gx_node), self.succs(Gx, core_x, s)) for s in succ) def preds(self, Gx, core_x, v, Gx_node=None): pred = [n for n in Gx.predecessors(v) if n in core_x] if Gx_node: pred.append(Gx_node) return pred def succs(self, Gx, core_x, v, Gx_node=None): succ = [n for n in Gx.successors(v) if n in core_x] if Gx_node: succ.append(Gx_node) return succ def test_one(self, pred_dates, succ_dates): """ Edges one hop out from Gx_node in the mapping should be time-respecting with respect to each other, regardless of direction. """ time_respecting = True dates = pred_dates + succ_dates if any(x is None for x in dates): raise ValueError('Date or datetime not supplied for at least one edge.') dates.sort() # Small to large. 
if 0 < len(dates) and not (dates[-1] - dates[0] <= self.delta): time_respecting = False return time_respecting def test_two(self, pred_dates, succ_dates): """ Edges from a dual Gx_node in the mapping should be ordered in a time-respecting manner. """ time_respecting = True pred_dates.sort() succ_dates.sort() # First out before last in; negative of the necessary condition for time-respect. if 0 < len(succ_dates) and 0 < len(pred_dates) and succ_dates[0] < pred_dates[-1]: time_respecting = False return time_respecting def semantic_feasibility(self, G1_node, G2_node): """Returns True if adding (G1_node, G2_node) is semantically feasible. Any subclass which redefines semantic_feasibility() must maintain the self.tests if needed, to keep the match() method functional. Implementations should consider multigraphs. """ pred, succ = [n for n in self.G1.predecessors(G1_node) if n in self.core_1], [ n for n in self.G1.successors(G1_node) if n in self.core_1] if not self.one_hop(self.G1, G1_node, self.core_1, pred, succ): # Fail fast on first node. return False if not self.two_hop_pred(self.G1, G1_node, self.core_1, pred): return False if not self.two_hop_succ(self.G1, G1_node, self.core_1, succ): return False # Otherwise, this node is semantically feasible! return True
38.664207
124
0.655564
from __future__ import absolute_import import cynetworkx as nx from datetime import datetime, timedelta from .isomorphvf2 import GraphMatcher, DiGraphMatcher __all__ = ['TimeRespectingGraphMatcher', 'TimeRespectingDiGraphMatcher'] class TimeRespectingGraphMatcher(GraphMatcher): def __init__(self, G1, G2, temporal_attribute_name, delta): self.temporal_attribute_name = temporal_attribute_name self.delta = delta super(TimeRespectingGraphMatcher, self).__init__(G1, G2) def one_hop(self, Gx, Gx_node, neighbors): dates = [] for n in neighbors: if type(Gx) == type(nx.Graph()): dates.append(Gx[Gx_node][n][self.temporal_attribute_name]) else: for edge in Gx[Gx_node][n].values(): dates.append(edge[self.temporal_attribute_name]) if any(x is None for x in dates): raise ValueError('Datetime not supplied for at least one edge.') return not dates or max(dates) - min(dates) <= self.delta def two_hop(self, Gx, core_x, Gx_node, neighbors): return all(self.one_hop(Gx, v, [n for n in Gx[v] if n in core_x] + [Gx_node]) for v in neighbors) def semantic_feasibility(self, G1_node, G2_node): neighbors = [n for n in self.G1[G1_node] if n in self.core_1] if not self.one_hop(self.G1, G1_node, neighbors): return False if not self.two_hop(self.G1, self.core_1, G1_node, neighbors): return False return True class TimeRespectingDiGraphMatcher(DiGraphMatcher): def __init__(self, G1, G2, temporal_attribute_name, delta): self.temporal_attribute_name = temporal_attribute_name self.delta = delta super(TimeRespectingDiGraphMatcher, self).__init__(G1, G2) def get_pred_dates(self, Gx, Gx_node, core_x, pred): pred_dates = [] if type(Gx) == type(nx.DiGraph()): for n in pred: pred_dates.append(Gx[n][Gx_node][self.temporal_attribute_name]) else: for n in pred: for edge in Gx[n][Gx_node].values(): pred_dates.append(edge[self.temporal_attribute_name]) return pred_dates def get_succ_dates(self, Gx, Gx_node, core_x, succ): succ_dates = [] if type(Gx) == type(nx.DiGraph()): for n in succ: 
succ_dates.append(Gx[Gx_node][n][self.temporal_attribute_name]) else: for n in succ: for edge in Gx[Gx_node][n].values(): succ_dates.append(edge[self.temporal_attribute_name]) return succ_dates def one_hop(self, Gx, Gx_node, core_x, pred, succ): pred_dates = self.get_pred_dates(Gx, Gx_node, core_x, pred) succ_dates = self.get_succ_dates(Gx, Gx_node, core_x, succ) return self.test_one(pred_dates, succ_dates) and self.test_two(pred_dates, succ_dates) def two_hop_pred(self, Gx, Gx_node, core_x, pred): return all(self.one_hop(Gx, p, core_x, self.preds(Gx, core_x, p), self.succs(Gx, core_x, p, Gx_node)) for p in pred) def two_hop_succ(self, Gx, Gx_node, core_x, succ): return all(self.one_hop(Gx, s, core_x, self.preds(Gx, core_x, s, Gx_node), self.succs(Gx, core_x, s)) for s in succ) def preds(self, Gx, core_x, v, Gx_node=None): pred = [n for n in Gx.predecessors(v) if n in core_x] if Gx_node: pred.append(Gx_node) return pred def succs(self, Gx, core_x, v, Gx_node=None): succ = [n for n in Gx.successors(v) if n in core_x] if Gx_node: succ.append(Gx_node) return succ def test_one(self, pred_dates, succ_dates): time_respecting = True dates = pred_dates + succ_dates if any(x is None for x in dates): raise ValueError('Date or datetime not supplied for at least one edge.') dates.sort() if 0 < len(dates) and not (dates[-1] - dates[0] <= self.delta): time_respecting = False return time_respecting def test_two(self, pred_dates, succ_dates): time_respecting = True pred_dates.sort() succ_dates.sort() if 0 < len(succ_dates) and 0 < len(pred_dates) and succ_dates[0] < pred_dates[-1]: time_respecting = False return time_respecting def semantic_feasibility(self, G1_node, G2_node): pred, succ = [n for n in self.G1.predecessors(G1_node) if n in self.core_1], [ n for n in self.G1.successors(G1_node) if n in self.core_1] if not self.one_hop(self.G1, G1_node, self.core_1, pred, succ): return False if not self.two_hop_pred(self.G1, G1_node, self.core_1, pred): return False if not 
self.two_hop_succ(self.G1, G1_node, self.core_1, succ): return False return True
true
true
f716dcbb37cfe64fdd5bab29b9a4cfa9b3b000b2
33,058
py
Python
pyccel/ast/operators.py
jalalium/pyccel
4f3d9a359e42c16440e9c841059257d292a8361b
[ "MIT" ]
null
null
null
pyccel/ast/operators.py
jalalium/pyccel
4f3d9a359e42c16440e9c841059257d292a8361b
[ "MIT" ]
null
null
null
pyccel/ast/operators.py
jalalium/pyccel
4f3d9a359e42c16440e9c841059257d292a8361b
[ "MIT" ]
null
null
null
#------------------------------------------------------------------------------------------# # This file is part of Pyccel which is released under MIT License. See the LICENSE file or # # go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. # #------------------------------------------------------------------------------------------# # TODO [EB 12.03.21]: Remove pylint command with PR #797 # pylint: disable=W0201 """ Module handling all python builtin operators These operators all have a precision as detailed here: https://docs.python.org/3/reference/expressions.html#operator-precedence They also have specific rules to determine the dtype, precision, rank, shape """ from ..errors.errors import Errors, PyccelSemanticError from .basic import PyccelAstNode from .datatypes import (NativeBool, NativeInteger, NativeReal, NativeComplex, NativeString, default_precision, NativeNumeric) from .literals import Literal, LiteralInteger, LiteralFloat, LiteralComplex, Nil from .literals import convert_to_literal errors = Errors() __all__ = ( 'PyccelOperator', 'PyccelPow', 'PyccelAdd', 'PyccelMinus', 'PyccelMul', 'PyccelDiv', 'PyccelMod', 'PyccelFloorDiv', 'PyccelEq', 'PyccelNe', 'PyccelLt', 'PyccelLe', 'PyccelGt', 'PyccelGe', 'PyccelAnd', 'PyccelOr', 'PyccelNot', 'PyccelAssociativeParenthesis', 'PyccelUnary', 'PyccelUnarySub', 'Relational', 'PyccelIs', 'PyccelIsNot', 'IfTernaryOperator' ) #============================================================================== def broadcast(shape_1, shape_2): """ This function broadcast two shapes using numpy broadcasting rules """ from pyccel.ast.sympy_helper import pyccel_to_sympy a = len(shape_1) b = len(shape_2) if a>b: new_shape_2 = (LiteralInteger(1),)*(a-b) + tuple(shape_2) new_shape_1 = shape_1 elif b>a: new_shape_1 = (LiteralInteger(1),)*(b-a) + tuple(shape_1) new_shape_2 = shape_2 else: new_shape_2 = shape_2 new_shape_1 = shape_1 new_shape = [] for e1,e2 in zip(new_shape_1, new_shape_2): used_names 
= set() symbol_map = {} sy_e1 = pyccel_to_sympy(e1, symbol_map, used_names) sy_e2 = pyccel_to_sympy(e2, symbol_map, used_names) if sy_e1 == sy_e2: new_shape.append(e1) elif sy_e1 == 1: new_shape.append(e2) elif sy_e2 == 1: new_shape.append(e1) elif sy_e1.is_constant() and not sy_e2.is_constant(): new_shape.append(e1) elif sy_e2.is_constant() and not sy_e1.is_constant(): new_shape.append(e2) elif not sy_e2.is_constant() and not sy_e1.is_constant()\ and not (sy_e1 - sy_e2).is_constant(): new_shape.append(e1) else: shape1_code = '({})'.format(' '.join([str(s)+',' for s in shape_1])) shape2_code = '({})'.format(' '.join([str(s)+',' for s in shape_2])) msg = 'operands could not be broadcast together with shapes {} {}' msg = msg.format(shape1_code, shape2_code) raise PyccelSemanticError(msg) return tuple(new_shape) #============================================================================== class PyccelOperator(PyccelAstNode): """ Abstract superclass for all builtin operators. The __init__ function is common but the functions called by __init__ are specialised Parameters ---------- args: tuple The arguments passed to the operator """ __slots__ = ('_args', ) _attribute_nodes = ('_args',) def __init__(self, *args): self._args = tuple(self._handle_precedence(args)) if self.stage == 'syntactic': super().__init__() return self._set_dtype() self._set_shape_rank() # rank is None for lambda functions self._set_order() super().__init__() def _set_dtype(self): self._dtype, self._precision = self._calculate_dtype(*self._args) # pylint: disable=no-member def _set_shape_rank(self): self._shape, self._rank = self._calculate_shape_rank(*self._args) # pylint: disable=no-member @property def precedence(self): """ The precedence of the operator as defined here: https://docs.python.org/3/reference/expressions.html#operator-precedence """ return self._precedence def _handle_precedence(self, args): """ Insert parentheses where necessary by examining the precedence of the operator e.g: 
PyccelMul(a,PyccelAdd(b,c)) means: a*(b+c) so this input will give: PyccelMul(a, PyccelAssociativeParenthesis(PyccelAdd(b,c))) Parentheses are also added were they are required for clarity Parameters ---------- args: tuple The arguments passed to the operator Results ------- args: tuple The arguments with the parentheses inserted """ precedence = [getattr(a, 'precedence', 17) for a in args] if min(precedence) <= self._precedence: new_args = [] for i, (a,p) in enumerate(zip(args, precedence)): if (p < self._precedence or (p == self._precedence and i != 0)): new_args.append(PyccelAssociativeParenthesis(a)) else: new_args.append(a) args = tuple(new_args) return args def __str__(self): return repr(self) def _set_order(self): """ Sets the shape and rank This is chosen to match the arguments if they are in agreement. Otherwise it defaults to 'C' """ if self._rank is not None and self._rank > 1: if all(a.order == self._args[0].order for a in self._args): self._order = self._args[0].order else: self._order = 'C' else: self._order = None @property def args(self): """ Arguments of the operator """ return self._args #============================================================================== class PyccelUnaryOperator(PyccelOperator): """ Abstract superclass representing a python operator with only one argument Parameters ---------- arg: PyccelAstNode The argument passed to the operator """ __slots__ = ('_dtype', '_precision','_shape','_rank','_order') def __init__(self, arg): super().__init__(arg) @staticmethod def _calculate_dtype(*args): """ Sets the dtype and precision They are chosen to match the argument """ a = args[0] dtype = a.dtype precision = a.precision return dtype, precision @staticmethod def _calculate_shape_rank(*args): """ Sets the shape and rank They are chosen to match the argument """ a = args[0] rank = a.rank shape = a.shape return shape, rank #============================================================================== class 
PyccelUnary(PyccelUnaryOperator): """ Class representing a call to the python positive operator. I.e: +a is equivalent to: PyccelUnary(a) Parameters ---------- arg: PyccelAstNode The argument passed to the operator """ __slots__ = () _precedence = 14 def _handle_precedence(self, args): args = PyccelUnaryOperator._handle_precedence(self, args) args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelUnary) else a for a in args) return args def __repr__(self): return '+{}'.format(repr(self.args[0])) #============================================================================== class PyccelUnarySub(PyccelUnary): """ Class representing a call to the python negative operator. I.e: -a is equivalent to: PyccelUnarySub(a) Parameters ---------- arg: PyccelAstNode The argument passed to the operator """ __slots__ = () def __repr__(self): return '-{}'.format(repr(self.args[0])) #============================================================================== class PyccelNot(PyccelUnaryOperator): """ Class representing a call to the python not operator. 
I.e: not a is equivalent to: PyccelNot(a) Parameters ---------- arg: PyccelAstNode The argument passed to the operator """ __slots__ = () _precedence = 6 @staticmethod def _calculate_dtype(*args): """ Sets the dtype and precision They are chosen to match the argument unless the class has a _dtype or _precision member """ dtype = NativeBool() precision = default_precision['bool'] return dtype, precision @staticmethod def _calculate_shape_rank(*args): """ Sets the shape and rank They are chosen to match the argument unless the class has a _shape or _rank member """ rank = 0 shape = () return shape, rank def __repr__(self): return 'not {}'.format(repr(self.args[0])) #============================================================================== class PyccelAssociativeParenthesis(PyccelUnaryOperator): """ Class representing parentheses Parameters ---------- arg: PyccelAstNode The argument in the PyccelAssociativeParenthesis """ __slots__ = () # ok _precedence = 18 def _handle_precedence(self, args): return args def __repr__(self): return '({})'.format(repr(self.args[0])) #============================================================================== class PyccelBinaryOperator(PyccelOperator): """ Abstract superclass representing a python operator with two arguments Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = ('_dtype','_precision','_shape','_rank','_order') def __init__(self, arg1, arg2, simplify = False): super().__init__(arg1, arg2) @classmethod def _calculate_dtype(cls, *args): """ Sets the dtype and precision If one argument is a string then all arguments must be strings If the arguments are numeric then the dtype and precision match the broadest type and the largest precision e.g. 
1 + 2j -> PyccelAdd(LiteralInteger, LiteralComplex) -> complex """ integers = [a for a in args if a.dtype in (NativeInteger(),NativeBool())] reals = [a for a in args if a.dtype is NativeReal()] complexes = [a for a in args if a.dtype is NativeComplex()] strs = [a for a in args if a.dtype is NativeString()] if strs: return cls._handle_str_type(strs) assert len(integers + reals + complexes) == 0 elif complexes: return cls._handle_complex_type(complexes) elif reals: return cls._handle_real_type(reals) elif integers: return cls._handle_integer_type(integers) else: raise TypeError('cannot determine the type of {}'.format(args)) @staticmethod def _handle_str_type(strs): """ Set dtype and precision when both arguments are strings """ raise TypeError("unsupported operand type(s) for /: 'str' and 'str'") @staticmethod def _handle_complex_type(complexes): """ Set dtype and precision when the result is complex """ dtype = NativeComplex() precision = max(a.precision for a in complexes) return dtype, precision @staticmethod def _handle_real_type(reals): """ Set dtype and precision when the result is real """ dtype = NativeReal() precision = max(a.precision for a in reals) return dtype, precision @staticmethod def _handle_integer_type(integers): """ Set dtype and precision when the result is integer """ dtype = NativeInteger() precision = max(a.precision for a in integers) return dtype, precision @staticmethod def _calculate_shape_rank(*args): """ Sets the shape and rank Strings must be scalars. 
For numeric types the rank and shape is determined according to numpy broadcasting rules where possible """ strs = [a for a in args if a.dtype is NativeString()] if strs: other = [a for a in args if a.dtype in (NativeInteger(), NativeBool(), NativeReal(), NativeComplex())] assert len(other) == 0 rank = 0 shape = () else: ranks = [a.rank for a in args] shapes = [a.shape for a in args] if None in ranks: rank = None shape = None elif all(sh is not None for tup in shapes for sh in tup): s = broadcast(args[0].shape, args[1].shape) shape = s rank = len(s) else: rank = max(a.rank for a in args) shape = [None]*rank return shape, rank #============================================================================== class PyccelArithmeticOperator(PyccelBinaryOperator): """ Abstract superclass representing a python arithmetic operator This class is necessary to handle specific precedence rules for arithmetic operators I.e. to handle the error: Extension: Unary operator following arithmetic operator (use parentheses) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () def _handle_precedence(self, args): args = PyccelBinaryOperator._handle_precedence(self, args) args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelUnary) else a for a in args) return args #============================================================================== class PyccelPow(PyccelArithmeticOperator): """ Class representing a call to the python exponent operator. 
I.e: a ** b is equivalent to: PyccelPow(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () _precedence = 15 def __repr__(self): return '{} ** {}'.format(self.args[0], self.args[1]) #============================================================================== class PyccelAdd(PyccelArithmeticOperator): """ Class representing a call to the python addition operator. I.e: a + b is equivalent to: PyccelAdd(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () _precedence = 12 def __new__(cls, arg1, arg2, simplify = False): if simplify: if isinstance(arg2, PyccelUnarySub): return PyccelMinus(arg1, arg2.args[0], simplify = True) dtype, precision = cls._calculate_dtype(arg1, arg2) if isinstance(arg1, Literal) and isinstance(arg2, Literal): return convert_to_literal(arg1.python_value + arg2.python_value, dtype, precision) if dtype == arg2.dtype and precision == arg2.precision and \ isinstance(arg1, Literal) and arg1.python_value == 0: return arg2 if dtype == arg1.dtype and precision == arg1.precision and \ isinstance(arg2, Literal) and arg2.python_value == 0: return arg1 if isinstance(arg1, (LiteralInteger, LiteralFloat)) and \ isinstance(arg2, LiteralComplex) and \ arg2.real == LiteralFloat(0): return LiteralComplex(arg1, arg2.imag) elif isinstance(arg2, (LiteralInteger, LiteralFloat)) and \ isinstance(arg1, LiteralComplex) and \ arg1.real == LiteralFloat(0): return LiteralComplex(arg2, arg1.imag) else: return super().__new__(cls) @staticmethod def _handle_str_type(strs): dtype = NativeString() precision = None return dtype, precision def __repr__(self): return '{} + {}'.format(self.args[0], self.args[1]) #============================================================================== class PyccelMul(PyccelArithmeticOperator): """ Class 
representing a call to the python multiplication operator. I.e: a * b is equivalent to: PyccelMul(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () _precedence = 13 def __new__(cls, arg1, arg2, simplify = False): if simplify: if (arg1 == 1): return arg2 if (arg2 == 1): return arg1 if (arg1 == 0 or arg2 == 0): dtype, precision = cls._calculate_dtype(arg1, arg2) return convert_to_literal(0, dtype, precision) if (isinstance(arg1, PyccelUnarySub) and arg1.args[0] == 1): return PyccelUnarySub(arg2) if (isinstance(arg2, PyccelUnarySub) and arg2.args[0] == 1): return PyccelUnarySub(arg1) if isinstance(arg1, Literal) and isinstance(arg2, Literal): dtype, precision = cls._calculate_dtype(arg1, arg2) return convert_to_literal(arg1.python_value * arg2.python_value, dtype, precision) return super().__new__(cls) def __repr__(self): return '{} * {}'.format(self.args[0], self.args[1]) #============================================================================== class PyccelMinus(PyccelArithmeticOperator): """ Class representing a call to the python subtraction operator. 
I.e: a - b is equivalent to: PyccelMinus(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () _precedence = 12 def __new__(cls, arg1, arg2, simplify = False): if simplify: if isinstance(arg2, PyccelUnarySub): return PyccelAdd(arg1, arg2.args[0], simplify = True) elif isinstance(arg1, Literal) and isinstance(arg2, Literal): dtype, precision = cls._calculate_dtype(arg1, arg2) return convert_to_literal(arg1.python_value - arg2.python_value, dtype, precision) if isinstance(arg1, LiteralFloat) and \ isinstance(arg2, LiteralComplex) and \ arg2.real == LiteralFloat(0): return LiteralComplex(arg1, -arg2.imag.python_value) elif isinstance(arg2, LiteralFloat) and \ isinstance(arg1, LiteralComplex) and \ arg1.real == LiteralFloat(0): return LiteralComplex(-arg2.python_value, arg1.imag) else: return super().__new__(cls) def __repr__(self): return '{} - {}'.format(repr(self.args[0]), repr(self.args[1])) #============================================================================== class PyccelDiv(PyccelArithmeticOperator): """ Class representing a call to the python division operator. 
I.e: a / b is equivalent to: PyccelDiv(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () _precedence = 13 def __new__(cls, arg1, arg2, simplify=False): if simplify: if (arg2 == 1): return arg1 return super().__new__(cls) @staticmethod def _handle_integer_type(integers): dtype = NativeReal() precision = default_precision['real'] return dtype, precision def __repr__(self): return '{} + {}'.format(self.args[0], self.args[1]) def __repr__(self): return '{} / {}'.format(self.args[0], self.args[1]) #============================================================================== class PyccelMod(PyccelArithmeticOperator): """ Class representing a call to the python modulo operator. I.e: a % b is equivalent to: PyccelMod(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () _precedence = 13 def __repr__(self): return '{} % {}'.format(self.args[0], self.args[1]) #============================================================================== class PyccelFloorDiv(PyccelArithmeticOperator): """ Class representing a call to the python integer division operator. 
I.e: a // b is equivalent to: PyccelFloorDiv(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () _precedence = 13 def __repr__(self): return '{} // {}'.format(self.args[0], self.args[1]) #============================================================================== class PyccelComparisonOperator(PyccelBinaryOperator): """ Abstract superclass representing a python comparison operator with two arguments Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () _precedence = 7 @staticmethod def _calculate_dtype(*args): dtype = NativeBool() precision = default_precision['bool'] return dtype, precision #============================================================================== class PyccelEq(PyccelComparisonOperator): """ Class representing a call to the python equality operator. I.e: a == b is equivalent to: PyccelEq(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () def __repr__(self): return '{} == {}'.format(self.args[0], self.args[1]) class PyccelNe(PyccelComparisonOperator): """ Class representing a call to the python inequality operator. I.e: a != b is equivalent to: PyccelEq(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () def __repr__(self): return '{} != {}'.format(self.args[0], self.args[1]) class PyccelLt(PyccelComparisonOperator): """ Class representing a call to the python less than operator. 
I.e: a < b is equivalent to: PyccelEq(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () def __repr__(self): return '{} < {}'.format(self.args[0], self.args[1]) class PyccelLe(PyccelComparisonOperator): """ Class representing a call to the python less or equal operator. I.e: a <= b is equivalent to: PyccelEq(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () def __repr__(self): return '{} <= {}'.format(self.args[0], self.args[1]) class PyccelGt(PyccelComparisonOperator): """ Class representing a call to the python greater than operator. I.e: a > b is equivalent to: PyccelEq(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () def __repr__(self): return '{} > {}'.format(self.args[0], self.args[1]) class PyccelGe(PyccelComparisonOperator): """ Class representing a call to the python greater or equal operator. 
I.e: a >= b is equivalent to: PyccelEq(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () def __repr__(self): return '{} >= {}'.format(self.args[0], self.args[1]) #============================================================================== class PyccelBooleanOperator(PyccelOperator): """ Abstract superclass representing a python boolean operator with two arguments Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ dtype = NativeBool() precision = default_precision['bool'] rank = 0 shape = () order = None __slots__ = () def _set_order(self): pass def _set_dtype(self): pass def _set_shape_rank(self): pass #============================================================================== class PyccelAnd(PyccelBooleanOperator): """ Class representing a call to the python AND operator. I.e: a and b is equivalent to: PyccelAnd(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () _precedence = 5 def _handle_precedence(self, args): args = PyccelBooleanOperator._handle_precedence(self, args) args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelOr) else a for a in args) return args def __repr__(self): return '{} and {}'.format(self.args[0], self.args[1]) #============================================================================== class PyccelOr(PyccelBooleanOperator): """ Class representing a call to the python OR operator. 
I.e: a or b is equivalent to: PyccelOr(a, b) Parameters ---------- arg1: PyccelAstNode The first argument passed to the operator arg2: PyccelAstNode The second argument passed to the operator """ __slots__ = () _precedence = 4 def _handle_precedence(self, args): args = PyccelBooleanOperator._handle_precedence(self, args) args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelAnd) else a for a in args) return args def __repr__(self): return '{} or {}'.format(self.args[0], self.args[1]) #============================================================================== class PyccelIs(PyccelBooleanOperator): """Represents a is expression in the code. Examples -------- >>> from pyccel.ast.operators import PyccelIs >>> from pyccel.ast.literals import Nil >>> from pyccel.ast.internals import PyccelSymbol >>> x = PyccelSymbol('x') >>> PyccelIs(x, Nil()) PyccelIs(x, None) """ __slots__ = () _precedence = 7 def __init__(self, arg1, arg2): super().__init__(arg1, arg2) @property def lhs(self): """ First operator argument""" return self._args[0] @property def rhs(self): """ First operator argument""" return self._args[1] def __repr__(self): return '{} is {}'.format(self.args[0], self.args[1]) #============================================================================== class PyccelIsNot(PyccelIs): """Represents a is expression in the code. 
Examples -------- >>> from pyccel.ast.operators import PyccelIsNot >>> from pyccel.ast.literals import Nil >>> from pyccel.ast.internals import PyccelSymbol >>> x = PyccelSymbol('x') >>> PyccelIsNot(x, Nil()) PyccelIsNot(x, None) """ __slots__ = () def __repr__(self): return '{} is not {}'.format(self.args[0], self.args[1]) #============================================================================== class IfTernaryOperator(PyccelOperator): """Represent a ternary conditional operator in the code, of the form (a if cond else b) Parameters ---------- args : args : type list format : condition , value_if_true, value_if_false Examples -------- >>> from pyccel.ast.internals import PyccelSymbol >>> from pyccel.ast.core import Assign >>> from pyccel.ast.operators import IfTernaryOperator >>> n = PyccelSymbol('n') >>> x = 5 if n > 1 else 2 >>> IfTernaryOperator(PyccelGt(n > 1), 5, 2) IfTernaryOperator(PyccelGt(n > 1), 5, 2) """ __slots__ = ('_dtype','_precision','_shape','_rank','_order') _precedence = 3 def __init__(self, cond, value_true, value_false): super().__init__(cond, value_true, value_false) if self.stage == 'syntactic': return if isinstance(value_true , Nil) or isinstance(value_false, Nil): errors.report('None is not implemented for Ternary Operator', severity='fatal') if isinstance(value_true , NativeString) or isinstance(value_false, NativeString): errors.report('String is not implemented for Ternary Operator', severity='fatal') if value_true.dtype != value_false.dtype: if value_true.dtype not in NativeNumeric or value_false.dtype not in NativeNumeric: errors.report('The types are incompatible in IfTernaryOperator', severity='fatal') if value_false.rank != value_true.rank : errors.report('Ternary Operator results should have the same rank', severity='fatal') if value_false.shape != value_true.shape : errors.report('Ternary Operator results should have the same shape', severity='fatal') @staticmethod def _calculate_dtype(cond, value_true, value_false): """ 
Sets the dtype and precision for IfTernaryOperator """ if value_true.dtype in NativeNumeric and value_false.dtype in NativeNumeric: dtype = max([value_true.dtype, value_false.dtype], key = NativeNumeric.index) else: dtype = value_true.dtype precision = max([value_true.precision, value_false.precision]) return dtype, precision @staticmethod def _calculate_shape_rank(cond, value_true, value_false): """ Sets the shape and rank and the order for IfTernaryOperator """ shape = value_true.shape rank = value_true.rank if rank is not None and rank > 1: if value_false.order != value_true.order : errors.report('Ternary Operator results should have the same order', severity='fatal') return shape, rank @property def cond(self): """ The condition property for IfTernaryOperator class """ return self._args[0] @property def value_true(self): """ The value_if_cond_true property for IfTernaryOperator class """ return self._args[1] @property def value_false(self): """ The value_if_cond_false property for IfTernaryOperator class """ return self._args[2] #============================================================================== Relational = (PyccelEq, PyccelNe, PyccelLt, PyccelLe, PyccelGt, PyccelGe, PyccelAnd, PyccelOr, PyccelNot, PyccelIs, PyccelIsNot)
30.134913
134
0.563918
rom ..errors.errors import Errors, PyccelSemanticError from .basic import PyccelAstNode from .datatypes import (NativeBool, NativeInteger, NativeReal, NativeComplex, NativeString, default_precision, NativeNumeric) from .literals import Literal, LiteralInteger, LiteralFloat, LiteralComplex, Nil from .literals import convert_to_literal errors = Errors() __all__ = ( 'PyccelOperator', 'PyccelPow', 'PyccelAdd', 'PyccelMinus', 'PyccelMul', 'PyccelDiv', 'PyccelMod', 'PyccelFloorDiv', 'PyccelEq', 'PyccelNe', 'PyccelLt', 'PyccelLe', 'PyccelGt', 'PyccelGe', 'PyccelAnd', 'PyccelOr', 'PyccelNot', 'PyccelAssociativeParenthesis', 'PyccelUnary', 'PyccelUnarySub', 'Relational', 'PyccelIs', 'PyccelIsNot', 'IfTernaryOperator' ) def broadcast(shape_1, shape_2): from pyccel.ast.sympy_helper import pyccel_to_sympy a = len(shape_1) b = len(shape_2) if a>b: new_shape_2 = (LiteralInteger(1),)*(a-b) + tuple(shape_2) new_shape_1 = shape_1 elif b>a: new_shape_1 = (LiteralInteger(1),)*(b-a) + tuple(shape_1) new_shape_2 = shape_2 else: new_shape_2 = shape_2 new_shape_1 = shape_1 new_shape = [] for e1,e2 in zip(new_shape_1, new_shape_2): used_names = set() symbol_map = {} sy_e1 = pyccel_to_sympy(e1, symbol_map, used_names) sy_e2 = pyccel_to_sympy(e2, symbol_map, used_names) if sy_e1 == sy_e2: new_shape.append(e1) elif sy_e1 == 1: new_shape.append(e2) elif sy_e2 == 1: new_shape.append(e1) elif sy_e1.is_constant() and not sy_e2.is_constant(): new_shape.append(e1) elif sy_e2.is_constant() and not sy_e1.is_constant(): new_shape.append(e2) elif not sy_e2.is_constant() and not sy_e1.is_constant()\ and not (sy_e1 - sy_e2).is_constant(): new_shape.append(e1) else: shape1_code = '({})'.format(' '.join([str(s)+',' for s in shape_1])) shape2_code = '({})'.format(' '.join([str(s)+',' for s in shape_2])) msg = 'operands could not be broadcast together with shapes {} {}' msg = msg.format(shape1_code, shape2_code) raise PyccelSemanticError(msg) return tuple(new_shape) class PyccelOperator(PyccelAstNode): 
__slots__ = ('_args', ) _attribute_nodes = ('_args',) def __init__(self, *args): self._args = tuple(self._handle_precedence(args)) if self.stage == 'syntactic': super().__init__() return self._set_dtype() self._set_shape_rank() self._set_order() super().__init__() def _set_dtype(self): self._dtype, self._precision = self._calculate_dtype(*self._args) def _set_shape_rank(self): self._shape, self._rank = self._calculate_shape_rank(*self._args) @property def precedence(self): return self._precedence def _handle_precedence(self, args): precedence = [getattr(a, 'precedence', 17) for a in args] if min(precedence) <= self._precedence: new_args = [] for i, (a,p) in enumerate(zip(args, precedence)): if (p < self._precedence or (p == self._precedence and i != 0)): new_args.append(PyccelAssociativeParenthesis(a)) else: new_args.append(a) args = tuple(new_args) return args def __str__(self): return repr(self) def _set_order(self): if self._rank is not None and self._rank > 1: if all(a.order == self._args[0].order for a in self._args): self._order = self._args[0].order else: self._order = 'C' else: self._order = None @property def args(self): return self._args class PyccelUnaryOperator(PyccelOperator): __slots__ = ('_dtype', '_precision','_shape','_rank','_order') def __init__(self, arg): super().__init__(arg) @staticmethod def _calculate_dtype(*args): a = args[0] dtype = a.dtype precision = a.precision return dtype, precision @staticmethod def _calculate_shape_rank(*args): a = args[0] rank = a.rank shape = a.shape return shape, rank class PyccelUnary(PyccelUnaryOperator): __slots__ = () _precedence = 14 def _handle_precedence(self, args): args = PyccelUnaryOperator._handle_precedence(self, args) args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelUnary) else a for a in args) return args def __repr__(self): return '+{}'.format(repr(self.args[0])) class PyccelUnarySub(PyccelUnary): __slots__ = () def __repr__(self): return '-{}'.format(repr(self.args[0])) class 
PyccelNot(PyccelUnaryOperator): __slots__ = () _precedence = 6 @staticmethod def _calculate_dtype(*args): dtype = NativeBool() precision = default_precision['bool'] return dtype, precision @staticmethod def _calculate_shape_rank(*args): rank = 0 shape = () return shape, rank def __repr__(self): return 'not {}'.format(repr(self.args[0])) class PyccelAssociativeParenthesis(PyccelUnaryOperator): __slots__ = () _precedence = 18 def _handle_precedence(self, args): return args def __repr__(self): return '({})'.format(repr(self.args[0])) class PyccelBinaryOperator(PyccelOperator): __slots__ = ('_dtype','_precision','_shape','_rank','_order') def __init__(self, arg1, arg2, simplify = False): super().__init__(arg1, arg2) @classmethod def _calculate_dtype(cls, *args): integers = [a for a in args if a.dtype in (NativeInteger(),NativeBool())] reals = [a for a in args if a.dtype is NativeReal()] complexes = [a for a in args if a.dtype is NativeComplex()] strs = [a for a in args if a.dtype is NativeString()] if strs: return cls._handle_str_type(strs) assert len(integers + reals + complexes) == 0 elif complexes: return cls._handle_complex_type(complexes) elif reals: return cls._handle_real_type(reals) elif integers: return cls._handle_integer_type(integers) else: raise TypeError('cannot determine the type of {}'.format(args)) @staticmethod def _handle_str_type(strs): raise TypeError("unsupported operand type(s) for /: 'str' and 'str'") @staticmethod def _handle_complex_type(complexes): dtype = NativeComplex() precision = max(a.precision for a in complexes) return dtype, precision @staticmethod def _handle_real_type(reals): dtype = NativeReal() precision = max(a.precision for a in reals) return dtype, precision @staticmethod def _handle_integer_type(integers): dtype = NativeInteger() precision = max(a.precision for a in integers) return dtype, precision @staticmethod def _calculate_shape_rank(*args): strs = [a for a in args if a.dtype is NativeString()] if strs: other = [a for a 
in args if a.dtype in (NativeInteger(), NativeBool(), NativeReal(), NativeComplex())] assert len(other) == 0 rank = 0 shape = () else: ranks = [a.rank for a in args] shapes = [a.shape for a in args] if None in ranks: rank = None shape = None elif all(sh is not None for tup in shapes for sh in tup): s = broadcast(args[0].shape, args[1].shape) shape = s rank = len(s) else: rank = max(a.rank for a in args) shape = [None]*rank return shape, rank class PyccelArithmeticOperator(PyccelBinaryOperator): __slots__ = () def _handle_precedence(self, args): args = PyccelBinaryOperator._handle_precedence(self, args) args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelUnary) else a for a in args) return args class PyccelPow(PyccelArithmeticOperator): __slots__ = () _precedence = 15 def __repr__(self): return '{} ** {}'.format(self.args[0], self.args[1]) class PyccelAdd(PyccelArithmeticOperator): __slots__ = () _precedence = 12 def __new__(cls, arg1, arg2, simplify = False): if simplify: if isinstance(arg2, PyccelUnarySub): return PyccelMinus(arg1, arg2.args[0], simplify = True) dtype, precision = cls._calculate_dtype(arg1, arg2) if isinstance(arg1, Literal) and isinstance(arg2, Literal): return convert_to_literal(arg1.python_value + arg2.python_value, dtype, precision) if dtype == arg2.dtype and precision == arg2.precision and \ isinstance(arg1, Literal) and arg1.python_value == 0: return arg2 if dtype == arg1.dtype and precision == arg1.precision and \ isinstance(arg2, Literal) and arg2.python_value == 0: return arg1 if isinstance(arg1, (LiteralInteger, LiteralFloat)) and \ isinstance(arg2, LiteralComplex) and \ arg2.real == LiteralFloat(0): return LiteralComplex(arg1, arg2.imag) elif isinstance(arg2, (LiteralInteger, LiteralFloat)) and \ isinstance(arg1, LiteralComplex) and \ arg1.real == LiteralFloat(0): return LiteralComplex(arg2, arg1.imag) else: return super().__new__(cls) @staticmethod def _handle_str_type(strs): dtype = NativeString() precision = None 
return dtype, precision def __repr__(self): return '{} + {}'.format(self.args[0], self.args[1]) class PyccelMul(PyccelArithmeticOperator): __slots__ = () _precedence = 13 def __new__(cls, arg1, arg2, simplify = False): if simplify: if (arg1 == 1): return arg2 if (arg2 == 1): return arg1 if (arg1 == 0 or arg2 == 0): dtype, precision = cls._calculate_dtype(arg1, arg2) return convert_to_literal(0, dtype, precision) if (isinstance(arg1, PyccelUnarySub) and arg1.args[0] == 1): return PyccelUnarySub(arg2) if (isinstance(arg2, PyccelUnarySub) and arg2.args[0] == 1): return PyccelUnarySub(arg1) if isinstance(arg1, Literal) and isinstance(arg2, Literal): dtype, precision = cls._calculate_dtype(arg1, arg2) return convert_to_literal(arg1.python_value * arg2.python_value, dtype, precision) return super().__new__(cls) def __repr__(self): return '{} * {}'.format(self.args[0], self.args[1]) class PyccelMinus(PyccelArithmeticOperator): __slots__ = () _precedence = 12 def __new__(cls, arg1, arg2, simplify = False): if simplify: if isinstance(arg2, PyccelUnarySub): return PyccelAdd(arg1, arg2.args[0], simplify = True) elif isinstance(arg1, Literal) and isinstance(arg2, Literal): dtype, precision = cls._calculate_dtype(arg1, arg2) return convert_to_literal(arg1.python_value - arg2.python_value, dtype, precision) if isinstance(arg1, LiteralFloat) and \ isinstance(arg2, LiteralComplex) and \ arg2.real == LiteralFloat(0): return LiteralComplex(arg1, -arg2.imag.python_value) elif isinstance(arg2, LiteralFloat) and \ isinstance(arg1, LiteralComplex) and \ arg1.real == LiteralFloat(0): return LiteralComplex(-arg2.python_value, arg1.imag) else: return super().__new__(cls) def __repr__(self): return '{} - {}'.format(repr(self.args[0]), repr(self.args[1])) class PyccelDiv(PyccelArithmeticOperator): __slots__ = () _precedence = 13 def __new__(cls, arg1, arg2, simplify=False): if simplify: if (arg2 == 1): return arg1 return super().__new__(cls) @staticmethod def _handle_integer_type(integers): 
dtype = NativeReal() precision = default_precision['real'] return dtype, precision def __repr__(self): return '{} + {}'.format(self.args[0], self.args[1]) def __repr__(self): return '{} / {}'.format(self.args[0], self.args[1]) class PyccelMod(PyccelArithmeticOperator): __slots__ = () _precedence = 13 def __repr__(self): return '{} % {}'.format(self.args[0], self.args[1]) class PyccelFloorDiv(PyccelArithmeticOperator): __slots__ = () _precedence = 13 def __repr__(self): return '{} // {}'.format(self.args[0], self.args[1]) class PyccelComparisonOperator(PyccelBinaryOperator): __slots__ = () _precedence = 7 @staticmethod def _calculate_dtype(*args): dtype = NativeBool() precision = default_precision['bool'] return dtype, precision class PyccelEq(PyccelComparisonOperator): __slots__ = () def __repr__(self): return '{} == {}'.format(self.args[0], self.args[1]) class PyccelNe(PyccelComparisonOperator): __slots__ = () def __repr__(self): return '{} != {}'.format(self.args[0], self.args[1]) class PyccelLt(PyccelComparisonOperator): __slots__ = () def __repr__(self): return '{} < {}'.format(self.args[0], self.args[1]) class PyccelLe(PyccelComparisonOperator): __slots__ = () def __repr__(self): return '{} <= {}'.format(self.args[0], self.args[1]) class PyccelGt(PyccelComparisonOperator): __slots__ = () def __repr__(self): return '{} > {}'.format(self.args[0], self.args[1]) class PyccelGe(PyccelComparisonOperator): __slots__ = () def __repr__(self): return '{} >= {}'.format(self.args[0], self.args[1]) class PyccelBooleanOperator(PyccelOperator): dtype = NativeBool() precision = default_precision['bool'] rank = 0 shape = () order = None __slots__ = () def _set_order(self): pass def _set_dtype(self): pass def _set_shape_rank(self): pass class PyccelAnd(PyccelBooleanOperator): __slots__ = () _precedence = 5 def _handle_precedence(self, args): args = PyccelBooleanOperator._handle_precedence(self, args) args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelOr) else a 
for a in args) return args def __repr__(self): return '{} and {}'.format(self.args[0], self.args[1]) class PyccelOr(PyccelBooleanOperator): __slots__ = () _precedence = 4 def _handle_precedence(self, args): args = PyccelBooleanOperator._handle_precedence(self, args) args = tuple(PyccelAssociativeParenthesis(a) if isinstance(a, PyccelAnd) else a for a in args) return args def __repr__(self): return '{} or {}'.format(self.args[0], self.args[1]) class PyccelIs(PyccelBooleanOperator): __slots__ = () _precedence = 7 def __init__(self, arg1, arg2): super().__init__(arg1, arg2) @property def lhs(self): return self._args[0] @property def rhs(self): return self._args[1] def __repr__(self): return '{} is {}'.format(self.args[0], self.args[1]) class PyccelIsNot(PyccelIs): __slots__ = () def __repr__(self): return '{} is not {}'.format(self.args[0], self.args[1]) class IfTernaryOperator(PyccelOperator): __slots__ = ('_dtype','_precision','_shape','_rank','_order') _precedence = 3 def __init__(self, cond, value_true, value_false): super().__init__(cond, value_true, value_false) if self.stage == 'syntactic': return if isinstance(value_true , Nil) or isinstance(value_false, Nil): errors.report('None is not implemented for Ternary Operator', severity='fatal') if isinstance(value_true , NativeString) or isinstance(value_false, NativeString): errors.report('String is not implemented for Ternary Operator', severity='fatal') if value_true.dtype != value_false.dtype: if value_true.dtype not in NativeNumeric or value_false.dtype not in NativeNumeric: errors.report('The types are incompatible in IfTernaryOperator', severity='fatal') if value_false.rank != value_true.rank : errors.report('Ternary Operator results should have the same rank', severity='fatal') if value_false.shape != value_true.shape : errors.report('Ternary Operator results should have the same shape', severity='fatal') @staticmethod def _calculate_dtype(cond, value_true, value_false): if value_true.dtype in NativeNumeric 
and value_false.dtype in NativeNumeric: dtype = max([value_true.dtype, value_false.dtype], key = NativeNumeric.index) else: dtype = value_true.dtype precision = max([value_true.precision, value_false.precision]) return dtype, precision @staticmethod def _calculate_shape_rank(cond, value_true, value_false): shape = value_true.shape rank = value_true.rank if rank is not None and rank > 1: if value_false.order != value_true.order : errors.report('Ternary Operator results should have the same order', severity='fatal') return shape, rank @property def cond(self): return self._args[0] @property def value_true(self): return self._args[1] @property def value_false(self): return self._args[2] Relational = (PyccelEq, PyccelNe, PyccelLt, PyccelLe, PyccelGt, PyccelGe, PyccelAnd, PyccelOr, PyccelNot, PyccelIs, PyccelIsNot)
true
true
f716dcc5929dc395b511c231b73a25ba28485635
607
py
Python
apps/shortener_app/migrations/0009_auto_20190123_1903.py
escrichov/shortener
f8a72edb0b40c20021541f5178f257590b478e02
[ "MIT" ]
6
2018-12-16T12:35:18.000Z
2020-06-07T13:06:17.000Z
apps/shortener_app/migrations/0009_auto_20190123_1903.py
escrichov/shortener
f8a72edb0b40c20021541f5178f257590b478e02
[ "MIT" ]
16
2019-06-10T19:10:01.000Z
2022-02-12T04:22:55.000Z
apps/shortener_app/migrations/0009_auto_20190123_1903.py
escrichov/shortener
f8a72edb0b40c20021541f5178f257590b478e02
[ "MIT" ]
1
2019-01-18T00:06:13.000Z
2019-01-18T00:06:13.000Z
# Generated by Django 2.1.5 on 2019-01-23 19:03 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('shortener_app', '0008_auto_20181205_2300'), ] operations = [ migrations.AddField( model_name='shorturl', name='url_active', field=models.BooleanField(default=True), ), migrations.AddField( model_name='shorturl', name='url_active_last_checked', field=models.DateTimeField(default=datetime.datetime.utcnow), ), ]
24.28
73
0.614498
import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('shortener_app', '0008_auto_20181205_2300'), ] operations = [ migrations.AddField( model_name='shorturl', name='url_active', field=models.BooleanField(default=True), ), migrations.AddField( model_name='shorturl', name='url_active_last_checked', field=models.DateTimeField(default=datetime.datetime.utcnow), ), ]
true
true
f716dcdbe7007bf7839728fd2b9195c4311f4c1c
2,821
py
Python
src/elaspic_rest_api/app.py
elaspic/elaspic-rest-api
b1ed2dae1870b5d0c678d196e39c8c806959e640
[ "MIT" ]
null
null
null
src/elaspic_rest_api/app.py
elaspic/elaspic-rest-api
b1ed2dae1870b5d0c678d196e39c8c806959e640
[ "MIT" ]
null
null
null
src/elaspic_rest_api/app.py
elaspic/elaspic-rest-api
b1ed2dae1870b5d0c678d196e39c8c806959e640
[ "MIT" ]
null
null
null
import asyncio import logging from typing import Any, Dict import sentry_sdk from fastapi import BackgroundTasks, FastAPI from sentry_sdk.integrations.asgi import SentryAsgiMiddleware import elaspic_rest_api from elaspic_rest_api import config from elaspic_rest_api import jobsubmitter as js from elaspic_rest_api.types import DataIn logger = logging.getLogger(__name__) description = """\ This page lists `ELASPIC` REST API endpoints that are available for evaluating the effect of mutations on protein stability and protein interaction affinity. Please see the source code repository for more information: <https://gitlab.com/elaspic/elaspic-rest-api/>. """ app = FastAPI( title="ELASPIC REST API", description=description, version=elaspic_rest_api.__version__, root_path=config.ROOT_PATH, ) js_data: Dict[str, Any] = {} @app.post("/", status_code=200) async def submit_job(data_in: DataIn, background_tasks: BackgroundTasks): if data_in.api_token == config.API_TOKEN: background_tasks.add_task(js.submit_job, data_in, js_data["ds"]) return {"status": "submitted"} else: return {"status": "restricted"} @app.get("/status", status_code=200) async def get_pre_qsub_queue(api_token: str): queues_to_monitor = [ "pre_qsub_queue", "qsub_queue", "validation_queue", "elaspic2_pending_queue", "elaspic2_running_queue", ] ds: js.DataStructures = js_data["ds"] if api_token == config.API_TOKEN: result = { **{name: list(getattr(ds, name)._queue) for name in queues_to_monitor}, "monitored_jobs": [ (tuple(key), list(values)) for key, values in ds.monitored_jobs.items() ], } else: result = {} return result @app.get("/_ah/warmup", include_in_schema=False) def warmup(): return {} @app.on_event("startup") async def on_startup() -> None: js_data["ds"] = js.DataStructures() js_data["js_task"] = asyncio.create_task( js.start_jobsubmitter(js_data["ds"]), name="jobsubmitter" ) await asyncio.sleep(0.1) js_task = js_data["js_task"] if js_task.done() and (error := js_task.exception()): js_task.print_stack() 
logger.error("Task %s finished with an error: %s", js_task.name, error) @app.on_event("shutdown") async def on_shutdown() -> None: js_task = js_data["js_task"] js_task.cancel() if js_task.done() and (error := js_task.exception()): js_task.print_stack() logger.error("Task %s finished with an error: %s", js_task.name, error) await js.finalize_lingering_jobs(js_data["ds"]) await js_task if config.SENTRY_DSN: sentry_sdk.init(config.SENTRY_DSN, traces_sample_rate=1.0) app = SentryAsgiMiddleware(app) # type: ignore
27.930693
89
0.686636
import asyncio import logging from typing import Any, Dict import sentry_sdk from fastapi import BackgroundTasks, FastAPI from sentry_sdk.integrations.asgi import SentryAsgiMiddleware import elaspic_rest_api from elaspic_rest_api import config from elaspic_rest_api import jobsubmitter as js from elaspic_rest_api.types import DataIn logger = logging.getLogger(__name__) description = """\ This page lists `ELASPIC` REST API endpoints that are available for evaluating the effect of mutations on protein stability and protein interaction affinity. Please see the source code repository for more information: <https://gitlab.com/elaspic/elaspic-rest-api/>. """ app = FastAPI( title="ELASPIC REST API", description=description, version=elaspic_rest_api.__version__, root_path=config.ROOT_PATH, ) js_data: Dict[str, Any] = {} @app.post("/", status_code=200) async def submit_job(data_in: DataIn, background_tasks: BackgroundTasks): if data_in.api_token == config.API_TOKEN: background_tasks.add_task(js.submit_job, data_in, js_data["ds"]) return {"status": "submitted"} else: return {"status": "restricted"} @app.get("/status", status_code=200) async def get_pre_qsub_queue(api_token: str): queues_to_monitor = [ "pre_qsub_queue", "qsub_queue", "validation_queue", "elaspic2_pending_queue", "elaspic2_running_queue", ] ds: js.DataStructures = js_data["ds"] if api_token == config.API_TOKEN: result = { **{name: list(getattr(ds, name)._queue) for name in queues_to_monitor}, "monitored_jobs": [ (tuple(key), list(values)) for key, values in ds.monitored_jobs.items() ], } else: result = {} return result @app.get("/_ah/warmup", include_in_schema=False) def warmup(): return {} @app.on_event("startup") async def on_startup() -> None: js_data["ds"] = js.DataStructures() js_data["js_task"] = asyncio.create_task( js.start_jobsubmitter(js_data["ds"]), name="jobsubmitter" ) await asyncio.sleep(0.1) js_task = js_data["js_task"] if js_task.done() and (error := js_task.exception()): js_task.print_stack() 
logger.error("Task %s finished with an error: %s", js_task.name, error) @app.on_event("shutdown") async def on_shutdown() -> None: js_task = js_data["js_task"] js_task.cancel() if js_task.done() and (error := js_task.exception()): js_task.print_stack() logger.error("Task %s finished with an error: %s", js_task.name, error) await js.finalize_lingering_jobs(js_data["ds"]) await js_task if config.SENTRY_DSN: sentry_sdk.init(config.SENTRY_DSN, traces_sample_rate=1.0) app = SentryAsgiMiddleware(app)
true
true
f716dd589103e434f5c06b8eb30e4fe38d5df1b6
1,790
py
Python
rpncalc/binaryoperator.py
newmanrs/rpncalc
8663e5221efd78c12889b6db4eda20821b27d52a
[ "MIT" ]
null
null
null
rpncalc/binaryoperator.py
newmanrs/rpncalc
8663e5221efd78c12889b6db4eda20821b27d52a
[ "MIT" ]
11
2021-11-10T04:28:51.000Z
2022-02-25T05:19:22.000Z
rpncalc/binaryoperator.py
newmanrs/rpncalc
8663e5221efd78c12889b6db4eda20821b27d52a
[ "MIT" ]
null
null
null
import numpy import math from rpncalc.classes import ActionEnum class BinaryOperator(ActionEnum): addition = '+' subtraction = '-' multiplication = '*' division = '/' integer_division = '//' power = '^' atan2 = 'atan2', \ "Returns quadrant correct polar coordinate theta = atan2(y,x)" log_base = 'log_base', \ "Logarithm with prior arg as base" \ "Example: 1000 10 log_base returns 3" equals = '=' gt = '>' gte = '>=' lt = '<' lte = '<=' choose = 'choose' combinations = 'combo' def action(self): v1, v0 = self.take_2() o = type(self) match self: case o.addition: r = v0+v1 case o.subtraction: r = v0-v1 case o.multiplication: r = v0*v1 case o.division: r = v0/v1 case o.integer_division: r = v0//v1 case o.power: r = numpy.power(v0, v1) case o.log_base: r = numpy.log(v0)/numpy.log(v1) case o.atan2: r = numpy.arctan2(v0, v1) case o.equals: r = v0 == v1 case o.gt: r = v0 > v1 case o.gte: r = v0 >= v1 case o.lt: r = v0 < v1 case o.lte: r = v0 <= v1 case o.choose: f = math.factorial r = f(v0)//(f(v0-v1)) case o.combinations: f = math.factorial r = f(v0)//(f(v0-v1)*f(v1)) case _: msg = f"Missing case match for {self}" raise NotImplementedError(msg) self.push(r)
25.211268
70
0.430726
import numpy import math from rpncalc.classes import ActionEnum class BinaryOperator(ActionEnum): addition = '+' subtraction = '-' multiplication = '*' division = '/' integer_division = '//' power = '^' atan2 = 'atan2', \ "Returns quadrant correct polar coordinate theta = atan2(y,x)" log_base = 'log_base', \ "Logarithm with prior arg as base" \ "Example: 1000 10 log_base returns 3" equals = '=' gt = '>' gte = '>=' lt = '<' lte = '<=' choose = 'choose' combinations = 'combo' def action(self): v1, v0 = self.take_2() o = type(self) match self: case o.addition: r = v0+v1 case o.subtraction: r = v0-v1 case o.multiplication: r = v0*v1 case o.division: r = v0/v1 case o.integer_division: r = v0//v1 case o.power: r = numpy.power(v0, v1) case o.log_base: r = numpy.log(v0)/numpy.log(v1) case o.atan2: r = numpy.arctan2(v0, v1) case o.equals: r = v0 == v1 case o.gt: r = v0 > v1 case o.gte: r = v0 >= v1 case o.lt: r = v0 < v1 case o.lte: r = v0 <= v1 case o.choose: f = math.factorial r = f(v0)//(f(v0-v1)) case o.combinations: f = math.factorial r = f(v0)//(f(v0-v1)*f(v1)) case _: msg = f"Missing case match for {self}" raise NotImplementedError(msg) self.push(r)
true
true
f716de749187532c276040a0b1e00777b44337ce
592
py
Python
api_logic_server_cli/project_prototype/util.py
valhuber/ApiLogicServer
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
[ "BSD-3-Clause" ]
71
2021-01-23T17:34:33.000Z
2022-03-29T13:11:29.000Z
api_logic_server_cli/project_prototype/util.py
valhuber/ApiLogicServer
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
[ "BSD-3-Clause" ]
38
2021-01-24T21:56:30.000Z
2022-03-08T18:49:00.000Z
api_logic_server_cli/project_prototype/util.py
valhuber/ApiLogicServer
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
[ "BSD-3-Clause" ]
14
2021-01-23T16:20:44.000Z
2022-03-24T10:48:28.000Z
import sqlite3 from os import path import sys import logging app_logger = logging.getLogger("api_logic_server_app") def log(msg: any) -> None: app_logger.info(msg) # print("TIL==> " + msg) def connection() -> sqlite3.Connection: ROOT: str = path.dirname(path.realpath(__file__)) log(ROOT) _connection = sqlite3.connect(path.join(ROOT, "sqlitedata.db")) return _connection def dbpath(dbname: str) -> str: ROOT: str = path.dirname(path.realpath(__file__)) log('ROOT: '+ROOT) PATH: str = path.join(ROOT, dbname) log('DBPATH: '+PATH) return PATH
22.769231
67
0.675676
import sqlite3 from os import path import sys import logging app_logger = logging.getLogger("api_logic_server_app") def log(msg: any) -> None: app_logger.info(msg) def connection() -> sqlite3.Connection: ROOT: str = path.dirname(path.realpath(__file__)) log(ROOT) _connection = sqlite3.connect(path.join(ROOT, "sqlitedata.db")) return _connection def dbpath(dbname: str) -> str: ROOT: str = path.dirname(path.realpath(__file__)) log('ROOT: '+ROOT) PATH: str = path.join(ROOT, dbname) log('DBPATH: '+PATH) return PATH
true
true
f716ded79220bf0850640a2412069bb981807960
3,633
py
Python
meegkit/utils/trca.py
ludovicdmt/python-meegkit
4aa4ba49354b996be20eda41660a550d1bd31f9a
[ "BSD-3-Clause" ]
null
null
null
meegkit/utils/trca.py
ludovicdmt/python-meegkit
4aa4ba49354b996be20eda41660a550d1bd31f9a
[ "BSD-3-Clause" ]
null
null
null
meegkit/utils/trca.py
ludovicdmt/python-meegkit
4aa4ba49354b996be20eda41660a550d1bd31f9a
[ "BSD-3-Clause" ]
null
null
null
"""TRCA utils.""" import numpy as np from scipy.signal import filtfilt, cheb1ord, cheby1 from scipy import stats def round_half_up(num, decimals=0): """Round half up round the last decimal of the number. The rules are: from 0 to 4 rounds down from 5 to 9 rounds up Parameters ---------- num : float Number to round decimals : number of decimals Returns ------- num rounded """ multiplier = 10 ** decimals return int(np.floor(num * multiplier + 0.5) / multiplier) def normfit(data, ci=0.95): """Compute the mean, std and confidence interval for them. Parameters ---------- data : array, shape=() Input data. ci : float Confidence interval (default=0.95). Returns ------- m : mean sigma : std deviation [m - h, m + h] : confidence interval of the mean [sigmaCI_lower, sigmaCI_upper] : confidence interval of the std """ arr = 1.0 * np.array(data) num = len(arr) avg, std_err = np.mean(arr), stats.sem(arr) h_int = std_err * stats.t.ppf((1 + ci) / 2., num - 1) var = np.var(data, ddof=1) var_ci_upper = var * (num - 1) / stats.chi2.ppf((1 - ci) / 2, num - 1) var_ci_lower = var * (num - 1) / stats.chi2.ppf(1 - (1 - ci) / 2, num - 1) sigma = np.sqrt(var) sigma_ci_lower = np.sqrt(var_ci_lower) sigma_ci_upper = np.sqrt(var_ci_upper) return avg, sigma, [avg - h_int, avg + h_int], [sigma_ci_lower, sigma_ci_upper] def itr(n, p, t): """Compute information transfer rate (ITR). Definition in [1]_. Parameters ---------- n : int Number of targets. p : float Target identification accuracy (0 <= p <= 1). t : float Average time for a selection (s). Returns ------- itr : float Information transfer rate [bits/min] References ---------- .. [1] M. Cheng, X. Gao, S. Gao, and D. Xu, "Design and Implementation of a Brain-Computer Interface With High Transfer Rates", IEEE Trans. Biomed. Eng. 49, 1181-1186, 2002. 
""" itr = 0 if (p < 0 or 1 < p): raise ValueError('Accuracy need to be between 0 and 1.') elif (p < 1 / n): raise ValueError('ITR might be incorrect because accuracy < chance') itr = 0 elif (p == 1): itr = np.log2(n) * 60 / t else: itr = (np.log2(n) + p * np.log2(p) + (1 - p) * np.log2((1 - p) / (n - 1))) * 60 / t return itr def bandpass(eeg, sfreq, Wp, Ws): """Filter bank design for decomposing EEG data into sub-band components. Parameters ---------- eeg : np.array, shape=(n_samples, n_chans[, n_trials]) Training data. sfreq : int Sampling frequency of the data. Wp : 2-tuple Passband for Chebyshev filter. Ws : 2-tuple Stopband for Chebyshev filter. Returns ------- y: np.array, shape=(n_trials, n_chans, n_samples) Sub-band components decomposed by a filter bank. See Also -------- scipy.signal.cheb1ord : Chebyshev type I filter order selection. """ # Chebyshev type I filter order selection. N, Wn = cheb1ord(Wp, Ws, 3, 40, fs=sfreq) # Chebyshev type I filter design B, A = cheby1(N, 0.5, Wn, btype="bandpass", fs=sfreq) # the arguments 'axis=0, padtype='odd', padlen=3*(max(len(B),len(A))-1)' # correspond to Matlab filtfilt : https://dsp.stackexchange.com/a/47945 y = filtfilt(B, A, eeg, axis=0, padtype='odd', padlen=3 * (max(len(B), len(A)) - 1)) return y
26.136691
78
0.570603
import numpy as np from scipy.signal import filtfilt, cheb1ord, cheby1 from scipy import stats def round_half_up(num, decimals=0): multiplier = 10 ** decimals return int(np.floor(num * multiplier + 0.5) / multiplier) def normfit(data, ci=0.95): arr = 1.0 * np.array(data) num = len(arr) avg, std_err = np.mean(arr), stats.sem(arr) h_int = std_err * stats.t.ppf((1 + ci) / 2., num - 1) var = np.var(data, ddof=1) var_ci_upper = var * (num - 1) / stats.chi2.ppf((1 - ci) / 2, num - 1) var_ci_lower = var * (num - 1) / stats.chi2.ppf(1 - (1 - ci) / 2, num - 1) sigma = np.sqrt(var) sigma_ci_lower = np.sqrt(var_ci_lower) sigma_ci_upper = np.sqrt(var_ci_upper) return avg, sigma, [avg - h_int, avg + h_int], [sigma_ci_lower, sigma_ci_upper] def itr(n, p, t): itr = 0 if (p < 0 or 1 < p): raise ValueError('Accuracy need to be between 0 and 1.') elif (p < 1 / n): raise ValueError('ITR might be incorrect because accuracy < chance') itr = 0 elif (p == 1): itr = np.log2(n) * 60 / t else: itr = (np.log2(n) + p * np.log2(p) + (1 - p) * np.log2((1 - p) / (n - 1))) * 60 / t return itr def bandpass(eeg, sfreq, Wp, Ws): N, Wn = cheb1ord(Wp, Ws, 3, 40, fs=sfreq) B, A = cheby1(N, 0.5, Wn, btype="bandpass", fs=sfreq) y = filtfilt(B, A, eeg, axis=0, padtype='odd', padlen=3 * (max(len(B), len(A)) - 1)) return y
true
true
f716df4ee14d7c3327f654e758f30bd597015ed0
4,458
py
Python
book_code_selected_keras_examples/cifar/cifar10_cnn.py
IntuitionMachine/DeepLearningGuide
7270b13ee5783a23482738cdf9d355c10d25360d
[ "MIT" ]
1
2019-05-02T02:53:34.000Z
2019-05-02T02:53:34.000Z
book_code_selected_keras_examples/cifar/cifar10_cnn.py
IntuitionMachine/DeepLearningGuide
7270b13ee5783a23482738cdf9d355c10d25360d
[ "MIT" ]
null
null
null
book_code_selected_keras_examples/cifar/cifar10_cnn.py
IntuitionMachine/DeepLearningGuide
7270b13ee5783a23482738cdf9d355c10d25360d
[ "MIT" ]
null
null
null
'''Train a simple deep CNN on the CIFAR10 small images dataset. It gets to 75% validation accuracy in 25 epochs, and 79% after 50 epochs. (it's still underfitting at that point, though). ''' from __future__ import print_function from time import time import keras from keras.datasets import cifar10 from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential, load_model from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.callbacks import TensorBoard import os batch_size = 32 num_classes = 10 epochs = 10 data_augmentation = True num_predictions = 20 save_dir = os.path.join(os.getcwd(), 'saved_models') model_name = 'keras_cifar10_trained_model.h5' # The data, split between train and test sets: (x_train, y_train), (x_test, y_test) = cifar10.load_data() print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') # Convert class vectors to binary class matrices. 
y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) model = Sequential() model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:])) model.add(Activation('relu')) model.add(Conv2D(32, (3, 3))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64, (3, 3), padding='same')) model.add(Activation('relu')) model.add(Conv2D(64, (3, 3))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes)) model.add(Activation('softmax')) # Print a summary of the model architecture model.summary() # initiate RMSprop optimizer opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6) # Let's train the model using RMSprop model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 tensorboard = TensorBoard(log_dir="/logs/{}".format(time()), write_graph=True, write_images=True) if not data_augmentation: print('Not using data augmentation.') model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test), shuffle=True) else: print('Using real-time data augmentation.') # This will do preprocessing and realtime data augmentation: datagen = ImageDataGenerator( featurewise_center=False, # set input mean to 0 over the dataset samplewise_center=False, # set each sample mean to 0 featurewise_std_normalization=False, # divide inputs by std of the dataset samplewise_std_normalization=False, # divide each input by its std zca_whitening=False, # apply ZCA whitening rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180) width_shift_range=0.1, # randomly shift images horizontally (fraction of total width) 
height_shift_range=0.1, # randomly shift images vertically (fraction of total height) horizontal_flip=True, # randomly flip images vertical_flip=False) # randomly flip images # Compute quantities required for feature-wise normalization # (std, mean, and principal components if ZCA whitening is applied). datagen.fit(x_train) # Fit the model on the batches generated by datagen.flow(). model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), epochs=epochs, validation_data=(x_test, y_test), workers=4) # Save model and weights if not os.path.isdir(save_dir): os.makedirs(save_dir) model_path = os.path.join(save_dir, model_name) model.save(model_path) print('Saved trained model at %s ' % model_path) # Score trained model. scores = model.evaluate(x_test, y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) # Load the saved model and run it on some data model = load_model('saved_models/keras_cifar10_trained_model.h5') scores = model.predict(x_test, batch_size=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1])
33.772727
97
0.711306
from __future__ import print_function from time import time import keras from keras.datasets import cifar10 from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential, load_model from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.callbacks import TensorBoard import os batch_size = 32 num_classes = 10 epochs = 10 data_augmentation = True num_predictions = 20 save_dir = os.path.join(os.getcwd(), 'saved_models') model_name = 'keras_cifar10_trained_model.h5' (x_train, y_train), (x_test, y_test) = cifar10.load_data() print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) model = Sequential() model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:])) model.add(Activation('relu')) model.add(Conv2D(32, (3, 3))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64, (3, 3), padding='same')) model.add(Activation('relu')) model.add(Conv2D(64, (3, 3))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes)) model.add(Activation('softmax')) model.summary() opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 tensorboard = TensorBoard(log_dir="/logs/{}".format(time()), write_graph=True, write_images=True) if not data_augmentation: print('Not using data augmentation.') model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test), 
shuffle=True) else: print('Using real-time data augmentation.') # This will do preprocessing and realtime data augmentation: datagen = ImageDataGenerator( featurewise_center=False, # set input mean to 0 over the dataset samplewise_center=False, # set each sample mean to 0 featurewise_std_normalization=False, # divide inputs by std of the dataset samplewise_std_normalization=False, # divide each input by its std zca_whitening=False, # apply ZCA whitening rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180) width_shift_range=0.1, # randomly shift images horizontally (fraction of total width) height_shift_range=0.1, # randomly shift images vertically (fraction of total height) horizontal_flip=True, # randomly flip images vertical_flip=False) # randomly flip images # Compute quantities required for feature-wise normalization # (std, mean, and principal components if ZCA whitening is applied). datagen.fit(x_train) # Fit the model on the batches generated by datagen.flow(). model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), epochs=epochs, validation_data=(x_test, y_test), workers=4) # Save model and weights if not os.path.isdir(save_dir): os.makedirs(save_dir) model_path = os.path.join(save_dir, model_name) model.save(model_path) print('Saved trained model at %s ' % model_path) # Score trained model. scores = model.evaluate(x_test, y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) # Load the saved model and run it on some data model = load_model('saved_models/keras_cifar10_trained_model.h5') scores = model.predict(x_test, batch_size=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1])
true
true
f716dfa33ddc3edf22a30911544807cfce14bd8a
247,289
py
Python
tensorflow/python/framework/ops.py
jraman/tensorflow
41c6bf7c6215bea9bfb9bf0a9b63f2084e6f3058
[ "Apache-2.0" ]
1
2020-10-01T16:52:51.000Z
2020-10-01T16:52:51.000Z
tensorflow/python/framework/ops.py
rakeshacharya-d/tensorflow
9028828d3b8a2a622f7203a317002cc749531695
[ "Apache-2.0" ]
1
2022-02-10T01:08:48.000Z
2022-02-10T01:08:48.000Z
tensorflow/python/framework/ops.py
rakeshacharya-d/tensorflow
9028828d3b8a2a622f7203a317002cc749531695
[ "Apache-2.0" ]
null
null
null
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classes and functions used to construct graphs.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re import sys import threading import types import numpy as np import six from six.moves import map # pylint: disable=redefined-builtin from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import function_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.core.framework import op_def_pb2 from tensorflow.core.framework import versions_pb2 from tensorflow.core.protobuf import config_pb2 # pywrap_tensorflow must be imported first to avoid profobuf issues. 
# (b/143110113) # pylint: disable=invalid-import-order,g-bad-import-order,unused-import from tensorflow.python import pywrap_tensorflow from tensorflow.python import pywrap_tfe # pylint: enable=invalid-import-order,g-bad-import-order,unused-import from tensorflow.python import tf2 from tensorflow.python.client import pywrap_tf_session from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import monitoring from tensorflow.python.eager import tape from tensorflow.python.framework import c_api_util from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import device as pydev from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import indexed_slices from tensorflow.python.framework import registry from tensorflow.python.framework import tensor_conversion_registry from tensorflow.python.framework import tensor_like from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import traceable_stack from tensorflow.python.framework import versions from tensorflow.python.ops import control_flow_util from tensorflow.python.platform import app from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat from tensorflow.python.util import decorator_utils from tensorflow.python.util import deprecation from tensorflow.python.util import function_utils from tensorflow.python.util import lock_util from tensorflow.python.util import memory from tensorflow.python.util import object_identity from tensorflow.python.util import tf_contextlib from tensorflow.python.util import tf_stack from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.deprecation import deprecated_args from tensorflow.python.util.lazy_loader import LazyLoader from tensorflow.python.util.tf_export import kwarg_only from tensorflow.python.util.tf_export 
import tf_export ag_ctx = LazyLoader( "ag_ctx", globals(), "tensorflow.python.autograph.core.ag_ctx") # Temporary global switches determining if we should enable the work-in-progress # calls to the C API. These will be removed once all functionality is supported. _USE_C_API = True _USE_C_SHAPES = True _api_usage_gauge = monitoring.BoolGauge( "/tensorflow/api/ops_eager_execution", "Whether ops.enable_eager_execution() is called.") # pylint: disable=protected-access _TensorLike = tensor_like._TensorLike _DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE # pylint: enable=protected-access def tensor_id(tensor): """Returns a unique identifier for this Tensor.""" return tensor._id # pylint: disable=protected-access class _UserDeviceSpec(object): """Store user-specified device and provide computation of merged device.""" def __init__(self, device_name_or_function): self._device_name_or_function = device_name_or_function self.display_name = str(self._device_name_or_function) self.function = device_name_or_function self.raw_string = None if isinstance(device_name_or_function, pydev.MergeDevice): self.is_null_merge = device_name_or_function.is_null_merge elif callable(device_name_or_function): self.is_null_merge = False dev_func = self._device_name_or_function func_name = function_utils.get_func_name(dev_func) func_code = function_utils.get_func_code(dev_func) if func_code: fname = func_code.co_filename lineno = func_code.co_firstlineno else: fname = "unknown" lineno = -1 self.display_name = "%s<%s, %d>" % (func_name, fname, lineno) elif device_name_or_function is None: # NOTE(taylorrobie): This MUST be False. None signals a break in the # device stack, so `is_null_merge` must be False for such a case to # allow callers to safely skip over null merges without missing a None. 
self.is_null_merge = False else: self.raw_string = device_name_or_function self.function = pydev.merge_device(device_name_or_function) self.is_null_merge = self.function.is_null_merge # We perform this check in __init__ because it is of non-trivial cost, # and self.string_merge is typically called many times. self.fast_string_merge = isinstance(self.function, pydev.MergeDevice) def string_merge(self, node_def): if self.fast_string_merge: return self.function.shortcut_string_merge(node_def) return compat.as_str(_device_string(self.function(node_def))) class NullContextmanager(object): def __init__(self, *args, **kwargs): pass def __enter__(self): pass def __exit__(self, type_arg, value_arg, traceback_arg): return False # False values do not suppress exceptions def _override_helper(clazz_object, operator, func): """Overrides (string) operator on Tensors to call func. Args: clazz_object: the class to override for; either Tensor or SparseTensor. operator: the string name of the operator to override. func: the function that replaces the overridden operator. Raises: ValueError: If operator has already been overwritten, or if operator is not allowed to be overwritten. """ existing = getattr(clazz_object, operator, None) if existing is not None: # Check to see if this is a default method-wrapper or slot wrapper which # will be true for the comparison operators. if not isinstance(existing, type(object.__lt__)): raise ValueError("operator %s cannot be overwritten again on class %s." % (operator, clazz_object)) if operator not in Tensor.OVERLOADABLE_OPERATORS: raise ValueError("Overriding %s is disallowed" % operator) setattr(clazz_object, operator, func) def _as_graph_element(obj): """Convert `obj` to a graph element if possible, otherwise return `None`. Args: obj: Object to convert. Returns: The result of `obj._as_graph_element()` if that method is available; otherwise `None`. 
""" conv_fn = getattr(obj, "_as_graph_element", None) if conv_fn and callable(conv_fn): return conv_fn() return None _TENSOR_LIKE_TYPES = tuple() def is_dense_tensor_like(t): """EXPERIMENTAL: Returns true if `t` implements the tensor interface. See `register_dense_tensor_like_type()` for the current definition of a "tensor-like type". Args: t: An object. Returns: True iff `t` is an instance of one of the registered "tensor-like" types. """ return isinstance(t, _TENSOR_LIKE_TYPES) def register_dense_tensor_like_type(tensor_type): """EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface. A "tensor-like type" can represent a single dense tensor, and implements the `name`, `dtype` and `shape` properties. Args: tensor_type: A type implementing the tensor interface. Raises: TypeError: If `tensor_type` does not implement the tensor interface. """ if not (hasattr(tensor_type, "name") and isinstance(tensor_type.name, property)): raise TypeError("Type %s does not define a `name` property" % tensor_type.__name__) if not (hasattr(tensor_type, "dtype") and isinstance(tensor_type.dtype, property)): raise TypeError("Type %s does not define a `dtype` property" % tensor_type.__name__) if not (hasattr(tensor_type, "shape") and isinstance(tensor_type.shape, property)): raise TypeError("Type %s does not define a `shape` property" % tensor_type.__name__) # We expect this list to be small, so choose quadratic complexity # for registration, so that we have a tuple that can be used for # more efficient `isinstance` checks later. 
global _TENSOR_LIKE_TYPES _TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type]) def uid(): """A unique (within this program execution) integer.""" return pywrap_tfe.TFE_Py_UID() def numpy_text(tensor, is_repr=False): """Human readable representation of a tensor's numpy value.""" if tensor.dtype.is_numpy_compatible: # pylint: disable=protected-access text = repr(tensor._numpy()) if is_repr else str(tensor._numpy()) # pylint: enable=protected-access else: text = "<unprintable>" if "\n" in text: text = "\n" + text return text @tf_export(v1=["enable_tensor_equality"]) def enable_tensor_equality(): """Compare Tensors with element-wise comparison and thus be unhashable. Comparing tensors with element-wise allows comparisons such as tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are unhashable. Thus tensors can no longer be directly used in sets or as a key in a dictionary. """ Tensor._USE_EQUALITY = True # pylint: disable=protected-access @tf_export(v1=["disable_tensor_equality"]) def disable_tensor_equality(): """Compare Tensors by their id and be hashable. This is a legacy behaviour of TensorFlow and is highly discouraged. """ Tensor._USE_EQUALITY = False # pylint: disable=protected-access @tf_export("Tensor") class Tensor(_TensorLike): """A tensor represents a rectangular array of data. When writing a TensorFlow program, the main object you manipulate and pass around is the `tf.Tensor`. A `tf.Tensor` object represents a rectangular array of arbitrary dimension, filled with data of a specific data type. A `tf.Tensor` has the following properties: * a data type (float32, int32, or string, for example) * a shape Each element in the Tensor has the same data type, and the data type is always known. In eager execution, which is the default mode in TensorFlow, results are calculated immediately. 
>>> # Compute some values using a Tensor >>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]]) >>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]]) >>> e = tf.matmul(c, d) >>> print(e) tf.Tensor( [[1. 3.] [3. 7.]], shape=(2, 2), dtype=float32) Note that during eager execution, you may discover your `Tensors` are actually of type `EagerTensor`. This is an internal detail, but it does give you access to a useful function, `numpy`: >>> type(e) <class '...ops.EagerTensor'> >>> print(e.numpy()) [[1. 3.] [3. 7.]] TensorFlow can define computations without immediately executing them, most commonly inside `tf.function`s, as well as in (legacy) Graph mode. In those cases, the shape (that is, the rank of the Tensor and the size of each dimension) might be only partially known. Most operations produce tensors of fully-known shapes if the shapes of their inputs are also fully known, but in some cases it's only possible to find the shape of a tensor at execution time. There are specialized tensors; for these, see `tf.Variable`, `tf.constant`, `tf.placeholder`, `tf.SparseTensor`, and `tf.RaggedTensor`. For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor`). """ # List of Python operators that we allow to override. OVERLOADABLE_OPERATORS = { # Binary. "__add__", "__radd__", "__sub__", "__rsub__", "__mul__", "__rmul__", "__div__", "__rdiv__", "__truediv__", "__rtruediv__", "__floordiv__", "__rfloordiv__", "__mod__", "__rmod__", "__lt__", "__le__", "__gt__", "__ge__", "__ne__", "__eq__", "__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__", "__getitem__", "__pow__", "__rpow__", # Unary. "__invert__", "__neg__", "__abs__", "__matmul__", "__rmatmul__" } # Whether to allow hashing or numpy-style equality _USE_EQUALITY = tf2.enabled() def __init__(self, op, value_index, dtype): """Creates a new `Tensor`. Args: op: An `Operation`. `Operation` that computes this tensor. value_index: An `int`. Index of the operation's endpoint that produces this tensor. dtype: A `DType`. 
Type of elements stored in this tensor. Raises: TypeError: If the op is not an `Operation`. """ if not isinstance(op, Operation): raise TypeError("op needs to be an Operation: %s" % op) self._op = op self._value_index = value_index self._dtype = dtypes.as_dtype(dtype) # This will be set by self._as_tf_output(). self._tf_output = None # This will be set by self.shape(). self._shape_val = None # List of operations that use this Tensor as input. We maintain this list # to easily navigate a computation graph. self._consumers = [] self._id = uid() self._name = None @staticmethod def _create_with_tf_output(op, value_index, dtype, tf_output): ret = Tensor(op, value_index, dtype) ret._tf_output = tf_output return ret @property def op(self): """The `Operation` that produces this tensor as an output.""" return self._op @property def dtype(self): """The `DType` of elements in this tensor.""" return self._dtype @property def graph(self): """The `Graph` that contains this tensor.""" return self._op.graph @property def name(self): """The string name of this tensor.""" if self._name is None: if not self._op.name: raise ValueError("Operation was not named: %s" % self._op) self._name = "%s:%d" % (self._op.name, self._value_index) return self._name @property def device(self): """The name of the device on which this tensor will be produced, or None.""" return self._op.device @property def shape(self): """Returns the `TensorShape` that represents the shape of this tensor. The shape is computed using shape inference functions that are registered in the Op for each `Operation`. See `tf.TensorShape` for more details of what a shape represents. The inferred shape of a tensor is used to provide shape information without having to execute the underlying kernel. This can be used for debugging and providing early error messages. 
For example: ```python >>> c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) >>> print(c.shape) # will be TensorShape([2, 3]) (2, 3) >>> d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) >>> print(d.shape) (4, 2) # Raises a ValueError, because `c` and `d` do not have compatible # inner dimensions. >>> e = tf.matmul(c, d) Traceback (most recent call last): ... tensorflow.python.framework.errors_impl.InvalidArgumentError: Matrix size-incompatible: In[0]: [2,3], In[1]: [4,2] [Op:MatMul] name: MatMul/ # This works because we have compatible shapes. >>> f = tf.matmul(c, d, transpose_a=True, transpose_b=True) >>> print(f.shape) (3, 4) ``` In some cases, the inferred shape may have unknown dimensions. If the caller has additional information about the values of these dimensions, `Tensor.set_shape()` can be used to augment the inferred shape. Returns: A `tf.TensorShape` representing the shape of this tensor. """ if self._shape_val is None: self._shape_val = self._c_api_shape() return self._shape_val def _c_api_shape(self): """Returns the TensorShape of this tensor according to the C API.""" c_graph = self._op._graph._c_graph # pylint: disable=protected-access shape_vec, unknown_shape = pywrap_tf_session.TF_GraphGetTensorShapeHelper( c_graph, self._as_tf_output()) if unknown_shape: return tensor_shape.unknown_shape() else: shape_vec = [None if d == -1 else d for d in shape_vec] return tensor_shape.TensorShape(shape_vec) @property def _shape(self): logging.warning("Tensor._shape is private, use Tensor.shape " "instead. Tensor._shape will eventually be removed.") return self.shape @_shape.setter def _shape(self, value): raise ValueError( "Tensor._shape cannot be assigned, use Tensor.set_shape instead.") def _disallow_when_autograph_disabled(self, task): raise errors.OperatorNotAllowedInGraphError( "{} is not allowed: AutoGraph is disabled in this function." 
" Try decorating it directly with @tf.function.".format(task)) def _disallow_when_autograph_enabled(self, task): raise errors.OperatorNotAllowedInGraphError( "{} is not allowed: AutoGraph did not convert this function. Try" " decorating it directly with @tf.function.".format(task)) def _disallow_in_graph_mode(self, task): raise errors.OperatorNotAllowedInGraphError( "{} is not allowed in Graph execution. Use Eager execution or decorate" " this function with @tf.function.".format(task)) def _disallow_bool_casting(self): if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED: self._disallow_when_autograph_disabled( "using a `tf.Tensor` as a Python `bool`") elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED: self._disallow_when_autograph_enabled( "using a `tf.Tensor` as a Python `bool`") else: # Default: V1-style Graph execution. self._disallow_in_graph_mode("using a `tf.Tensor` as a Python `bool`") def _disallow_iteration(self): if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED: self._disallow_when_autograph_disabled("iterating over `tf.Tensor`") elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED: self._disallow_when_autograph_enabled("iterating over `tf.Tensor`") else: # Default: V1-style Graph execution. 
self._disallow_in_graph_mode("iterating over `tf.Tensor`") def __iter__(self): if not context.executing_eagerly(): self._disallow_iteration() shape = self._shape_tuple() if shape is None: raise TypeError("Cannot iterate over a tensor with unknown shape.") if not shape: raise TypeError("Cannot iterate over a scalar tensor.") if shape[0] is None: raise TypeError( "Cannot iterate over a tensor with unknown first dimension.") return _TensorIterator(self, shape[0]) def _shape_as_list(self): if self.shape.ndims is not None: return [dim.value for dim in self.shape.dims] else: return None def _shape_tuple(self): shape = self._shape_as_list() if shape is None: return None return tuple(shape) def _rank(self): """Integer rank of this Tensor, if known, else None. Returns: Integer rank or None """ return self.shape.ndims def get_shape(self): """Alias of `tf.Tensor.shape`.""" return self.shape def set_shape(self, shape): """Updates the shape of this tensor. This method can be called multiple times, and will merge the given `shape` with the current shape of this tensor. It can be used to provide additional information about the shape of this tensor that cannot be inferred from the graph alone. For example, this can be used to provide additional information about the shapes of images: ```python _, image_data = tf.compat.v1.TFRecordReader(...).read(...) image = tf.image.decode_png(image_data, channels=3) # The height and width dimensions of `image` are data dependent, and # cannot be computed without executing the op. print(image.shape) ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)]) # We know that each image in this dataset is 28 x 28 pixels. image.set_shape([28, 28, 3]) print(image.shape) ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)]) ``` NOTE: This shape is not enforced at runtime. Setting incorrect shapes can result in inconsistencies between the statically-known graph and the runtime value of tensors. 
For runtime validation of the shape, use `tf.ensure_shape` instead. Args: shape: A `TensorShape` representing the shape of this tensor, a `TensorShapeProto`, a list, a tuple, or None. Raises: ValueError: If `shape` is not compatible with the current shape of this tensor. """ # Reset cached shape. self._shape_val = None # We want set_shape to be reflected in the C API graph for when we run it. if not isinstance(shape, tensor_shape.TensorShape): shape = tensor_shape.TensorShape(shape) dim_list = [] if shape.dims is None: unknown_shape = True else: unknown_shape = False for dim in shape.dims: if dim.value is None: dim_list.append(-1) else: dim_list.append(dim.value) try: pywrap_tf_session.TF_GraphSetTensorShape_wrapper( self._op._graph._c_graph, # pylint: disable=protected-access self._as_tf_output(), dim_list, unknown_shape) except errors.InvalidArgumentError as e: # Convert to ValueError for backwards compatibility. raise ValueError(str(e)) @property def value_index(self): """The index of this tensor in the outputs of its `Operation`.""" return self._value_index def consumers(self): """Returns a list of `Operation`s that consume this tensor. Returns: A list of `Operation`s. """ consumer_names = pywrap_tf_session.TF_OperationOutputConsumers_wrapper( self._as_tf_output()) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe(name) for name in consumer_names ] # pylint: enable=protected-access def _as_node_def_input(self): """Return a value to use for the NodeDef "input" attribute. The returned string can be used in a NodeDef "input" attribute to indicate that the NodeDef uses this Tensor as input. Raises: ValueError: if this Tensor's Operation does not have a name. Returns: a string. 
""" if not self._op.name: raise ValueError("Operation was not named: %s" % self._op) if self._value_index == 0: return self._op.name else: return "%s:%d" % (self._op.name, self._value_index) def _as_tf_output(self): # pylint: disable=protected-access # NOTE: Beyond preventing unnecessary (re-)allocation, the cached object # also guarantees that a dictionary of tf_output objects will retain a # deterministic (yet unsorted) order which prevents memory blowup in the # cache of executor(s) stored for every session. if self._tf_output is None: self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index) return self._tf_output # pylint: enable=protected-access def __str__(self): return "Tensor(\"%s\"%s%s%s)" % ( self.name, (", shape=%s" % self.get_shape()) if self.get_shape().ndims is not None else "", (", dtype=%s" % self._dtype.name) if self._dtype else "", (", device=%s" % self.device) if self.device else "") def __repr__(self): return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(), self._dtype.name) def __hash__(self): g = getattr(self, "graph", None) if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and (g is None or g.building_function)): raise TypeError("Tensor is unhashable. " "Instead, use tensor.ref() as the key.") else: return id(self) def __copy__(self): # TODO(b/77597810): get rid of Tensor copies. cls = self.__class__ result = cls.__new__(cls) result.__dict__.update(self.__dict__) return result # NOTE(mrry): This enables the Tensor's overloaded "right" binary # operators to run when the left operand is an ndarray, because it # accords the Tensor class higher priority than an ndarray, or a # numpy matrix. # TODO(mrry): Convert this to using numpy's __numpy_ufunc__ # mechanism, which allows more control over how Tensors interact # with ndarrays. 
__array_priority__ = 100 def __array__(self): raise NotImplementedError("Cannot convert a symbolic Tensor ({}) to a numpy" " array.".format(self.name)) def __len__(self): raise TypeError("len is not well defined for symbolic Tensors. ({}) " "Please call `x.shape` rather than `len(x)` for " "shape information.".format(self.name)) @staticmethod def _override_operator(operator, func): _override_helper(Tensor, operator, func) def __bool__(self): """Dummy method to prevent a tensor from being used as a Python `bool`. This overload raises a `TypeError` when the user inadvertently treats a `Tensor` as a boolean (most commonly in an `if` or `while` statement), in code that was not converted by AutoGraph. For example: ```python if tf.constant(True): # Will raise. # ... if tf.constant(5) < tf.constant(7): # Will raise. # ... ``` Raises: `TypeError`. """ self._disallow_bool_casting() def __nonzero__(self): """Dummy method to prevent a tensor from being used as a Python `bool`. This is the Python 2.x counterpart to `__bool__()` above. Raises: `TypeError`. """ self._disallow_bool_casting() def eval(self, feed_dict=None, session=None): """Evaluates this tensor in a `Session`. Note: If you are not using `compat.v1` libraries, you should not need this, (or `feed_dict` or `Session`). In eager execution (or within `tf.function`) you do not need to call `eval`. Calling this method will execute all preceding operations that produce the inputs needed for the operation that produces this tensor. *N.B.* Before invoking `Tensor.eval()`, its graph must have been launched in a session, and either a default session must be available, or `session` must be specified explicitly. Args: feed_dict: A dictionary that maps `Tensor` objects to feed values. See `tf.Session.run` for a description of the valid feed values. session: (Optional.) The `Session` to be used to evaluate this tensor. If none, the default session will be used. Returns: A numpy array corresponding to the value of this tensor. 
""" return _eval_using_default_session(self, feed_dict, self.graph, session) @deprecation.deprecated(None, "Use ref() instead.") def experimental_ref(self): return self.ref() def ref(self): # tf.Variable also has the same ref() API. If you update the # documentation here, please update tf.Variable.ref() as well. """Returns a hashable reference object to this Tensor. The primary use case for this API is to put tensors in a set/dictionary. We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer available starting Tensorflow 2.0. The following will raise an exception starting 2.0 >>> x = tf.constant(5) >>> y = tf.constant(10) >>> z = tf.constant(10) >>> tensor_set = {x, y, z} Traceback (most recent call last): ... TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key. >>> tensor_dict = {x: 'five', y: 'ten'} Traceback (most recent call last): ... TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key. Instead, we can use `tensor.ref()`. >>> tensor_set = {x.ref(), y.ref(), z.ref()} >>> x.ref() in tensor_set True >>> tensor_dict = {x.ref(): 'five', y.ref(): 'ten', z.ref(): 'ten'} >>> tensor_dict[y.ref()] 'ten' Also, the reference object provides `.deref()` function that returns the original Tensor. >>> x = tf.constant(5) >>> x.ref().deref() <tf.Tensor: shape=(), dtype=int32, numpy=5> """ return object_identity.Reference(self) # TODO(agarwal): consider getting rid of this. class _EagerTensorBase(Tensor): """Base class for EagerTensor.""" # __complex__, __int__, __float__ and __index__ may copy the tensor to CPU and # only work for scalars; values are cast as per numpy. 
  def __complex__(self):
    # Materializes the (scalar) tensor via `_numpy()` and casts per NumPy.
    return complex(self._numpy())

  def __int__(self):
    return int(self._numpy())

  def __long__(self):
    # Python 2 only: `long` is undefined in Python 3, but `__long__` is only
    # ever invoked by Python 2's `long()` builtin, so this is safe there.
    return long(self._numpy())

  def __float__(self):
    return float(self._numpy())

  def __index__(self):
    return self._numpy().__index__()

  def __bool__(self):
    # Unlike symbolic Tensors, eager tensors have a concrete value, so
    # truthiness is well defined (scalar only, as with NumPy).
    return bool(self._numpy())

  __nonzero__ = __bool__

  def __format__(self, format_spec):
    # Delegate formatting to the underlying NumPy value.
    return self._numpy().__format__(format_spec)

  def __reduce__(self):
    # Pickle support: serialize by value; unpickling rebuilds the tensor via
    # `convert_to_tensor` from the materialized NumPy data.
    return convert_to_tensor, (self._numpy(),)

  def __copy__(self):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    return self

  def __deepcopy__(self, memo):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    del memo
    return self

  def __str__(self):
    return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
                                                  self.dtype.name)

  def __repr__(self):
    return "<tf.Tensor: shape=%s, dtype=%s, numpy=%s>" % (
        self.shape, self.dtype.name, numpy_text(self, is_repr=True))

  def __len__(self):
    """Returns the length of the first dimension in the Tensor."""
    if not self.shape.ndims:
      raise TypeError("Scalar tensor has no `len()`")
    # pylint: disable=protected-access
    try:
      return self._shape_tuple()[0]
    except core._NotOkStatusException as e:
      # Translate the C-level status exception into the public TF exception.
      six.raise_from(core._status_to_exception(e.code, e.message), None)

  def _numpy_internal(self):
    # Provided by the concrete EagerTensor subclass.
    raise NotImplementedError()

  def _numpy(self):
    # pylint: disable=protected-access
    try:
      return self._numpy_internal()
    except core._NotOkStatusException as e:
      six.raise_from(core._status_to_exception(e.code, e.message), None)

  @property
  def dtype(self):
    # Note: using the intern table directly here as this is
    # performance-sensitive in some models.
    return dtypes._INTERN_TABLE[self._datatype_enum()]  # pylint: disable=protected-access

  def numpy(self):
    """Copy of the contents of this Tensor into a NumPy array or scalar.

    Unlike NumPy arrays, Tensors are immutable, so this method has to copy
    the contents to ensure safety.
    Use `memoryview` to get a readonly view of the contents without doing a
    copy:

    >>> t = tf.constant([42])
    >>> np.array(memoryview(t))
    array([42], dtype=int32)

    Note that `memoryview` is only zero-copy for Tensors on CPU. If a Tensor
    is on GPU, it will have to be transferred to CPU first in order for
    `memoryview` to work.

    Returns:
      A NumPy array of the same shape and dtype or a NumPy scalar, if this
      Tensor has rank 0.

    Raises:
      ValueError: If the dtype of this Tensor does not have a compatible
        NumPy dtype.
    """
    # TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.
    maybe_arr = self._numpy()  # pylint: disable=protected-access
    # `_numpy()` may return an ndarray view or a NumPy scalar; only ndarrays
    # need an explicit copy to preserve immutability.
    return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr

  @property
  def backing_device(self):
    """Returns the name of the device holding this tensor's memory.

    `.backing_device` is usually the same as `.device`, which returns
    the device on which the kernel of the operation that produced this tensor
    ran. However, some operations can produce tensors on a different device
    (e.g., an operation that executes on the GPU but produces output tensors
    in host memory).
    """
    raise NotImplementedError()

  def _datatype_enum(self):
    # Provided by the concrete EagerTensor subclass.
    raise NotImplementedError()

  def _shape_tuple(self):
    """The shape of this Tensor, as a tuple.

    This is more performant than tuple(shape().as_list()) as it avoids
    two list and one object creation. Marked private for now as from an API
    perspective, it would be better to have a single performant way of
    getting a shape rather than exposing shape() and shape_tuple()
    (and heaven forbid, shape_list() etc. as well!). Punting on that for now,
    but ideally one would work things out and remove the need for this method.

    Returns:
      tuple with the shape.
    """
    raise NotImplementedError()

  def _rank(self):
    """Integer rank of this Tensor.

    Unlike regular Tensors, the rank is always known for EagerTensors.
    This is more performant than len(self._shape_tuple())

    Returns:
      Integer rank
    """
    raise NotImplementedError()

  def _num_elements(self):
    """Number of elements of this Tensor.

    Unlike regular Tensors, the number of elements is always known for
    EagerTensors.

    This is more performant than tensor.shape.num_elements

    Returns:
      Long - num elements in the tensor
    """
    raise NotImplementedError()

  def _copy_to_device(self, device_name):  # pylint: disable=redefined-outer-name
    # Provided by the concrete EagerTensor subclass.
    raise NotImplementedError()

  @staticmethod
  def _override_operator(name, func):
    # Set on this base class so the concrete EagerTensor subclass (created
    # below via pywrap_tfe) inherits the operator.
    setattr(_EagerTensorBase, name, func)

  def _copy_nograd(self, ctx=None, device_name=None):
    """Copies tensor to dest device, but doesn't record the operation."""
    # Creates a new tensor on the dest device.
    if ctx is None:
      ctx = context.context()
    if device_name is None:
      device_name = ctx.device_name
    # pylint: disable=protected-access
    try:
      ctx.ensure_initialized()
      new_tensor = self._copy_to_device(device_name)
    except core._NotOkStatusException as e:
      # Translate the C-level status exception into the public TF exception.
      six.raise_from(core._status_to_exception(e.code, e.message), None)
    return new_tensor

  def _copy(self, ctx=None, device_name=None):
    """Copies tensor to dest device."""
    new_tensor = self._copy_nograd(ctx, device_name)
    # Record the copy on tape and define backprop copy as well.
    if context.executing_eagerly():
      self_device = self.device

      def grad_fun(dresult):
        # The gradient of a device copy is a copy back to the source device
        # (when the incoming gradient supports `_copy`).
        return [
            dresult._copy(device_name=self_device)
            if hasattr(dresult, "_copy") else dresult
        ]

      tape.record_operation("_copy", [new_tensor], [self], grad_fun)
    return new_tensor

  # pylint: enable=protected-access

  @property
  def shape(self):
    # Lazily build and cache a TensorShape; `_tensor_shape` starts out None.
    if self._tensor_shape is None:  # pylint: disable=access-member-before-definition
      # pylint: disable=protected-access
      try:
        # `_tensor_shape` is declared and defined in the definition of
        # `EagerTensor`, in C.
        self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
      except core._NotOkStatusException as e:
        # Translate the C-level status exception into the public TF exception.
        six.raise_from(core._status_to_exception(e.code, e.message), None)

    return self._tensor_shape

  def get_shape(self):
    """Alias of Tensor.shape."""
    return self.shape

  def _shape_as_list(self):
    """The shape of the tensor as a list."""
    return list(self._shape_tuple())

  @property
  def ndim(self):
    """Returns the number of Tensor dimensions."""
    return self.shape.ndims

  @deprecation.deprecated(None, "Use tf.identity instead.")
  def cpu(self):
    """A copy of this Tensor with contents backed by host memory."""
    return self._copy(context.context(), "CPU:0")

  @deprecation.deprecated(None, "Use tf.identity instead.")
  def gpu(self, gpu_index=0):
    """A copy of this Tensor with contents backed by memory on the GPU.

    Arguments:
      gpu_index: Identifies which GPU to place the contents on the returned
        Tensor in.

    Returns:
      A GPU-memory backed Tensor object initialized with the same contents
      as this Tensor.
    """
    return self._copy(context.context(), "GPU:" + str(gpu_index))

  def set_shape(self, shape):
    """Checks that `shape` is compatible with this tensor's known shape.

    This never modifies the tensor; it only raises `ValueError` when the
    supplied `shape` is incompatible with the existing one.
    """
    if not self.shape.is_compatible_with(shape):
      raise ValueError(
          "Tensor's shape %s is not compatible with supplied shape %s" %
          (self.shape, shape))

  # Methods not supported / implemented for Eager Tensors.
@property def op(self): raise AttributeError( "Tensor.op is meaningless when eager execution is enabled.") @property def graph(self): raise AttributeError( "Tensor.graph is meaningless when eager execution is enabled.") @property def name(self): raise AttributeError( "Tensor.name is meaningless when eager execution is enabled.") @property def value_index(self): raise AttributeError( "Tensor.value_index is meaningless when eager execution is enabled.") def consumers(self): raise NotImplementedError( "Tensor.consumers is meaningless when eager execution is enabled.") def _add_consumer(self, consumer): raise NotImplementedError( "_add_consumer not supported when eager execution is enabled.") def _as_node_def_input(self): raise NotImplementedError( "_as_node_def_input not supported when eager execution is enabled.") def _as_tf_output(self): raise NotImplementedError( "_as_tf_output not supported when eager execution is enabled.") def eval(self, feed_dict=None, session=None): raise NotImplementedError( "eval is not supported when eager execution is enabled, " "is .numpy() what you're looking for?") # This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and # registers it with the current module. EagerTensor = pywrap_tfe.TFE_Py_InitEagerTensor(_EagerTensorBase) register_dense_tensor_like_type(Tensor) @tf_export(v1=["convert_to_tensor"]) def convert_to_tensor_v1(value, dtype=None, name=None, preferred_dtype=None, dtype_hint=None): """Converts the given `value` to a `Tensor`. This function converts Python objects of various types to `Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars. For example: ```python import numpy as np def my_func(arg): arg = tf.convert_to_tensor(arg, dtype=tf.float32) return tf.matmul(arg, arg) + arg # The following calls are equivalent. 
  value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
  value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
  value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
  ```

  This function can be useful when composing a new operation in Python
  (such as `my_func` in the example above). All standard Python op
  constructors apply this function to each of their Tensor-valued
  inputs, which allows those ops to accept numpy arrays, Python lists,
  and scalars in addition to `Tensor` objects.

  Note: This function diverges from default Numpy behavior for `float` and
    `string` types when `None` is present in a Python list or scalar. Rather
    than silently converting `None` values, an error will be thrown.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the type
      is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    preferred_dtype: Optional element type for the returned tensor, used when
      dtype is None. In some cases, a caller may not have a dtype in mind when
      converting to a tensor, so preferred_dtype can be used as a soft
      preference.  If the conversion to `preferred_dtype` is not possible, this
      argument has no effect.
    dtype_hint: same meaning as preferred_dtype, and overrides it.

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value` to `dtype`.
    RuntimeError: If a registered conversion function returns an invalid value.
    ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
  """
  # `dtype_hint` is the new spelling and overrides the deprecated
  # `preferred_dtype` when both are passed.
  preferred_dtype = deprecation.deprecated_argument_lookup(
      "dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
  return convert_to_tensor_v2(value, dtype, preferred_dtype, name)


@tf_export("convert_to_tensor", v1=[])
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
  """Converts the given `value` to a `Tensor`.

  This function converts Python objects of various types to `Tensor` objects.
  It accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars.
  For example:

  >>> def my_func(arg):
  ...   arg = tf.convert_to_tensor(arg, dtype=tf.float32)
  ...   return arg

  >>> # The following calls are equivalent.
  >>> value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
  >>> print(value_1)
  tf.Tensor(
  [[1. 2.]
   [3. 4.]], shape=(2, 2), dtype=float32)
  >>> value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
  >>> print(value_2)
  tf.Tensor(
  [[1. 2.]
   [3. 4.]], shape=(2, 2), dtype=float32)
  >>> value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
  >>> print(value_3)
  tf.Tensor(
  [[1. 2.]
   [3. 4.]], shape=(2, 2), dtype=float32)

  This function can be useful when composing a new operation in Python
  (such as `my_func` in the example above). All standard Python op
  constructors apply this function to each of their Tensor-valued
  inputs, which allows those ops to accept numpy arrays, Python lists,
  and scalars in addition to `Tensor` objects.

  Note: This function diverges from default Numpy behavior for `float` and
    `string` types when `None` is present in a Python list or scalar. Rather
    than silently converting `None` values, an error will be thrown.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the type
      is inferred from the type of `value`.
    dtype_hint: Optional element type for the returned tensor, used when dtype
      is None. In some cases, a caller may not have a dtype in mind when
      converting to a tensor, so dtype_hint can be used as a soft preference.
      If the conversion to `dtype_hint` is not possible, this argument has no
      effect.
    name: Optional name to use if a new `Tensor` is created.

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value` to `dtype`.
    RuntimeError: If a registered conversion function returns an invalid value.
    ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
  """
  return convert_to_tensor(
      value=value,
      dtype=dtype,
      name=name,
      preferred_dtype=dtype_hint,
      as_ref=False)


def _error_prefix(name):
  # Prefix for conversion error messages; empty when no name was supplied.
  return "" if name is None else "%s: " % name


def convert_to_tensor(value,
                      dtype=None,
                      name=None,
                      as_ref=False,
                      preferred_dtype=None,
                      dtype_hint=None,
                      ctx=None,
                      accepted_result_types=(Tensor,)):
  """Implementation of the public convert_to_tensor."""
  # TODO(b/142518781): Fix all call-sites and remove redundant arg
  preferred_dtype = preferred_dtype or dtype_hint
  if isinstance(value, EagerTensor):
    if ctx is None:
      ctx = context.context()
    if not ctx.executing_eagerly():
      # An EagerTensor used while building a function graph must be captured
      # into that graph rather than used directly.
      graph = get_default_graph()
      if not graph.building_function:
        raise RuntimeError("Attempting to capture an EagerTensor without "
                           "building a function.")
      return graph.capture(value, name=name)

  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  if isinstance(value, Tensor):
    # Already a Tensor: just validate the requested dtype (no casting here).
    if dtype is not None and not dtype.is_compatible_with(value.dtype):
      raise ValueError(
          "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
          (dtype.name, value.dtype.name, value))
    return value

  if preferred_dtype is not None:
    preferred_dtype = dtypes.as_dtype(preferred_dtype)
  # Try each conversion function registered for this value's type, in order.
  for base_type, conversion_func in tensor_conversion_registry.get(type(value)):
    # If dtype is None but preferred_dtype is not None, we try to
    # cast to preferred_dtype first.
    ret = None
    if dtype is None and preferred_dtype is not None:
      try:
        ret = conversion_func(
            value, dtype=preferred_dtype, name=name, as_ref=as_ref)
      except (TypeError, ValueError):
        # Could not coerce the conversion to use the preferred dtype.
        pass
      else:
        if (ret is not NotImplemented and
            ret.dtype.base_dtype != preferred_dtype.base_dtype):
          raise TypeError("convert_to_tensor did not convert to "
                          "the preferred dtype: %s vs %s " %
                          (ret.dtype.base_dtype, preferred_dtype.base_dtype))

    if ret is None:
      ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)

    if ret is NotImplemented:
      # This conversion function declined the value; try the next registered
      # function.
      continue

    if not isinstance(ret, accepted_result_types):
      raise RuntimeError(
          "%sConversion function %r for type %s returned non-Tensor: %r" %
          (_error_prefix(name), conversion_func, base_type, ret))
    if dtype and not dtype.is_compatible_with(ret.dtype):
      raise RuntimeError(
          "%sConversion function %r for type %s returned incompatible "
          "dtype: requested = %s, actual = %s" %
          (_error_prefix(name), conversion_func, base_type, dtype.name,
           ret.dtype.name))
    return ret
  raise TypeError("%sCannot convert %r with type %s to Tensor: "
                  "no conversion function registered." %
                  (_error_prefix(name), value, type(value)))


internal_convert_to_tensor = convert_to_tensor


def internal_convert_n_to_tensor(values,
                                 dtype=None,
                                 name=None,
                                 as_ref=False,
                                 preferred_dtype=None,
                                 ctx=None):
  """Converts `values` to a list of `Tensor` objects.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to be used when a new `Tensor` is created,
      in which case element `i` will be given the name `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.
    preferred_dtype: Optional element type for the returned tensors, used when
      dtype is None. In some cases, a caller may not have a dtype in mind when
      converting to a tensor, so preferred_dtype can be used as a soft
      preference.  If the conversion to `preferred_dtype` is not possible, this
      argument has no effect.
    ctx: The value of context.context().

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid value.
  """
  if not isinstance(values, collections_abc.Sequence):
    raise TypeError("values must be a sequence.")
  ret = []
  if ctx is None:
    ctx = context.context()
  for i, value in enumerate(values):
    n = None if name is None else "%s_%d" % (name, i)
    ret.append(
        convert_to_tensor(
            value,
            dtype=dtype,
            name=n,
            as_ref=as_ref,
            preferred_dtype=preferred_dtype,
            ctx=ctx))
  return ret


def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
  """Converts `values` to a list of `Tensor` objects.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to be used when a new `Tensor` is created,
      in which case element `i` will be given the name `name + '_' + i`.
    preferred_dtype: Optional element type for the returned tensors, used when
      dtype is None. In some cases, a caller may not have a dtype in mind when
      converting to a tensor, so preferred_dtype can be used as a soft
      preference.  If the conversion to `preferred_dtype` is not possible, this
      argument has no effect.

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid value.
  """
  return internal_convert_n_to_tensor(
      values=values,
      dtype=dtype,
      name=name,
      preferred_dtype=preferred_dtype,
      as_ref=False)


def convert_to_tensor_or_composite(value, dtype=None, name=None):
  """Converts the given object to a `Tensor` or `CompositeTensor`.

  If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
  is converted to a `Tensor` using `convert_to_tensor()`.

  Args:
    value: A `CompositeTensor` or an object that can be consumed by
      `convert_to_tensor()`.
    dtype: (Optional.)
      The required `DType` of the returned `Tensor` or
      `CompositeTensor`.
    name: (Optional.) A name to use if a new `Tensor` is created.

  Returns:
    A `Tensor` or `CompositeTensor`, based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  return internal_convert_to_tensor_or_composite(
      value=value, dtype=dtype, name=name, as_ref=False)


def internal_convert_to_tensor_or_composite(value,
                                            dtype=None,
                                            name=None,
                                            as_ref=False):
  """Converts the given object to a `Tensor` or `CompositeTensor`.

  If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
  is converted to a `Tensor` using `convert_to_tensor()`.

  Args:
    value: A `CompositeTensor`, or an object that can be consumed by
      `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `CompositeTensor`.
    name: (Optional.) A name to use if a new `Tensor` is created.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A `Tensor` or `CompositeTensor`, based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  if isinstance(value, composite_tensor.CompositeTensor):
    # Composite tensors pass through untouched; only their dtype (if any) is
    # validated against the request.
    value_dtype = getattr(value, "dtype", None)
    if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):
      raise ValueError(
          "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
          (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
    return value
  else:
    return convert_to_tensor(
        value,
        dtype=dtype,
        name=name,
        as_ref=as_ref,
        accepted_result_types=(Tensor, composite_tensor.CompositeTensor))


def internal_convert_n_to_tensor_or_composite(values,
                                              dtype=None,
                                              name=None,
                                              as_ref=False):
  """Converts `values` to a list of `Tensor` or `CompositeTensor` objects.

  Any `CompositeTensor` objects in `values` are returned unmodified.

  Args:
    values: A list of `None`, `CompositeTensor`, or objects that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`s or
      `CompositeTensor`s.
    name: (Optional.) A name prefix to be used when a new `Tensor` is created,
      in which case element `i` will be given the name `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A list of `Tensor`, `CompositeTensor`, and/or `None` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid value.
  """
  if not isinstance(values, collections_abc.Sequence):
    raise TypeError("values must be a sequence.")
  ret = []
  for i, value in enumerate(values):
    if value is None:
      # `None` elements pass through unchanged.
      ret.append(value)
    else:
      n = None if name is None else "%s_%d" % (name, i)
      ret.append(
          internal_convert_to_tensor_or_composite(
              value, dtype=dtype, name=n, as_ref=as_ref))
  return ret


def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
  """Converts `values` to a list of `Output` or `CompositeTensor` objects.

  Any `CompositeTensor` objects in `values` are returned unmodified.

  Args:
    values: A list of `None`, `CompositeTensor`, or objects that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`s or
      `CompositeTensor`s.
    name: (Optional.) A name prefix to be used when a new `Tensor` is created,
      in which case element `i` will be given the name `name + '_' + i`.

  Returns:
    A list of `Tensor` and/or `CompositeTensor` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid value.
  """
  return internal_convert_n_to_tensor_or_composite(
      values=values, dtype=dtype, name=name, as_ref=False)


def _device_string(dev_spec):
  """Returns `dev_spec` as a device string; plain strings pass through."""
  if pydev.is_device_spec(dev_spec):
    return dev_spec.to_string()
  else:
    return dev_spec


def _NodeDef(op_type, name, attrs=None):
  """Create a NodeDef proto.

  Args:
    op_type: Value for the "op" attribute of the NodeDef proto.
    name: Value for the "name" attribute of the NodeDef proto.
    attrs: Dictionary where the key is the attribute name (a string)
      and the value is the respective "attr" attribute of the NodeDef proto (an
      AttrValue).

  Returns:
    A node_def_pb2.NodeDef protocol buffer.
  """
  node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type),
                                  name=compat.as_bytes(name))
  if attrs:
    for k, v in six.iteritems(attrs):
      node_def.attr[k].CopyFrom(v)
  return node_def


# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/>]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/>]*$")


def _create_c_op(graph, node_def, inputs, control_inputs, op_def=None):
  """Creates a TF_Operation.

  Args:
    graph: a `Graph`.
    node_def: `node_def_pb2.NodeDef` for the operation to create.
    inputs: A flattened list of `Tensor`s. This function handles grouping
      tensors into lists as per attributes in the `node_def`.
    control_inputs: A list of `Operation`s to set as control dependencies.
    op_def: Optional. `op_def_pb2.OpDef` for the operation to create. If not
      specified, is looked up from the `graph` using `node_def.op`.

  Returns:
    A wrapped TF_Operation*.
  """
  if op_def is None:
    op_def = graph._get_op_def(node_def.op)  # pylint: disable=protected-access
  # TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
  # Refactor so we don't have to do this here.
inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr) # pylint: disable=protected-access op_desc = pywrap_tf_session.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op), compat.as_str(node_def.name)) if node_def.device: pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device)) # Add inputs for op_input in inputs: if isinstance(op_input, (list, tuple)): pywrap_tf_session.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input]) else: pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output()) # Add control inputs for control_input in control_inputs: pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op) # pylint: enable=protected-access # Add attrs for name, attr_value in node_def.attr.items(): serialized = attr_value.SerializeToString() # TODO(skyewm): this creates and deletes a new TF_Status for every attr. # It might be worth creating a convenient way to re-use the same status. pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized) try: c_op = pywrap_tf_session.TF_FinishOperation(op_desc) except errors.InvalidArgumentError as e: # Convert to ValueError for backwards compatibility. raise ValueError(str(e)) return c_op @tf_export("Operation") class Operation(object): """Represents a graph node that performs computation on tensors. An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor` objects as input, and produces zero or more `Tensor` objects as output. Objects of type `Operation` are created by calling a Python op constructor (such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default` context manager. For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an `Operation` of type "MatMul" that takes tensors `a` and `b` as input, and produces `c` as output. If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be executed by passing it to `tf.Session.run`. 
`op.run()` is a shortcut for calling `tf.compat.v1.get_default_session().run(op)`. """ def __init__(self, node_def, g, inputs=None, output_types=None, control_inputs=None, input_types=None, original_op=None, op_def=None): r"""Creates an `Operation`. NOTE: This constructor validates the name of the `Operation` (passed as `node_def.name`). Valid `Operation` names match the following regular expression: [A-Za-z0-9.][A-Za-z0-9_.\\-/]* Args: node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and `device`. The `input` attribute is irrelevant here as it will be computed when generating the model. g: `Graph`. The parent graph. inputs: list of `Tensor` objects. The inputs to this `Operation`. output_types: list of `DType` objects. List of the types of the `Tensors` computed by this operation. The length of this list indicates the number of output endpoints of the `Operation`. control_inputs: list of operations or tensors from which to have a control dependency. input_types: List of `DType` objects representing the types of the tensors accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect reference-typed inputs must specify these explicitly. original_op: Optional. Used to associate the new `Operation` with an existing `Operation` (for example, a replica with the op that was replicated). op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type that this `Operation` represents. Raises: TypeError: if control inputs are not Operations or Tensors, or if `node_def` is not a `NodeDef`, or if `g` is not a `Graph`, or if `inputs` are not tensors, or if `inputs` and `input_types` are incompatible. ValueError: if the `node_def` name is not valid. """ # For internal use only: `node_def` can be set to a TF_Operation to create # an Operation for that op. 
This is useful for creating Operations for ops # indirectly created by C API methods, e.g. the ops created by # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields # should be None. if isinstance(node_def, node_def_pb2.NodeDef): if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0: raise ValueError( "Cannot create a tensor proto whose content is larger than 2GB.") if not _VALID_OP_NAME_REGEX.match(node_def.name): raise ValueError("'%s' is not a valid node name" % node_def.name) c_op = None elif type(node_def).__name__ == "TF_Operation": assert inputs is None assert output_types is None assert control_inputs is None assert input_types is None assert original_op is None assert op_def is None c_op = node_def else: raise TypeError("node_def needs to be a NodeDef: %s" % node_def) if not isinstance(g, Graph): raise TypeError("g needs to be a Graph: %s" % g) self._graph = g if inputs is None: inputs = [] elif not isinstance(inputs, list): raise TypeError("inputs needs to be a list of Tensors: %s" % inputs) for a in inputs: if not isinstance(a, Tensor): raise TypeError("input needs to be a Tensor: %s" % a) if input_types is None: input_types = [i.dtype.base_dtype for i in inputs] else: if not all( x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)): raise TypeError("In op '%s', input types (%s) are not compatible " "with expected types (%s)" % (node_def.name, [i.dtype for i in inputs], input_types)) # Build the list of control inputs. control_input_ops = [] if control_inputs: for c in control_inputs: control_op = None if isinstance(c, Operation): control_op = c elif isinstance(c, (Tensor, IndexedSlices)): control_op = c.op else: raise TypeError("Control input must be an Operation, " "a Tensor, or IndexedSlices: %s" % c) control_input_ops.append(control_op) # This will be set by self.inputs. 
self._inputs_val = None # pylint: disable=protected-access self._original_op = original_op self._traceback = tf_stack.extract_stack() # List of _UserDevSpecs holding code location of device context manager # invocations and the users original argument to them. self._device_code_locations = None # Dict mapping op name to file and line information for op colocation # context managers. self._colocation_code_locations = None self._control_flow_context = self.graph._get_control_flow_context() # Gradient function for this op. There are three ways to specify gradient # function, and first available gradient gets used, in the following order. # 1. self._gradient_function # 2. Gradient name registered by "_gradient_op_type" attribute. # 3. Gradient name registered by op.type. self._gradient_function = None # Initialize self._c_op. if c_op: self._c_op = c_op op_def = g._get_op_def(pywrap_tf_session.TF_OperationOpType(c_op)) name = self.name else: if op_def is None: op_def = self._graph._get_op_def(node_def.op) self._c_op = _create_c_op(self._graph, node_def, inputs, control_input_ops, op_def) name = compat.as_str(node_def.name) # pylint: enable=protected-access self._is_stateful = op_def.is_stateful # Initialize self._outputs. num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op) self._outputs = [] for i in range(num_outputs): tf_output = c_api_util.tf_output(self._c_op, i) output_type = pywrap_tf_session.TF_OperationOutputType(tf_output) tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output) # pylint: disable=protected-access self._outputs.append(tensor) self._id_value = self._graph._add_op(self, name) # pylint: disable=protected-access if not c_op: self._control_flow_post_processing(input_tensors=inputs) def _control_flow_post_processing(self, input_tensors=None): """Add this op to its control flow context. This may add new ops and change this op's inputs. self.inputs must be available before calling this method. 
Args: input_tensors: (Optional.) A list of `Tensors` corresponding to the inputs of this op, which should be equivalent to `self.inputs`. Pass this argument to avoid evaluating `self.inputs` unnecessarily. """ if input_tensors is None: input_tensors = self.inputs for input_tensor in input_tensors: control_flow_util.CheckInputFromValidContext(self, input_tensor.op) if self._control_flow_context is not None: self._control_flow_context.AddOp(self) def colocation_groups(self): """Returns the list of colocation groups of the op.""" default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)] try: class_attr = self.get_attr("_class") except ValueError: # This op has no explicit colocation group, so it is itself its # own root of a colocation group. return default_colocation_group attr_groups = [ class_name for class_name in class_attr if class_name.startswith(b"loc:@") ] # If there are no colocation groups in the explicit _class field, # return the default colocation group. return attr_groups if attr_groups else default_colocation_group def values(self): """DEPRECATED: Use outputs.""" return tuple(self.outputs) def _get_control_flow_context(self): """Returns the control flow context of this op. Returns: A context object. """ return self._control_flow_context def _set_control_flow_context(self, ctx): """Sets the current control flow context of this op. Args: ctx: a context object. """ self._control_flow_context = ctx @property def name(self): """The full name of this operation.""" return pywrap_tf_session.TF_OperationName(self._c_op) @property def _id(self): """The unique integer id of this operation.""" return self._id_value @property def device(self): """The name of the device to which this op has been assigned, if any. Returns: The string name of the device to which this op has been assigned, or an empty string if it has not been assigned to a device. 
""" return pywrap_tf_session.TF_OperationDevice(self._c_op) @property def _device_assignments(self): """Code locations for device context managers active at op creation. This property will return a list of traceable_stack.TraceableObject instances where .obj is a string representing the assigned device (or information about the function that would be applied to this op to compute the desired device) and the filename and lineno members record the location of the relevant device context manager. For example, suppose file_a contained these lines: file_a.py: 15: with tf.device('/gpu:0'): 16: node_b = tf.constant(4, name='NODE_B') Then a TraceableObject t_obj representing the device context manager would have these member values: t_obj.obj -> '/gpu:0' t_obj.filename = 'file_a.py' t_obj.lineno = 15 and node_b.op._device_assignments would return the list [t_obj]. Returns: [str: traceable_stack.TraceableObject, ...] as per this method's description, above. """ return self._device_code_locations or [] @property def _colocation_dict(self): """Code locations for colocation context managers active at op creation. This property will return a dictionary for which the keys are nodes with which this Operation is colocated, and for which the values are traceable_stack.TraceableObject instances. The TraceableObject instances record the location of the relevant colocation context manager but have the "obj" field set to None to prevent leaking private data. For example, suppose file_a contained these lines: file_a.py: 14: node_a = tf.constant(3, name='NODE_A') 15: with tf.compat.v1.colocate_with(node_a): 16: node_b = tf.constant(4, name='NODE_B') Then a TraceableObject t_obj representing the colocation context manager would have these member values: t_obj.obj -> None t_obj.filename = 'file_a.py' t_obj.lineno = 15 and node_b.op._colocation_dict would return the dictionary { 'NODE_A': t_obj } Returns: {str: traceable_stack.TraceableObject} as per this method's description, above. 
""" locations_dict = self._colocation_code_locations or {} return locations_dict.copy() @property def _output_types(self): """List this operation's output types. Returns: List of the types of the Tensors computed by this operation. Each element in the list is an integer whose value is one of the TF_DataType enums defined in pywrap_tf_session.h The length of this list indicates the number of output endpoints of the operation. """ num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op) output_types = [ int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i))) for i in xrange(num_outputs) ] return output_types def _tf_output(self, output_idx): """Create and return a new TF_Output for output_idx'th output of this op.""" tf_output = pywrap_tf_session.TF_Output() tf_output.oper = self._c_op tf_output.index = output_idx return tf_output def _tf_input(self, input_idx): """Create and return a new TF_Input for input_idx'th input of this op.""" tf_input = pywrap_tf_session.TF_Input() tf_input.oper = self._c_op tf_input.index = input_idx return tf_input def _set_device(self, device): # pylint: disable=redefined-outer-name """Set the device of this operation. Args: device: string or device.. The device to set. """ self._set_device_from_string(compat.as_str(_device_string(device))) def _set_device_from_string(self, device_str): """Fast path to set device if the type is known to be a string. This function is called frequently enough during graph construction that there are non-trivial performance gains if the caller can guarantee that the specified device is already a string. Args: device_str: A string specifying where to place this op. """ pywrap_tf_session.SetRequestedDevice( self._graph._c_graph, # pylint: disable=protected-access self._c_op, # pylint: disable=protected-access device_str) def _update_input(self, index, tensor): """Update the input to this operation at the given index. NOTE: This is for TF internal use only. Please don't use it. 
Args: index: the index of the input to update. tensor: the Tensor to be used as the input at the given index. Raises: TypeError: if tensor is not a Tensor, or if input tensor type is not convertible to dtype. ValueError: if the Tensor is from a different graph. """ if not isinstance(tensor, Tensor): raise TypeError("tensor must be a Tensor: %s" % tensor) _assert_same_graph(self, tensor) # Reset cached inputs. self._inputs_val = None pywrap_tf_session.UpdateEdge( self._graph._c_graph, # pylint: disable=protected-access tensor._as_tf_output(), # pylint: disable=protected-access self._tf_input(index)) def _add_while_inputs(self, tensors): """See AddWhileInputHack in python_api.h. NOTE: This is for TF internal use only. Please don't use it. Args: tensors: list of Tensors Raises: TypeError: if tensor is not a Tensor, or if input tensor type is not convertible to dtype. ValueError: if the Tensor is from a different graph. """ for tensor in tensors: if not isinstance(tensor, Tensor): raise TypeError("tensor must be a Tensor: %s" % tensor) _assert_same_graph(self, tensor) # Reset cached inputs. self._inputs_val = None pywrap_tf_session.AddWhileInputHack( self._graph._c_graph, # pylint: disable=protected-access tensor._as_tf_output(), # pylint: disable=protected-access self._c_op) def _add_control_inputs(self, ops): """Add a list of new control inputs to this operation. Args: ops: the list of Operations to add as control input. Raises: TypeError: if ops is not a list of Operations. ValueError: if any op in ops is from a different graph. """ for op in ops: if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) pywrap_tf_session.AddControlInput( self._graph._c_graph, # pylint: disable=protected-access self._c_op, # pylint: disable=protected-access op._c_op) # pylint: disable=protected-access def _add_control_input(self, op): """Add a new control input to this operation. Args: op: the Operation to add as control input. 
Raises: TypeError: if op is not an Operation. ValueError: if op is from a different graph. """ if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) pywrap_tf_session.AddControlInput( self._graph._c_graph, # pylint: disable=protected-access self._c_op, # pylint: disable=protected-access op._c_op) # pylint: disable=protected-access def _remove_all_control_inputs(self): """Removes any control inputs to this operation.""" pywrap_tf_session.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access def _add_outputs(self, types, shapes): """Adds new Tensors to self.outputs. Note: this is generally unsafe to use. This is used in certain situations in conjunction with _set_type_list_attr. Arguments: types: list of DTypes shapes: list of TensorShapes """ assert len(types) == len(shapes) orig_num_outputs = len(self.outputs) for i in range(len(types)): t = Tensor(self, orig_num_outputs + i, types[i]) self._outputs.append(t) t.set_shape(shapes[i]) def __str__(self): return str(self.node_def) def __repr__(self): return "<tf.Operation '%s' type=%s>" % (self.name, self.type) @property def outputs(self): """The list of `Tensor` objects representing the outputs of this op.""" return self._outputs @property def inputs(self): """The sequence of `Tensor` objects representing the data inputs of this op.""" if self._inputs_val is None: # pylint: disable=protected-access self._inputs_val = tuple( map(self.graph._get_tensor_by_tf_output, pywrap_tf_session.GetOperationInputs(self._c_op))) # pylint: enable=protected-access return self._inputs_val @property def _input_types(self): num_inputs = pywrap_tf_session.TF_OperationNumInputs(self._c_op) input_types = [ dtypes.as_dtype( pywrap_tf_session.TF_OperationInputType(self._tf_input(i))) for i in xrange(num_inputs) ] return input_types @property def control_inputs(self): """The `Operation` objects on which this op has a control dependency. 
Before this op is executed, TensorFlow will ensure that the operations in `self.control_inputs` have finished executing. This mechanism can be used to run ops sequentially for performance reasons, or to ensure that the side effects of an op are observed in the correct order. Returns: A list of `Operation` objects. """ control_c_ops = pywrap_tf_session.TF_OperationGetControlInputs_wrapper( self._c_op) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe( pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops ] # pylint: enable=protected-access @property def _control_outputs(self): """The `Operation` objects which have a control dependency on this op. Before any of the ops in self._control_outputs can execute tensorflow will ensure self has finished executing. Returns: A list of `Operation` objects. """ control_c_ops = pywrap_tf_session.TF_OperationGetControlOutputs_wrapper( self._c_op) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe( pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops ] # pylint: enable=protected-access @property def type(self): """The type of the op (e.g. `"MatMul"`).""" return pywrap_tf_session.TF_OperationOpType(self._c_op) @property def graph(self): """The `Graph` that contains this operation.""" return self._graph @property def node_def(self): # pylint: disable=line-too-long """Returns the `NodeDef` representation of this operation. Returns: A [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto) protocol buffer. """ # pylint: enable=line-too-long with c_api_util.tf_buffer() as buf: pywrap_tf_session.TF_OperationToNodeDef(self._c_op, buf) data = pywrap_tf_session.TF_GetBuffer(buf) node_def = node_def_pb2.NodeDef() node_def.ParseFromString(compat.as_bytes(data)) return node_def @property def op_def(self): # pylint: disable=line-too-long """Returns the `OpDef` proto that represents the type of this op. 
    Returns:
      An
      [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    return self._graph._get_op_def(self.type)

  @property
  def traceback(self):
    """Returns the call stack from when this operation was constructed."""
    return self._traceback

  def _set_attr(self, attr_name, attr_value):
    """Private method used to set an attribute in the node_def."""
    # Serialize the AttrValue proto into a C buffer; the buffer is always
    # freed in the finally block, even if setting the attr raises.
    buf = pywrap_tf_session.TF_NewBufferFromString(
        compat.as_bytes(attr_value.SerializeToString()))
    try:
      self._set_attr_with_buf(attr_name, buf)
    finally:
      pywrap_tf_session.TF_DeleteBuffer(buf)

  def _set_attr_with_buf(self, attr_name, attr_buf):
    """Set an attr in the node_def with a pre-allocated buffer."""
    # pylint: disable=protected-access
    pywrap_tf_session.SetAttr(self._graph._c_graph, self._c_op, attr_name,
                              attr_buf)
    # pylint: enable=protected-access

  def _set_func_attr(self, attr_name, func_name):
    """Private method used to set a function attribute in the node_def."""
    func = attr_value_pb2.NameAttrList(name=func_name)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))

  def _set_func_list_attr(self, attr_name, func_names):
    """Private method used to set a list(function) attribute in the node_def."""
    funcs = [attr_value_pb2.NameAttrList(name=func_name)
             for func_name in func_names]
    funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))

  def _set_type_list_attr(self, attr_name, types):
    """Private method used to set a list(type) attribute in the node_def."""
    if not types:
      return
    # Accept either DType objects or raw DataType enum values.
    if isinstance(types[0], dtypes.DType):
      types = [dt.as_datatype_enum for dt in types]
    types_list = attr_value_pb2.AttrValue.ListValue(type=types)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))

  def _set_shape_list_attr(self, attr_name, shapes):
    """Private method used to set a list(shape) attribute in the node_def."""
    shapes = [s.as_proto() for s in shapes]
    shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))

  def _clear_attr(self, attr_name):
    """Private method used to clear an attribute in the node_def."""
    # pylint: disable=protected-access
    pywrap_tf_session.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
    # pylint: enable=protected-access

  def get_attr(self, name):
    """Returns the value of the attr of this op with the given `name`.

    Args:
      name: The name of the attr to fetch.

    Returns:
      The value of the attr, as a Python object.

    Raises:
      ValueError: If this op does not have an attr with the given `name`.
    """
    # The AttrValue oneof fields that may carry a (scalar) value.
    fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
    try:
      # Fetch the attr as a serialized AttrValue proto from the C API.
      with c_api_util.tf_buffer() as buf:
        pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)
        data = pywrap_tf_session.TF_GetBuffer(buf)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))
    x = attr_value_pb2.AttrValue()
    x.ParseFromString(data)

    oneof_value = x.WhichOneof("value")
    # An unset value oneof is surfaced to callers as an empty list.
    if oneof_value is None:
      return []
    if oneof_value == "list":
      # At most one of the list's repeated fields is populated; DType enums
      # are converted to DType objects, other fields are copied as plain
      # Python lists. An entirely empty ListValue falls through to [].
      for f in fields:
        if getattr(x.list, f):
          if f == "type":
            return [dtypes.as_dtype(t) for t in x.list.type]
          else:
            return list(getattr(x.list, f))
      return []
    if oneof_value == "type":
      return dtypes.as_dtype(x.type)
    assert oneof_value in fields, "Unsupported field type in " + str(x)
    return getattr(x, oneof_value)

  def _get_attr_type(self, name):
    """Returns the `DType` value of the attr of this op with the given `name`."""
    try:
      dtype_enum = pywrap_tf_session.TF_OperationGetAttrType(self._c_op, name)
      return _DTYPES_INTERN_TABLE[dtype_enum]
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))

  def _get_attr_bool(self, name):
    """Returns the `bool` value of the attr of this op with the given `name`."""
    try:
      return pywrap_tf_session.TF_OperationGetAttrBool(self._c_op, name)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))

  def _get_attr_int(self, name):
    """Returns the `int` value of the attr of this op with the given `name`."""
    try:
      return pywrap_tf_session.TF_OperationGetAttrInt(self._c_op, name)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))

  def run(self, feed_dict=None, session=None):
    """Runs this operation in a `Session`.

    Calling this method will execute all preceding operations that
    produce the inputs needed for this operation.

    *N.B.* Before invoking `Operation.run()`, its graph must have been
    launched in a session, and either a default session must be
    available, or `session` must be specified explicitly.

    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values. See
        `tf.Session.run` for a description of the valid feed values.
      session: (Optional.) The `Session` to be used to run to this operation.
        If none, the default session will be used.
    """
    _run_using_default_session(self, feed_dict, self.graph, session)


# Registry mapping op type name -> gradient function. Populated by the
# `RegisterGradient` decorator below; `no_gradient` registers a `None`
# entry to mark an op type as deliberately not differentiable.
_gradient_registry = registry.Registry("gradient")


@tf_export("RegisterGradient")
class RegisterGradient(object):
  """A decorator for registering the gradient function for an op type.

  This decorator is only used when defining a new op type. For an op
  with `m` inputs and `n` outputs, the gradient function is a function
  that takes the original `Operation` and `n` `Tensor` objects
  (representing the gradients with respect to each output of the op),
  and returns `m` `Tensor` objects (representing the partial gradients
  with respect to each input of the op).

  For example, assuming that operations of type `"Sub"` take two
  inputs `x` and `y`, and return a single output `x - y`, the following
  gradient function would be registered:

  ```python
  @tf.RegisterGradient("Sub")
  def _sub_grad(unused_op, grad):
    return grad, tf.negative(grad)
  ```

  The decorator argument `op_type` is the string type of an
  operation. This corresponds to the `OpDef.name` field for the proto
  that defines the operation.
  """

  def __init__(self, op_type):
    """Creates a new decorator with `op_type` as the Operation type.

    Args:
      op_type: The string type of an operation. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.

    Raises:
      TypeError: If `op_type` is not string.
    """
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string")
    self._op_type = op_type

  def __call__(self, f):
    """Registers the function `f` as gradient function for `op_type`."""
    _gradient_registry.register(f, self._op_type)
    # Return `f` unchanged so the decorator does not alter the decorated
    # function definition.
    return f


@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
  """Specifies that ops of type `op_type` is not differentiable.

  This function should *not* be used for operations that have a
  well-defined gradient that is not yet implemented.

  This function is only used when defining a new op type. It may be
  used for ops such as `tf.size()` that are not differentiable.  For
  example:

  ```python
  tf.no_gradient("Size")
  ```

  The gradient computed for 'op_type' will then propagate zeros.

  For ops that have a well-defined gradient but are not yet implemented,
  no declaration should be made, and an error *must* be thrown if
  an attempt to request its gradient is made.

  Args:
    op_type: The string type of an operation. This corresponds to the
      `OpDef.name` field for the proto that defines the operation.

  Raises:
    TypeError: If `op_type` is not a string.
  """
  if not isinstance(op_type, six.string_types):
    raise TypeError("op_type must be a string")
  # A `None` gradient function marks the op type as not differentiable.
  _gradient_registry.register(None, op_type)


# Aliases for the old names, will be eventually removed.
NoGradient = no_gradient
NotDifferentiable = no_gradient


def get_gradient_function(op):
  """Returns the function that computes gradients for "op"."""
  # Ops with no inputs have nothing to differentiate with respect to.
  if not op.inputs:
    return None

  # A gradient function attached directly to the op takes precedence over
  # any registry lookup.
  gradient_function = op._gradient_function  # pylint: disable=protected-access
  if gradient_function:
    return gradient_function

  # The "_gradient_op_type" attr (see Graph._gradient_override_map)
  # redirects the registry lookup to an alternative op type.
  try:
    op_type = op.get_attr("_gradient_op_type")
  except ValueError:
    op_type = op.type
  return _gradient_registry.lookup(op_type)


def set_shape_and_handle_data_for_outputs(_):
  """No op. TODO(b/74620627): Remove this."""
  pass


class OpStats(object):
  """A holder for statistics about an operator.

  This class holds information about the resource requirements for an op,
  including the size of its weight parameters on-disk and how many FLOPS it
  requires to execute forward inference.

  If you define a new operation, you can create a function that will return a
  set of information about its usage of the CPU and disk space when serialized.
  The function itself takes a Graph object that's been set up so you can call
  methods like get_tensor_by_name to help calculate the results, and a NodeDef
  argument.
  """

  def __init__(self, statistic_type, value=None):
    """Sets up the initial placeholders for the statistics."""
    self.statistic_type = statistic_type
    self.value = value

  @property
  def statistic_type(self):
    return self._statistic_type

  @statistic_type.setter
  def statistic_type(self, statistic_type):
    self._statistic_type = statistic_type

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, value):
    self._value = value

  def __iadd__(self, other):
    # Only statistics of the same type may be accumulated together.
    if other.statistic_type != self.statistic_type:
      raise ValueError("Can't add an OpStat of type %s to one of %s." %
                       (self.statistic_type, other.statistic_type))
    # `None` acts as "no value yet": adopt the other side's value when ours
    # is unset, and ignore the other side when its value is unset.
    if self.value is None:
      self.value = other.value
    elif other.value is not None:
      self._value += other.value
    return self


# Registry mapping "<op type>,<statistic type>" -> statistics function.
# Populated by the `RegisterStatistics` decorator below.
_stats_registry = registry.Registry("statistical functions")


class RegisterStatistics(object):
  """A decorator for registering the statistics function for an op type.

  This decorator can be defined for an op type so that it gives a
  report on the resources used by an instance of an operator, in the
  form of an OpStats object.

  Well-known types of statistics include these so far:

  - flops: When running a graph, the bulk of the computation happens doing
    numerical calculations like matrix multiplications. This type allows a node
    to return how many floating-point operations it takes to complete. The
    total number of FLOPs for a graph is a good guide to its expected latency.

  You can add your own statistics just by picking a new type string, registering
  functions for the ops you care about, and then calling get_stats_for_node_def.

  If a statistic for an op is registered multiple times, a KeyError will be
  raised.

  Since the statistics is counted on a per-op basis. It is not suitable for
  model parameters (capacity), which is expected to be counted only once, even
  if it is shared by multiple ops. (e.g. RNN)

  For example, you can define a new metric called doohickey for a Foo operation
  by placing this in your code:

  ```python
  @ops.RegisterStatistics("Foo", "doohickey")
  def _calc_foo_bojangles(unused_graph, unused_node_def):
    return ops.OpStats("doohickey", 20)
  ```

  Then in client code you can retrieve the value by making this call:

  ```python
  doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
  ```

  If the NodeDef is for an op with a registered doohickey function, you'll
  get back the calculated amount in doohickey.value, or None if it's not
  defined.
""" def __init__(self, op_type, statistic_type): """Saves the `op_type` as the `Operation` type.""" if not isinstance(op_type, six.string_types): raise TypeError("op_type must be a string.") if "," in op_type: raise TypeError("op_type must not contain a comma.") self._op_type = op_type if not isinstance(statistic_type, six.string_types): raise TypeError("statistic_type must be a string.") if "," in statistic_type: raise TypeError("statistic_type must not contain a comma.") self._statistic_type = statistic_type def __call__(self, f): """Registers "f" as the statistics function for "op_type".""" _stats_registry.register(f, self._op_type + "," + self._statistic_type) return f def get_stats_for_node_def(graph, node, statistic_type): """Looks up the node's statistics function in the registry and calls it. This function takes a Graph object and a NodeDef from a GraphDef, and if there's an associated statistics method, calls it and returns a result. If no function has been registered for the particular node type, it returns an empty statistics object. Args: graph: A Graph object that's been set up with the node's graph. node: A NodeDef describing the operator. statistic_type: A string identifying the statistic we're interested in. Returns: An OpStats object containing information about resource usage. """ try: stats_func = _stats_registry.lookup(node.op + "," + statistic_type) result = stats_func(graph, node) except LookupError: result = OpStats(statistic_type) return result def name_from_scope_name(name): """Returns the name of an op given the name of its scope. Args: name: the name of the scope. Returns: the name of the op (equal to scope name minus any trailing slash). """ return name[:-1] if (name and name[-1] == "/") else name _MUTATION_LOCK_GROUP = 0 _SESSION_RUN_LOCK_GROUP = 1 @tf_export("Graph") class Graph(object): """A TensorFlow computation, represented as a dataflow graph. Graphs are used by `tf.function`s to represent the function's computations. 
Each graph contains a set of `tf.Operation` objects, which represent units of computation; and `tf.Tensor` objects, which represent the units of data that flow between operations. ### Using graphs directly (deprecated) A `tf.Graph` can be constructed and used directly without a `tf.function`, as was required in TensorFlow 1, but this is deprecated and it is recommended to use a `tf.function` instead. If a graph is directly used, other deprecated TensorFlow 1 classes are also required to execute the graph, such as a `tf.compat.v1.Session`. A default graph can be registered with the `tf.Graph.as_default` context manager. Then, operations will be added to the graph instead of being executed eagerly. For example: ```python g = tf.Graph() with g.as_default(): # Define operations and tensors in `g`. c = tf.constant(30.0) assert c.graph is g ``` `tf.compat.v1.get_default_graph()` can be used to obtain the default graph. Important note: This class *is not* thread-safe for graph construction. All operations should be created from a single thread, or external synchronization must be provided. Unless otherwise specified, all methods are not thread-safe. A `Graph` instance supports an arbitrary number of "collections" that are identified by name. For convenience when building a large graph, collections can store groups of related objects: for example, the `tf.Variable` uses a collection (named `tf.GraphKeys.GLOBAL_VARIABLES`) for all variables that are created during the construction of a graph. The caller may define additional collections by specifying a new name. """ def __init__(self): """Creates a new, empty Graph.""" # Protects core state that can be returned via public accessors. # Thread-safety is provided on a best-effort basis to support buggy # programs, and is not guaranteed by the public `tf.Graph` API. # # NOTE(mrry): This does not protect the various stacks. 
A warning will # be reported if these are used from multiple threads self._lock = threading.RLock() # The group lock synchronizes Session.run calls with methods that create # and mutate ops (e.g. Graph.create_op()). This synchronization is # necessary because it's illegal to modify an operation after it's been run. # The group lock allows any number of threads to mutate ops at the same time # but if any modification is going on, all Session.run calls have to wait. # Similarly, if one or more Session.run calls are going on, all mutate ops # have to wait until all Session.run calls have finished. self._group_lock = lock_util.GroupLock(num_groups=2) self._nodes_by_id = {} # GUARDED_BY(self._lock) self._next_id_counter = 0 # GUARDED_BY(self._lock) self._nodes_by_name = {} # GUARDED_BY(self._lock) self._version = 0 # GUARDED_BY(self._lock) # Maps a name used in the graph to the next id to use for that name. self._names_in_use = {} self._stack_state_is_thread_local = False self._thread_local = threading.local() # Functions that will be applied to choose a device if none is specified. # In TF2.x or after switch_to_thread_local(), # self._thread_local._device_function_stack is used instead. self._graph_device_function_stack = traceable_stack.TraceableStack() # Default original_op applied to new ops. self._default_original_op = None # Current control flow context. It could be either CondContext or # WhileContext defined in ops/control_flow_ops.py self._control_flow_context = None # A new node will depend of the union of all of the nodes in the stack. # In TF2.x or after switch_to_thread_local(), # self._thread_local._control_dependencies_stack is used instead. self._graph_control_dependencies_stack = [] # Arbitrary collections of objects. self._collections = {} # The graph-level random seed self._seed = None # A dictionary of attributes that should be applied to all ops. self._attr_scope_map = {} # A map from op type to the kernel label that should be used. 
self._op_to_kernel_label_map = {} # A map from op type to an alternative op type that should be used when # computing gradients. self._gradient_override_map = {} # A map from op type to a gradient function that should be used instead. self._gradient_function_map = {} # True if the graph is considered "finalized". In that case no # new operations can be added. self._finalized = False # Functions defined in the graph self._functions = collections.OrderedDict() # Default GraphDef versions self._graph_def_versions = versions_pb2.VersionDef( producer=versions.GRAPH_DEF_VERSION, min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER) self._building_function = False # Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(), # self._thread_local._colocation_stack is used instead. self._graph_colocation_stack = traceable_stack.TraceableStack() # Set of tensors that are dangerous to feed! self._unfeedable_tensors = object_identity.ObjectIdentitySet() # Set of operations that are dangerous to fetch! self._unfetchable_ops = set() # A map of tensor handle placeholder to tensor dtype. self._handle_feeders = {} # A map from tensor handle to its read op. self._handle_readers = {} # A map from tensor handle to its move op. self._handle_movers = {} # A map from tensor handle to its delete op. self._handle_deleters = {} # Allow optimizers and other objects to pseudo-uniquely key graphs (this key # will be shared when defining function graphs, for example, so optimizers # being called inside function definitions behave as if they were seeing the # actual outside graph). self._graph_key = "grap-key-%d/" % (uid(),) # A string with the last reduction method passed to # losses.compute_weighted_loss(), or None. This is required only for # backward compatibility with Estimator and optimizer V1 use cases. self._last_loss_reduction = None # Flag that is used to indicate whether loss has been scaled by optimizer. 
# If this flag has been set, then estimator uses it to scale losss back # before reporting. This is required only for backward compatibility with # Estimator and optimizer V1 use cases. self._is_loss_scaled_by_optimizer = False self._container = "" # Set to True if this graph is being built in an # AutomaticControlDependencies context. self._add_control_dependencies = False # Cache for OpDef protobufs retrieved via the C API. self._op_def_cache = {} # Cache for constant results of `broadcast_gradient_args()`. The keys are # tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the # values are tuples of reduction indices: (rx, ry). self._bcast_grad_args_cache = {} # Cache for constant results of `reduced_shape()`. The keys are pairs of # tuples: (input_shape_tuple, reduction_indices_tuple), and the values # are pairs of tuples: (output_shape_kept_dims, tile_scaling). self._reduced_shape_cache = {} # TODO(skyewm): fold as much of the above as possible into the C # implementation self._scoped_c_graph = c_api_util.ScopedTFGraph() # The C API requires all ops to have shape functions. Disable this # requirement (many custom ops do not have shape functions, and we don't # want to break these existing cases). pywrap_tf_session.SetRequireShapeInferenceFns(self._c_graph, False) if tf2.enabled(): self.switch_to_thread_local() # Note: this method is private because the API of tf.Graph() is public and # frozen, and this functionality is still not ready for public visibility. @tf_contextlib.contextmanager def _variable_creator_scope(self, creator, priority=100): """Scope which defines a variable creation function. Args: creator: A callable taking `next_creator` and `kwargs`. See the `tf.variable_creator_scope` docstring. priority: Creators with a higher `priority` are called first. Within the same priority, creators are called inner-to-outer. Yields: `_variable_creator_scope` is a context manager with a side effect, but doesn't return a value. 
Raises: RuntimeError: If variable creator scopes are not properly nested. """ # This step keeps a reference to the existing stack, and it also initializes # self._thread_local._variable_creator_stack if it doesn't exist yet. old = self._variable_creator_stack new = list(old) new.append((priority, creator)) # Sorting is stable, so we'll put higher-priority creators later in the list # but otherwise maintain registration order. new.sort(key=lambda item: item[0]) self._thread_local._variable_creator_stack = new # pylint: disable=protected-access try: yield finally: if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access raise RuntimeError( "Exiting variable_creator_scope without proper nesting.") self._thread_local._variable_creator_stack = old # pylint: disable=protected-access # Note: this method is private because the API of tf.Graph() is public and # frozen, and this functionality is still not ready for public visibility. @property def _variable_creator_stack(self): if not hasattr(self._thread_local, "_variable_creator_stack"): self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access # This previously returned a copy of the stack instead of the stack itself, # to guard against accidental mutation. Consider, however, code that wants # to save and restore the variable creator stack: # def f(): # original_stack = graph._variable_creator_stack # graph._variable_creator_stack = new_stack # ... # Some code # graph._variable_creator_stack = original_stack # # And lets say you have some code that calls this function with some # variable_creator: # def g(): # with variable_scope.variable_creator_scope(creator): # f() # When exiting the variable creator scope, it would see a different stack # object than it expected leading to a "Exiting variable_creator_scope # without proper nesting" error. 
return self._thread_local._variable_creator_stack # pylint: disable=protected-access @_variable_creator_stack.setter def _variable_creator_stack(self, variable_creator_stack): self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access def _check_not_finalized(self): """Check if the graph is finalized. Raises: RuntimeError: If the graph finalized. """ if self._finalized: raise RuntimeError("Graph is finalized and cannot be modified.") def _add_op(self, op, op_name): """Adds 'op' to the graph and returns the unique ID for the added Operation. Args: op: the Operation to add. op_name: the name of the Operation. Returns: An integer that is a unique ID for the added Operation. """ self._check_not_finalized() with self._lock: self._next_id_counter += 1 op_id = self._next_id_counter self._nodes_by_id[op_id] = op self._nodes_by_name[op_name] = op self._version = max(self._version, op_id) return op_id @property def _c_graph(self): if self._scoped_c_graph: return self._scoped_c_graph.graph return None @property def version(self): """Returns a version number that increases as ops are added to the graph. Note that this is unrelated to the `tf.Graph.graph_def_versions`. Returns: An integer version that increases as ops are added to the graph. """ if self._finalized: return self._version with self._lock: return self._version @property def graph_def_versions(self): # pylint: disable=line-too-long """The GraphDef version information of this graph. For details on the meaning of each version, see [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto). Returns: A `VersionDef`. 
""" # pylint: enable=line-too-long with c_api_util.tf_buffer() as buf: pywrap_tf_session.TF_GraphVersions(self._c_graph, buf) data = pywrap_tf_session.TF_GetBuffer(buf) version_def = versions_pb2.VersionDef() version_def.ParseFromString(compat.as_bytes(data)) return version_def @property def seed(self): """The graph-level random seed of this graph.""" return self._seed @seed.setter def seed(self, seed): self._seed = seed @property def finalized(self): """True if this graph has been finalized.""" return self._finalized def finalize(self): """Finalizes this graph, making it read-only. After calling `g.finalize()`, no new operations can be added to `g`. This method is used to ensure that no operations are added to a graph when it is shared between multiple threads, for example when using a `tf.compat.v1.train.QueueRunner`. """ self._finalized = True def _unsafe_unfinalize(self): """Opposite of `finalize`. Internal interface. NOTE: Unfinalizing a graph could have negative impact on performance, especially in a multi-threaded environment. Unfinalizing a graph when it is in use by a Session may lead to undefined behavior. Ensure that all sessions using a graph are closed before calling this method. """ self._finalized = False def _get_control_flow_context(self): """Returns the current control flow context. Returns: A context object. """ return self._control_flow_context def _set_control_flow_context(self, ctx): """Sets the current control flow context. Args: ctx: a context object. 
    """
    self._control_flow_context = ctx

  def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
    """If this graph contains functions, copy them to `graph_def`."""
    bytesize = starting_bytesize
    for f in self._functions.values():
      bytesize += f.definition.ByteSize()
      # Protobuf serialization is limited to 2GB; fail early with a clear
      # message rather than producing a corrupt GraphDef.
      if bytesize >= (1 << 31) or bytesize < 0:
        raise ValueError("GraphDef cannot be larger than 2GB.")
      graph_def.library.function.extend([f.definition])
      if f.grad_func_name:
        grad_def = function_pb2.GradientDef()
        grad_def.function_name = f.name
        grad_def.gradient_func = f.grad_func_name
        graph_def.library.gradient.extend([grad_def])

  def _as_graph_def(self, from_version=None, add_shapes=False):
    # pylint: disable=line-too-long
    """Returns a serialized `GraphDef` representation of this graph.

    The serialized `GraphDef` can be imported into another `Graph`
    (using `tf.import_graph_def`) or used with the
    [C++ Session API](../../../../api_docs/cc/index.md).

    This method is thread-safe.

    Args:
      from_version: Optional.  If this is set, returns a `GraphDef` containing
        only the nodes that were added to this graph since its `version`
        property had the given value.
      add_shapes: If true, adds an "_output_shapes" list attr to each node with
        the inferred shapes of each of its outputs.

    Returns:
      A tuple containing a
      [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
      protocol buffer, and the version of the graph to which that
      `GraphDef` corresponds.

    Raises:
      ValueError: If the `graph_def` would be too large.

    """
    # pylint: enable=line-too-long
    with self._lock:
      with c_api_util.tf_buffer() as buf:
        # The C API owns the authoritative graph representation; serialize it
        # and parse the bytes back into a Python GraphDef proto.
        pywrap_tf_session.TF_GraphToGraphDef(self._c_graph, buf)
        data = pywrap_tf_session.TF_GetBuffer(buf)
      graph = graph_pb2.GraphDef()
      graph.ParseFromString(compat.as_bytes(data))
      # Strip the experimental library field iff it's empty.
      if not graph.library.function:
        graph.ClearField("library")

      if add_shapes:
        for node in graph.node:
          op = self._nodes_by_name[node.name]
          if op.outputs:
            node.attr["_output_shapes"].list.shape.extend(
                [output.get_shape().as_proto() for output in op.outputs])
        for function_def in graph.library.function:
          defined_function = self._functions[function_def.signature.name]
          try:
            func_graph = defined_function.graph
          except AttributeError:
            # _DefinedFunction doesn't have a graph, _EagerDefinedFunction
            # does. Both rely on ops.py, so we can't really isinstance check
            # them.
            continue
          input_shapes = function_def.attr["_input_shapes"]
          try:
            func_graph_inputs = func_graph.inputs
          except AttributeError:
            continue
          # TODO(b/141471245): Fix the inconsistency when inputs of func graph
          # are appended during gradient computation of while/cond.
          for input_tensor, _ in zip(func_graph_inputs,
                                     function_def.signature.input_arg):
            if input_tensor.dtype == dtypes.resource:
              # TODO(allenl): Save and restore handle data, then save the
              # resource placeholder's shape. Right now some shape functions get
              # confused if we set the shape of the resource placeholder (to a
              # scalar of course) and there isn't any handle data.
              input_shapes.list.shape.add().CopyFrom(
                  tensor_shape.TensorShape(None).as_proto())
            else:
              input_shapes.list.shape.add().CopyFrom(
                  input_tensor.get_shape().as_proto())
          for node in function_def.node_def:
            try:
              op = func_graph.get_operation_by_name(node.name)
            except KeyError:
              continue
            outputs = op.outputs

            if op.type == "StatefulPartitionedCall":
              # Filter out any extra outputs (possibly added by function
              # backpropagation rewriting).
              num_outputs = len(node.attr["Tout"].list.type)
              outputs = outputs[:num_outputs]

            node.attr["_output_shapes"].list.shape.extend(
                [output.get_shape().as_proto() for output in outputs])

    return graph, self._version

  def as_graph_def(self, from_version=None, add_shapes=False):
    # pylint: disable=line-too-long
    """Returns a serialized `GraphDef` representation of this graph.

    The serialized `GraphDef` can be imported into another `Graph`
    (using `tf.import_graph_def`) or used with the
    [C++ Session API](../../api_docs/cc/index.md).

    This method is thread-safe.

    Args:
      from_version: Optional.  If this is set, returns a `GraphDef` containing
        only the nodes that were added to this graph since its `version`
        property had the given value.
      add_shapes: If true, adds an "_output_shapes" list attr to each node with
        the inferred shapes of each of its outputs.

    Returns:
      A
      [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
      protocol buffer.

    Raises:
      ValueError: If the `graph_def` would be too large.
    """
    # pylint: enable=line-too-long
    # Thin public wrapper: drop the version component of _as_graph_def's
    # (graph_def, version) result.
    result, _ = self._as_graph_def(from_version, add_shapes)
    return result

  def _is_function(self, name):
    """Tests whether 'name' is registered in this graph's function library.

    Args:
      name: string op name.

    Returns:
      bool indicating whether or not 'name' is registered in function library.
    """
    return compat.as_str(name) in self._functions

  def _get_function(self, name):
    """Returns the function definition for 'name'.

    Args:
      name: string function name.

    Returns:
      The function def proto.
    """
    return self._functions.get(compat.as_str(name), None)

  def _add_function(self, function):
    """Adds a function to the graph.

    After the function has been added, you can call to the function by
    passing the function name in place of an op name to
    `Graph.create_op()`.

    Args:
      function: A `_DefinedFunction` object.

    Raises:
      ValueError: if another function is defined with the same name.
    """
    name = function.name
    # Sanity checks on gradient definition.
    if (function.grad_func_name is not None) and (function.python_grad_func is
                                                  not None):
      raise ValueError("Gradient defined twice for function %s" % name)

    # Add function to graph
    # pylint: disable=protected-access
    gradient = (
        function._grad_func._c_func.func if function._grad_func else None)
    pywrap_tf_session.TF_GraphCopyFunction(self._c_graph, function._c_func.func,
                                           gradient)
    # pylint: enable=protected-access

    self._functions[compat.as_str(name)] = function

    # Need a new-enough consumer to support the functions we add to the graph.
    if self._graph_def_versions.min_consumer < 12:
      self._graph_def_versions.min_consumer = 12

  @property
  def building_function(self):
    """Returns True iff this graph represents a function."""
    return self._building_function

  # Helper functions to create operations.
  @deprecated_args(None,
                   "Shapes are always computed; don't use the compute_shapes "
                   "as it has no effect.", "compute_shapes")
  def create_op(
      self,
      op_type,
      inputs,
      dtypes=None,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_shapes=True,
      compute_device=True):
    """Creates an `Operation` in this graph.

    This is a low-level interface for creating an `Operation`. Most
    programs will not call this method directly, and instead use the
    Python op constructors, such as `tf.constant()`, which add ops to
    the default graph.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
      dtypes: (Optional) A list of `DType` objects that will be the types of the
        tensors that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of the
        tensors that the operation consumes. By default, uses the base `DType`
        of each input in `inputs`. Operations that expect reference-typed inputs
        must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
        computed).
      compute_device: (Optional.) If True, device functions will be executed to
        compute the device property of the Operation.

    Raises:
      TypeError: if any of the inputs is not a `Tensor`.
      ValueError: if colocation conflicts with existing device assignment.

    Returns:
      An `Operation` object.
    """
    del compute_shapes
    # Validate here so _create_op_internal (and internal callers that bypass
    # this wrapper) can assume every input is a Tensor.
    for idx, a in enumerate(inputs):
      if not isinstance(a, Tensor):
        raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
    return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
                                    attrs, op_def, compute_device)

  def _create_op_internal(
      self,
      op_type,
      inputs,
      dtypes=None,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_device=True):
    """Creates an `Operation` in this graph.

    Implements `Graph.create_op()` without the overhead of the deprecation
    wrapper.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
      dtypes: (Optional) A list of `DType` objects that will be the types of the
        tensors that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of the
        tensors that the operation consumes. By default, uses the base `DType`
        of each input in `inputs`. Operations that expect reference-typed inputs
        must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_device: (Optional.) If True, device functions will be executed to
        compute the device property of the Operation.

    Raises:
      ValueError: if colocation conflicts with existing device assignment.

    Returns:
      An `Operation` object.
    """
    self._check_not_finalized()
    if name is None:
      name = op_type
    # If a names ends with a '/' it is a "name scope" and we use it as-is,
    # after removing the trailing '/'.
    if name and name[-1] == "/":
      name = name_from_scope_name(name)
    else:
      name = self.unique_name(name)

    node_def = _NodeDef(op_type, name, attrs)

    input_ops = set(t.op for t in inputs)
    control_inputs = self._control_dependencies_for_inputs(input_ops)
    # _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
    # Session.run call cannot occur between creating and mutating the op.
    with self._mutation_lock():
      ret = Operation(
          node_def,
          self,
          inputs=inputs,
          output_types=dtypes,
          control_inputs=control_inputs,
          input_types=input_types,
          original_op=self._default_original_op,
          op_def=op_def)
      self._create_op_helper(ret, compute_device=compute_device)
    return ret

  def _create_op_from_tf_operation(self, c_op, compute_device=True):
    """Creates an `Operation` in this graph from the supplied TF_Operation.

    This method is like create_op() except the new Operation is constructed
    using `c_op`. The returned Operation will have `c_op` as its _c_op
    field. This is used to create Operation objects around TF_Operations created
    indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).

    This function does not call Operation._control_flow_post_processing or
    Graph._control_dependencies_for_inputs (since the inputs may not be
    available yet). The caller is responsible for calling these methods.
    Args:
      c_op: a wrapped TF_Operation
      compute_device: (Optional.) If True, device functions will be executed to
        compute the device property of the Operation.

    Returns:
      An `Operation` object.
    """
    self._check_not_finalized()
    ret = Operation(c_op, self)
    # If a name_scope was created with ret.name but no nodes were created in it,
    # the name will still appear in _names_in_use even though the name hasn't
    # been used. This is ok, just leave _names_in_use as-is in this case.
    # TODO(skyewm): make the C API guarantee no name conflicts.
    name_key = ret.name.lower()
    if name_key not in self._names_in_use:
      self._names_in_use[name_key] = 1
    self._create_op_helper(ret, compute_device=compute_device)
    return ret

  def _create_op_helper(self, op, compute_device=True):
    """Common logic for creating an op in this graph."""
    # Apply any additional attributes requested. Do not overwrite any existing
    # attributes.
    for key, value in self._attr_scope_map.items():
      try:
        op.get_attr(key)
      except ValueError:
        # The attr is absent on the op, so the scope's value applies. Callable
        # values are resolved lazily against the op's NodeDef.
        if callable(value):
          value = value(op.node_def)
          if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
            raise TypeError("Callable for scope map key '%s' must return either None or "
                            "an AttrValue protocol buffer; but it returned: %s" %
                            (key, value))
        if value:
          op._set_attr(key, value)  # pylint: disable=protected-access

    # Apply a kernel label if one has been specified for this op type.
    try:
      kernel_label = self._op_to_kernel_label_map[op.type]
      op._set_attr("_kernel",  # pylint: disable=protected-access
                   attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
    except KeyError:
      pass

    op._gradient_function = self._gradient_function_map.get(op.type)  # pylint: disable=protected-access

    # Apply the overriding op type for gradients if one has been specified for
    # this op type.
    try:
      mapped_op_type = self._gradient_override_map[op.type]
      op._set_attr("_gradient_op_type",  # pylint: disable=protected-access
                   attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
    except KeyError:
      pass

    self._record_op_seen_by_control_dependencies(op)

    if compute_device:
      self._apply_device_functions(op)

    # Snapshot the colocation stack metadata before we might generate error
    # messages using it. Note that this snapshot depends on the actual stack
    # and is independent of the op's _class attribute.
    # pylint: disable=protected-access
    op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
    # pylint: enable=protected-access

    if self._colocation_stack:
      all_colocation_groups = []
      for colocation_op in self._colocation_stack.peek_objs():
        all_colocation_groups.extend(colocation_op.colocation_groups())
        if colocation_op.device:
          # pylint: disable=protected-access
          op._set_device(colocation_op.device)
          # pylint: enable=protected-access

      all_colocation_groups = sorted(set(all_colocation_groups))
      # pylint: disable=protected-access
      op._set_attr(
          "_class",
          attr_value_pb2.AttrValue(
              list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
      # pylint: enable=protected-access

    # Sets "container" attribute if
    # (1) self._container is not None
    # (2) "is_stateful" is set in OpDef
    # (3) "container" attribute is in OpDef
    # (4) "container" attribute is None
    if self._container and op._is_stateful:  # pylint: disable=protected-access
      try:
        container_attr = op.get_attr("container")
      except ValueError:
        # "container" attribute is not in OpDef
        pass
      else:
        if not container_attr:
          op._set_attr("container", attr_value_pb2.AttrValue(  # pylint: disable=protected-access
              s=compat.as_bytes(self._container)))

  def _add_new_tf_operations(self, compute_devices=True):
    """Creates `Operations` in this graph for any new TF_Operations.

    This is useful for when TF_Operations are indirectly created by the C API
    outside of the Operation constructor (e.g. by TF_ImportGraphDef,
    TF_FinishWhile). This ensures there are corresponding Operations for all
    TF_Operations in the underlying TF_Graph.

    Args:
      compute_devices: (Optional.) If True, device functions will be executed to
        compute the device properties of each new Operation.

    Returns:
      A list of the new `Operation` objects.
    """
    # Create all Operation objects before accessing their inputs since an op may
    # be created before its inputs.
    new_ops = [
        self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
        for c_op in c_api_util.new_tf_operations(self)
    ]

    # pylint: disable=protected-access
    for op in new_ops:
      new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
      op._add_control_inputs(new_control_inputs)
      op._control_flow_post_processing()
    # pylint: enable=protected-access

    return new_ops

  def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
    """Returns the object referred to by `obj`, as an `Operation` or `Tensor`.

    This function validates that `obj` represents an element of this
    graph, and gives an informative error message if it is not.

    This function is the canonical way to get/validate an object of
    one of the allowed types from an external argument reference in the
    Session API.

    This method may be called concurrently from multiple threads.

    Args:
      obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can
        also be any object with an `_as_graph_element()` method that returns a
        value of one of these types. Note: `_as_graph_element` will be called
        inside the graph's lock and so may not modify the graph.
      allow_tensor: If true, `obj` may refer to a `Tensor`.
      allow_operation: If true, `obj` may refer to an `Operation`.

    Returns:
      The `Tensor` or `Operation` in the Graph corresponding to `obj`.

    Raises:
      TypeError: If `obj` is not a type we support attempting to convert
        to types.
      ValueError: If `obj` is of an appropriate type but invalid. For
        example, an invalid string.
      KeyError: If `obj` is not an object in the graph.
    """
    if self._finalized:
      # A finalized graph can no longer change, so the lock is unnecessary.
      return self._as_graph_element_locked(obj, allow_tensor, allow_operation)

    with self._lock:
      return self._as_graph_element_locked(obj, allow_tensor, allow_operation)

  def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
    """See `Graph.as_graph_element()` for details."""
    # The vast majority of this function is figuring
    # out what an API user might be doing wrong, so
    # that we can give helpful error messages.
    #
    # Ideally, it would be nice to split it up, but we
    # need context to generate nice error messages.

    if allow_tensor and allow_operation:
      types_str = "Tensor or Operation"
    elif allow_tensor:
      types_str = "Tensor"
    elif allow_operation:
      types_str = "Operation"
    else:
      raise ValueError("allow_tensor and allow_operation can't both be False.")

    temp_obj = _as_graph_element(obj)
    if temp_obj is not None:
      obj = temp_obj

    # If obj appears to be a name...
    if isinstance(obj, compat.bytes_or_text_types):
      name = compat.as_str(obj)

      if ":" in name and allow_tensor:
        # Looks like a Tensor name and can be a Tensor.
        try:
          op_name, out_n = name.split(":")
          out_n = int(out_n)
        except:
          raise ValueError("The name %s looks a like a Tensor name, but is "
                           "not a valid one. Tensor names must be of the "
                           "form \"<op_name>:<output_index>\"." % repr(name))
        if op_name in self._nodes_by_name:
          op = self._nodes_by_name[op_name]
        else:
          raise KeyError("The name %s refers to a Tensor which does not "
                         "exist. The operation, %s, does not exist in the "
                         "graph." % (repr(name), repr(op_name)))
        try:
          return op.outputs[out_n]
        except:
          raise KeyError("The name %s refers to a Tensor which does not "
                         "exist. The operation, %s, exists but only has "
                         "%s outputs." %
                         (repr(name), repr(op_name), len(op.outputs)))

      elif ":" in name and not allow_tensor:
        # Looks like a Tensor name but can't be a Tensor.
        raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
                         (repr(name), types_str))

      elif ":" not in name and allow_operation:
        # Looks like an Operation name and can be an Operation.
        if name not in self._nodes_by_name:
          raise KeyError("The name %s refers to an Operation not in the "
                         "graph." % repr(name))
        return self._nodes_by_name[name]

      elif ":" not in name and not allow_operation:
        # Looks like an Operation name but can't be an Operation.
        if name in self._nodes_by_name:
          # Yep, it's an Operation name
          err_msg = ("The name %s refers to an Operation, not a %s." %
                     (repr(name), types_str))
        else:
          err_msg = ("The name %s looks like an (invalid) Operation name, "
                     "not a %s." % (repr(name), types_str))
        err_msg += (" Tensor names must be of the form "
                    "\"<op_name>:<output_index>\".")
        raise ValueError(err_msg)

    elif isinstance(obj, Tensor) and allow_tensor:
      # Actually obj is just the object it's referring to.
      if obj.graph is not self:
        raise ValueError("Tensor %s is not an element of this graph." % obj)
      return obj
    elif isinstance(obj, Operation) and allow_operation:
      # Actually obj is just the object it's referring to.
      if obj.graph is not self:
        raise ValueError("Operation %s is not an element of this graph." % obj)
      return obj
    else:
      # We give up!
      raise TypeError("Can not convert a %s into a %s." %
                      (type(obj).__name__, types_str))

  def get_operations(self):
    """Return the list of operations in the graph.

    You can modify the operations in place, but modifications
    to the list such as inserts/delete have no effect on the
    list of operations known to the graph.

    This method may be called concurrently from multiple threads.

    Returns:
      A list of Operations.
    """
    if self._finalized:
      return list(self._nodes_by_id.values())

    with self._lock:
      return list(self._nodes_by_id.values())

  def get_operation_by_name(self, name):
    """Returns the `Operation` with the given `name`.

    This method may be called concurrently from multiple threads.

    Args:
      name: The name of the `Operation` to return.

    Returns:
      The `Operation` with the given `name`.

    Raises:
      TypeError: If `name` is not a string.
      KeyError: If `name` does not correspond to an operation in this graph.
    """

    if not isinstance(name, six.string_types):
      raise TypeError("Operation names are strings (or similar), not %s." %
                      type(name).__name__)
    return self.as_graph_element(name, allow_tensor=False, allow_operation=True)

  def _get_operation_by_name_unsafe(self, name):
    """Returns the `Operation` with the given `name`.

    This is a internal unsafe version of get_operation_by_name. It skips many
    checks and does not have user friendly error messages but runs considerably
    faster. This method may be called concurrently from multiple threads.

    Args:
      name: The name of the `Operation` to return.

    Returns:
      The `Operation` with the given `name`.

    Raises:
      KeyError: If `name` does not correspond to an operation in this graph.
    """

    if self._finalized:
      return self._nodes_by_name[name]

    with self._lock:
      return self._nodes_by_name[name]

  def _get_operation_by_tf_operation(self, tf_oper):
    op_name = pywrap_tf_session.TF_OperationName(tf_oper)
    return self._get_operation_by_name_unsafe(op_name)

  def get_tensor_by_name(self, name):
    """Returns the `Tensor` with the given `name`.

    This method may be called concurrently from multiple threads.

    Args:
      name: The name of the `Tensor` to return.

    Returns:
      The `Tensor` with the given `name`.

    Raises:
      TypeError: If `name` is not a string.
      KeyError: If `name` does not correspond to a tensor in this graph.
    """
    # Names should be strings.
    if not isinstance(name, six.string_types):
      raise TypeError("Tensor names are strings (or similar), not %s." %
                      type(name).__name__)
    return self.as_graph_element(name, allow_tensor=True, allow_operation=False)

  def _get_tensor_by_tf_output(self, tf_output):
    """Returns the `Tensor` representing `tf_output`.

    Note that there is only one such `Tensor`, i.e. multiple calls to this
    function with the same TF_Output value will always return the same `Tensor`
    object.
    Args:
      tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).

    Returns:
      The `Tensor` that represents `tf_output`.
    """
    op = self._get_operation_by_tf_operation(tf_output.oper)
    return op.outputs[tf_output.index]

  @property
  def _last_id(self):
    return self._next_id_counter

  def _get_op_def(self, type):  # pylint: disable=redefined-builtin
    """Returns the `OpDef` proto for `type`. `type` is a string."""
    # NOTE: No locking is required because the lookup and insertion operations
    # on Python dictionaries are atomic.
    try:
      return self._op_def_cache[type]
    except KeyError:
      # Cache miss: fetch the serialized OpDef from the C API, parse it, and
      # memoize for subsequent lookups.
      with c_api_util.tf_buffer() as buf:
        # pylint: disable=protected-access
        pywrap_tf_session.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type),
                                           buf)
        # pylint: enable=protected-access
        data = pywrap_tf_session.TF_GetBuffer(buf)
      op_def = op_def_pb2.OpDef()
      op_def.ParseFromString(compat.as_bytes(data))
      self._op_def_cache[type] = op_def
      return op_def

  def as_default(self):
    """Returns a context manager that makes this `Graph` the default graph.

    This method should be used if you want to create multiple graphs
    in the same process. For convenience, a global default graph is
    provided, and all ops will be added to this graph if you do not
    create a new graph explicitly.

    Use this method with the `with` keyword to specify that ops created within
    the scope of a block should be added to this graph. In this case, once
    the scope of the `with` is exited, the previous default graph is set again
    as default. There is a stack, so it's ok to have multiple nested levels
    of `as_default` calls.

    The default graph is a property of the current thread. If you
    create a new thread, and wish to use the default graph in that
    thread, you must explicitly add a `with g.as_default():` in that
    thread's function.

    The following code examples are equivalent:

    ```python
    # 1. Using Graph.as_default():
    g = tf.Graph()
    with g.as_default():
      c = tf.constant(5.0)
      assert c.graph is g

    # 2. Constructing and making default:
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0)
      assert c.graph is g
    ```

    If eager execution is enabled ops created under this context manager will be
    added to the graph instead of executed eagerly.

    Returns:
      A context manager for using this graph as the default graph.
    """
    return _default_graph_stack.get_controller(self)

  @property
  def collections(self):
    """Returns the names of the collections known to this graph."""
    return list(self._collections)

  def add_to_collection(self, name, value):
    """Stores `value` in the collection with the given `name`.

    Note that collections are not sets, so it is possible to add a value to
    a collection several times.

    Args:
      name: The key for the collection. The `GraphKeys` class contains many
        standard names for collections.
      value: The value to add to the collection.
    """  # pylint: disable=g-doc-exception
    self._check_not_finalized()
    with self._lock:
      if name not in self._collections:
        self._collections[name] = [value]
      else:
        self._collections[name].append(value)

  def add_to_collections(self, names, value):
    """Stores `value` in the collections given by `names`.

    Note that collections are not sets, so it is possible to add a value to
    a collection several times. This function makes sure that duplicates in
    `names` are ignored, but it will not check for pre-existing membership of
    `value` in any of the collections in `names`.

    `names` can be any iterable, but if `names` is a string, it is treated as a
    single collection name.

    Args:
      names: The keys for the collections to add to. The `GraphKeys` class
        contains many standard names for collections.
      value: The value to add to the collections.
    """
    # Make sure names are unique, but treat strings as a single collection name
    names = (names,) if isinstance(names, six.string_types) else set(names)
    for name in names:
      self.add_to_collection(name, value)

  def get_collection_ref(self, name):
    """Returns a list of values in the collection with the given `name`.

    If the collection exists, this returns the list itself, which can
    be modified in place to change the collection.  If the collection does
    not exist, it is created as an empty list and the list is returned.

    This is different from `get_collection()` which always returns a copy of
    the collection list if it exists and never creates an empty collection.

    Args:
      name: The key for the collection. For example, the `GraphKeys` class
        contains many standard names for collections.

    Returns:
      The list of values in the collection with the given `name`, or an empty
      list if no value has been added to that collection.
    """  # pylint: disable=g-doc-exception
    with self._lock:
      coll_list = self._collections.get(name, None)
      if coll_list is None:
        coll_list = []
        self._collections[name] = coll_list
      return coll_list

  def get_collection(self, name, scope=None):
    """Returns a list of values in the collection with the given `name`.

    This is different from `get_collection_ref()` which always returns the
    actual collection list if it exists in that it returns a new list each time
    it is called.

    Args:
      name: The key for the collection. For example, the `GraphKeys` class
        contains many standard names for collections.
      scope: (Optional.) A string. If supplied, the resulting list is filtered
        to include only items whose `name` attribute matches `scope` using
        `re.match`. Items without a `name` attribute are never returned if a
        scope is supplied. The choice of `re.match` means that a `scope` without
        special tokens filters by prefix.

    Returns:
      The list of values in the collection with the given `name`, or
      an empty list if no value has been added to that collection. The
      list contains the values in the order under which they were
      collected.
    """  # pylint: disable=g-doc-exception
    with self._lock:
      collection = self._collections.get(name, None)
      if collection is None:
        return []
      if scope is None:
        return list(collection)
      else:
        c = []
        regex = re.compile(scope)
        for item in collection:
          try:
            if regex.match(item.name):
              c.append(item)
          except AttributeError:
            # Collection items with no name are ignored.
            pass
        return c

  def get_all_collection_keys(self):
    """Returns a list of collections used in this graph."""
    with self._lock:
      return [x for x in self._collections if isinstance(x, six.string_types)]

  def clear_collection(self, name):
    """Clears all values in a collection.

    Args:
      name: The key for the collection. The `GraphKeys` class contains many
        standard names for collections.
    """
    self._check_not_finalized()
    with self._lock:
      if name in self._collections:
        del self._collections[name]

  @tf_contextlib.contextmanager
  def _original_op(self, op):
    """Python 'with' handler to help annotate ops with their originator.

    An op may have an 'original_op' property that indicates the op on which
    it was based. For example a replica op is based on the op that was
    replicated and a gradient op is based on the op that was differentiated.
    All ops created in the scope of this 'with' handler will have
    the given 'op' as their original op.

    Args:
      op: The Operation that all ops created in this scope will have as their
        original op.

    Yields:
      Nothing.
    """
    old_original_op = self._default_original_op
    self._default_original_op = op
    try:
      yield
    finally:
      self._default_original_op = old_original_op

  @property
  def _name_stack(self):
    # This may be called from a thread where name_stack doesn't yet exist.
    if not hasattr(self._thread_local, "_name_stack"):
      self._thread_local._name_stack = ""
    return self._thread_local._name_stack

  @_name_stack.setter
  def _name_stack(self, name_stack):
    self._thread_local._name_stack = name_stack

  # pylint: disable=g-doc-return-or-yield,line-too-long
  @tf_contextlib.contextmanager
  def name_scope(self, name):
    """Returns a context manager that creates hierarchical names for operations.

    A graph maintains a stack of name scopes. A `with name_scope(...):`
    statement pushes a new name onto the stack for the lifetime of the context.

    The `name` argument will be interpreted as follows:

    * A string (not ending with '/') will create a new name scope, in which
      `name` is appended to the prefix of all operations created in the
      context. If `name` has been used before, it will be made unique by
      calling `self.unique_name(name)`.
    * A scope previously captured from a `with g.name_scope(...) as
      scope:` statement will be treated as an "absolute" name scope, which
      makes it possible to re-enter existing scopes.
    * A value of `None` or the empty string will reset the current name scope
      to the top-level (empty) name scope.

    For example:

    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0, name="c")
      assert c.op.name == "c"
      c_1 = tf.constant(6.0, name="c")
      assert c_1.op.name == "c_1"

      # Creates a scope called "nested"
      with g.name_scope("nested") as scope:
        nested_c = tf.constant(10.0, name="c")
        assert nested_c.op.name == "nested/c"

        # Creates a nested scope called "inner".
        with g.name_scope("inner"):
          nested_inner_c = tf.constant(20.0, name="c")
          assert nested_inner_c.op.name == "nested/inner/c"

        # Create a nested scope called "inner_1".
        with g.name_scope("inner"):
          nested_inner_1_c = tf.constant(30.0, name="c")
          assert nested_inner_1_c.op.name == "nested/inner_1/c"

          # Treats `scope` as an absolute name scope, and
          # switches to the "nested/" scope.
          with g.name_scope(scope):
            nested_d = tf.constant(40.0, name="d")
            assert nested_d.op.name == "nested/d"

            with g.name_scope(""):
              e = tf.constant(50.0, name="e")
              assert e.op.name == "e"
    ```

    The name of the scope itself can be captured by `with
    g.name_scope(...) as scope:`, which stores the name of the scope
    in the variable `scope`. This value can be used to name an
    operation that represents the overall result of executing the ops
    in a scope. For example:

    ```python
    inputs = tf.constant(...)
    with g.name_scope('my_layer') as scope:
      weights = tf.Variable(..., name="weights")
      biases = tf.Variable(..., name="biases")
      affine = tf.matmul(inputs, weights) + biases
      output = tf.nn.relu(affine, name=scope)
    ```

    NOTE: This constructor validates the given `name`. Valid scope
    names match one of the following regular expressions:

        [A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
        [A-Za-z0-9_.\\-/]* (for other scopes)

    Args:
      name: A name for the scope.

    Returns:
      A context manager that installs `name` as a new name scope.

    Raises:
      ValueError: If `name` is not a valid scope name, according to the rules
        above.
    """
    if name:
      if isinstance(name, compat.bytes_or_text_types):
        name = compat.as_str(name)

      if self._name_stack:
        # Scopes created in a nested scope may have initial characters
        # that are illegal as the initial character of an op name
        # (viz. '-', '\', '/', and '_').
        if not _VALID_SCOPE_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
      else:
        # Scopes created in the root must match the more restrictive
        # op name regex, which constrains the initial character.
        if not _VALID_OP_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
    old_stack = self._name_stack
    if not name:  # Both for name=None and name="" we re-set to empty scope.
      new_stack = None
    elif name[-1] == "/":
      new_stack = name_from_scope_name(name)
    else:
      new_stack = self.unique_name(name)
    self._name_stack = new_stack
    try:
      yield "" if new_stack is None else new_stack + "/"
    finally:
      self._name_stack = old_stack

  # pylint: enable=g-doc-return-or-yield,line-too-long

  def unique_name(self, name, mark_as_used=True):
    """Return a unique operation name for `name`.

    Note: You rarely need to call `unique_name()` directly. Most of
    the time you just need to create `with g.name_scope()` blocks to
    generate structured names.

    `unique_name` is used to generate structured names, separated by
    `"/"`, to help identify operations when debugging a graph.
    Operation names are displayed in error messages reported by the
    TensorFlow runtime, and in various visualization tools such as
    TensorBoard.

    If `mark_as_used` is set to `True`, which is the default, a new
    unique name is created and marked as in use. If it's set to `False`,
    the unique name is returned without actually being marked as used.
    This is useful when the caller simply wants to know what the name
    to be created will be.

    Args:
      name: The name for an operation.
      mark_as_used: Whether to mark this name as being used.

    Returns:
      A string to be passed to `create_op()` that will be used to name the
      operation being created.
    """
    if self._name_stack:
      name = self._name_stack + "/" + name

    # For the sake of checking for names in use, we treat names as case
    # insensitive (e.g. foo = Foo).
    name_key = name.lower()
    i = self._names_in_use.get(name_key, 0)
    # Increment the number for "name_key".
    if mark_as_used:
      self._names_in_use[name_key] = i + 1
    if i > 0:
      base_name_key = name_key
      # Make sure the composed name key is not already used.
      while name_key in self._names_in_use:
        name_key = "%s_%d" % (base_name_key, i)
        i += 1
      # Mark the composed name_key as used in case someone wants
      # to call unique_name("name_1").
      if mark_as_used:
        self._names_in_use[name_key] = 1

      # Return the new name with the original capitalization of the given name.
      name = "%s_%d" % (name, i - 1)
    return name

  def get_name_scope(self):
    """Returns the current name scope.

    For example:

    ```python
    with tf.name_scope('scope1'):
      with tf.name_scope('scope2'):
        print(tf.compat.v1.get_default_graph().get_name_scope())
    ```
    would print the string `scope1/scope2`.

    Returns:
      A string representing the current name scope.
    """
    return self._name_stack

  @tf_contextlib.contextmanager
  def _colocate_with_for_gradient(self, op, gradient_uid,
                                  ignore_existing=False):
    with self.colocate_with(op, ignore_existing):
      if gradient_uid is not None and self._control_flow_context is not None:
        self._control_flow_context.EnterGradientColocation(op, gradient_uid)
        try:
          yield
        finally:
          self._control_flow_context.ExitGradientColocation(op, gradient_uid)
      else:
        yield

  @tf_contextlib.contextmanager
  def colocate_with(self, op, ignore_existing=False):
    """Returns a context manager that specifies an op to colocate with.

    Note: this function is not for public use, only for internal libraries.

    For example:

    ```python
    a = tf.Variable([1.0])
    with g.colocate_with(a):
      b = tf.constant(1.0)
      c = tf.add(a, b)
    ```

    `b` and `c` will always be colocated with `a`, no matter where `a`
    is eventually placed.

    **NOTE** Using a colocation scope resets any existing device constraints.

    If `op` is `None` then `ignore_existing` must be `True` and the new
    scope resets all colocation and device constraints.

    Args:
      op: The op to colocate all created ops with, or `None`.
      ignore_existing: If true, only applies colocation of this op within the
        context, rather than applying all colocation properties on the stack.
        If `op` is `None`, this value must be `True`.

    Raises:
      ValueError: if op is None but ignore_existing is False.

    Yields:
      A context manager that specifies the op with which to colocate
      newly created ops.
    """
    if op is None and not ignore_existing:
      raise ValueError("Trying to reset colocation (op is None) but "
                       "ignore_existing is not True")
    op = _op_to_colocate_with(op, self)

    # By default, colocate_with resets the device function stack,
    # since colocate_with is typically used in specific internal
    # library functions where colocation is intended to be "stronger"
    # than device functions.
    #
    # In the future, a caller may specify that device_functions win
    # over colocation, in which case we can add support.
    device_fn_tmp = self._device_function_stack
    self._device_function_stack = traceable_stack.TraceableStack()

    if ignore_existing:
      current_stack = self._colocation_stack
      self._colocation_stack = traceable_stack.TraceableStack()

    if op is not None:
      # offset refers to the stack frame used for storing code location.
      # We use 4, the sum of 1 to use our caller's stack frame and 3
      # to jump over layers of context managers above us.
      self._colocation_stack.push_obj(op, offset=4)

    try:
      yield
    finally:
      # Restore device function stack
      self._device_function_stack = device_fn_tmp
      if op is not None:
        self._colocation_stack.pop_obj()
      # Reset the colocation stack if requested.
      if ignore_existing:
        self._colocation_stack = current_stack

  def _add_device_to_stack(self, device_name_or_function, offset=0):
    """Add device to stack manually, separate from a context manager."""
    total_offset = 1 + offset
    spec = _UserDeviceSpec(device_name_or_function)
    self._device_function_stack.push_obj(spec, offset=total_offset)
    return spec

  @tf_contextlib.contextmanager
  def device(self, device_name_or_function):
    # pylint: disable=line-too-long
    """Returns a context manager that specifies the default device to use.

    The `device_name_or_function` argument may either be a device name
    string, a device function, or None:

    * If it is a device name string, all operations constructed in
      this context will be assigned to the device with that name, unless
      overridden by a nested `device()` context.
    * If it is a function, it will be treated as a function from
      Operation objects to device name strings, and invoked each time
      a new Operation is created. The Operation will be assigned to
      the device with the returned name.
    * If it is None, all `device()` invocations from the enclosing context
      will be ignored.

    For information about the valid syntax of device name strings, see
    the documentation in
    [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).

    For example:

    ```python
    with g.device('/device:GPU:0'):
      # All operations constructed in this context will be placed
      # on GPU 0.
      with g.device(None):
        # All operations constructed in this context will have no
        # assigned device.

    # Defines a function from `Operation` to device string.
    def matmul_on_gpu(n):
      if n.type == "MatMul":
        return "/device:GPU:0"
      else:
        return "/cpu:0"

    with g.device(matmul_on_gpu):
      # All operations of type "MatMul" constructed in this context
      # will be placed on GPU 0; all other operations will be placed
      # on CPU 0.
    ```

    **N.B.** The device scope may be overridden by op wrappers or
    other library code. For example, a variable assignment op
    `v.assign()` must be colocated with the `tf.Variable` `v`, and
    incompatible device scopes will be ignored.

    Args:
      device_name_or_function: The device name or function to use in the
        context.

    Yields:
      A context manager that specifies the default device to use for newly
      created ops.

    Raises:
      RuntimeError: If device scopes are not properly nested.
""" self._add_device_to_stack(device_name_or_function, offset=2) old_top_of_stack = self._device_function_stack.peek_top_obj() try: yield finally: new_top_of_stack = self._device_function_stack.peek_top_obj() if old_top_of_stack is not new_top_of_stack: raise RuntimeError("Exiting device scope without proper scope nesting.") self._device_function_stack.pop_obj() def _apply_device_functions(self, op): """Applies the current device function stack to the given operation.""" # Apply any device functions in LIFO order, so that the most recently # pushed function has the first chance to apply a device to the op. # We apply here because the result can depend on the Operation's # signature, which is computed in the Operation constructor. # pylint: disable=protected-access prior_device_string = None for device_spec in self._device_function_stack.peek_objs(): if device_spec.is_null_merge: continue if device_spec.function is None: break device_string = device_spec.string_merge(op) # Take advantage of the fact that None is a singleton and Python interns # strings, since identity checks are faster than equality checks. if device_string is not prior_device_string: op._set_device_from_string(device_string) prior_device_string = device_string op._device_code_locations = self._snapshot_device_function_stack_metadata() # pylint: enable=protected-access # pylint: disable=g-doc-return-or-yield @tf_contextlib.contextmanager def container(self, container_name): """Returns a context manager that specifies the resource container to use. Stateful operations, such as variables and queues, can maintain their states on devices so that they can be shared by multiple processes. A resource container is a string name under which these stateful operations are tracked. These resources can be released or cleared with `tf.Session.reset()`. For example: ```python with g.container('experiment0'): # All stateful Operations constructed in this context will be placed # in resource container "experiment0". 
      v1 = tf.Variable([1.0])
      v2 = tf.Variable([2.0])
      with g.container("experiment1"):
        # All stateful Operations constructed in this context will be
        # placed in resource container "experiment1".
        v3 = tf.Variable([3.0])
        q1 = tf.queue.FIFOQueue(10, tf.float32)
      # All stateful Operations constructed in this context will be
      # created in the container "experiment0".
      v4 = tf.Variable([4.0])
      q1 = tf.queue.FIFOQueue(20, tf.float32)
      with g.container(""):
        # All stateful Operations constructed in this context will be
        # placed in the default resource container.
        v5 = tf.Variable([5.0])
        q3 = tf.queue.FIFOQueue(30, tf.float32)

    # Resets container "experiment0", after which the state of v1, v2, v4, q1
    # will become undefined (such as uninitialized).
    tf.Session.reset(target, ["experiment0"])
    ```

    Args:
      container_name: container name string.

    Returns:
      A context manager for defining resource containers for stateful ops,
        yields the container name.
    """
    # Save/restore the container so nested `container()` contexts unwind
    # correctly even when the body raises.
    original_container = self._container
    self._container = container_name
    try:
      yield self._container
    finally:
      self._container = original_container

  # pylint: enable=g-doc-return-or-yield

  class _ControlDependenciesController(object):
    """Context manager for `control_dependencies()`."""

    def __init__(self, graph, control_inputs):
      """Create a new `_ControlDependenciesController`.

      A `_ControlDependenciesController` is the context manager for
      `with tf.control_dependencies()` blocks.  These normally nest,
      as described in the documentation for `control_dependencies()`.

      The `control_inputs` argument list control dependencies that must be
      added to the current set of control dependencies.  Because of
      uniquification the set can be empty even if the caller passed a list of
      ops.  The special value `None` indicates that we want to start a new
      empty set of control dependencies instead of extending the current set.

      In that case we also clear the current control flow context, which is an
      additional mechanism to add control dependencies.
Args: graph: The graph that this controller is managing. control_inputs: List of ops to use as control inputs in addition to the current control dependencies. None to indicate that the dependencies should be cleared. """ self._graph = graph if control_inputs is None: self._control_inputs_val = [] self._new_stack = True else: self._control_inputs_val = control_inputs self._new_stack = False self._seen_nodes = set() self._old_stack = None self._old_control_flow_context = None # pylint: disable=protected-access def __enter__(self): if self._new_stack: # Clear the control_dependencies graph. self._old_stack = self._graph._control_dependencies_stack self._graph._control_dependencies_stack = [] # Clear the control_flow_context too. self._old_control_flow_context = self._graph._get_control_flow_context() self._graph._set_control_flow_context(None) self._graph._push_control_dependencies_controller(self) def __exit__(self, unused_type, unused_value, unused_traceback): self._graph._pop_control_dependencies_controller(self) if self._new_stack: self._graph._control_dependencies_stack = self._old_stack self._graph._set_control_flow_context(self._old_control_flow_context) # pylint: enable=protected-access @property def control_inputs(self): return self._control_inputs_val def add_op(self, op): if isinstance(op, Tensor): op = op.ref() self._seen_nodes.add(op) def op_in_group(self, op): if isinstance(op, Tensor): op = op.ref() return op in self._seen_nodes def _push_control_dependencies_controller(self, controller): self._control_dependencies_stack.append(controller) def _pop_control_dependencies_controller(self, controller): assert self._control_dependencies_stack[-1] is controller self._control_dependencies_stack.pop() def _current_control_dependencies(self): ret = set() for controller in self._control_dependencies_stack: for op in controller.control_inputs: ret.add(op) return ret def _control_dependencies_for_inputs(self, input_ops): """For an op that takes `input_ops` as 
inputs, compute control inputs. The returned control dependencies should yield an execution that is equivalent to adding all control inputs in self._control_dependencies_stack to a newly created op. However, this function attempts to prune the returned control dependencies by observing that nodes created within the same `with control_dependencies(...):` block may have data dependencies that make the explicit approach redundant. Args: input_ops: The data input ops for an op to be created. Returns: A list of control inputs for the op to be created. """ ret = [] for controller in self._control_dependencies_stack: # If any of the input_ops already depends on the inputs from controller, # we say that the new op is dominated (by that input), and we therefore # do not need to add control dependencies for this controller's inputs. dominated = False for op in input_ops: if controller.op_in_group(op): dominated = True break if not dominated: # Don't add a control input if we already have a data dependency on i. # NOTE(mrry): We do not currently track transitive data dependencies, # so we may add redundant control inputs. ret.extend(c for c in controller.control_inputs if c not in input_ops) return ret def _record_op_seen_by_control_dependencies(self, op): """Record that the given op depends on all registered control dependencies. Args: op: An Operation. """ for controller in self._control_dependencies_stack: controller.add_op(op) def control_dependencies(self, control_inputs): """Returns a context manager that specifies control dependencies. Use with the `with` keyword to specify that all operations constructed within the context should have control dependencies on `control_inputs`. For example: ```python with g.control_dependencies([a, b, c]): # `d` and `e` will only run after `a`, `b`, and `c` have executed. d = ... e = ... 
``` Multiple calls to `control_dependencies()` can be nested, and in that case a new `Operation` will have control dependencies on the union of `control_inputs` from all active contexts. ```python with g.control_dependencies([a, b]): # Ops constructed here run after `a` and `b`. with g.control_dependencies([c, d]): # Ops constructed here run after `a`, `b`, `c`, and `d`. ``` You can pass None to clear the control dependencies: ```python with g.control_dependencies([a, b]): # Ops constructed here run after `a` and `b`. with g.control_dependencies(None): # Ops constructed here run normally, not waiting for either `a` or `b`. with g.control_dependencies([c, d]): # Ops constructed here run after `c` and `d`, also not waiting # for either `a` or `b`. ``` *N.B.* The control dependencies context applies *only* to ops that are constructed within the context. Merely using an op or tensor in the context does not add a control dependency. The following example illustrates this point: ```python # WRONG def my_func(pred, tensor): t = tf.matmul(tensor, tensor) with tf.control_dependencies([pred]): # The matmul op is created outside the context, so no control # dependency will be added. return t # RIGHT def my_func(pred, tensor): with tf.control_dependencies([pred]): # The matmul op is created in the context, so a control dependency # will be added. return tf.matmul(tensor, tensor) ``` Also note that though execution of ops created under this scope will trigger execution of the dependencies, the ops created under this scope might still be pruned from a normal tensorflow graph. For example, in the following snippet of code the dependencies are never executed: ```python loss = model.loss() with tf.control_dependencies(dependencies): loss = loss + tf.constant(1) # note: dependencies ignored in the # backward pass return tf.gradients(loss, model.variables) ``` This is because evaluating the gradient graph does not require evaluating the constant(1) op created in the forward pass. 
Args: control_inputs: A list of `Operation` or `Tensor` objects which must be executed or computed before running the operations defined in the context. Can also be `None` to clear the control dependencies. Returns: A context manager that specifies control dependencies for all operations constructed within the context. Raises: TypeError: If `control_inputs` is not a list of `Operation` or `Tensor` objects. """ if control_inputs is None: return self._ControlDependenciesController(self, None) # First convert the inputs to ops, and deduplicate them. # NOTE(mrry): Other than deduplication, we do not currently track direct # or indirect dependencies between control_inputs, which may result in # redundant control inputs. control_ops = [] current = self._current_control_dependencies() for c in control_inputs: # The hasattr(handle) is designed to match ResourceVariables. This is so # control dependencies on a variable or on an unread variable don't # trigger reads. if (isinstance(c, IndexedSlices) or (hasattr(c, "_handle") and hasattr(c, "op"))): c = c.op c = self.as_graph_element(c) if isinstance(c, Tensor): c = c.op elif not isinstance(c, Operation): raise TypeError("Control input must be Operation or Tensor: %s" % c) if c not in current: control_ops.append(c) current.add(c) return self._ControlDependenciesController(self, control_ops) # pylint: disable=g-doc-return-or-yield @tf_contextlib.contextmanager def _attr_scope(self, attr_map): """EXPERIMENTAL: A context manager for setting attributes on operators. This context manager can be used to add additional attributes to operators within the scope of the context. 
For example: with ops.Graph().as_default() as g: f_1 = Foo() # No extra attributes with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}): f_2 = Foo() # Additional attribute _a=False with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}): f_3 = Foo() # Additional attribute _a=False with g._attr_scope({"_a": None}): f_4 = Foo() # No additional attributes. Args: attr_map: A dictionary mapping attr name strings to AttrValue protocol buffers or None. Returns: A context manager that sets the kernel label to be used for one or more ops created in that context. Raises: TypeError: If attr_map is not a dictionary mapping strings to AttrValue protobufs. """ if not isinstance(attr_map, dict): raise TypeError("attr_map must be a dictionary mapping " "strings to AttrValue protocol buffers") # The saved_attrs dictionary stores any currently-set labels that # will be overridden by this context manager. saved_attrs = {} # Install the given attribute for name, attr in attr_map.items(): if not (isinstance(name, six.string_types) and (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or callable(attr))): raise TypeError("attr_map must be a dictionary mapping " "strings to AttrValue protocol buffers or " "callables that emit AttrValue protocol buffers") try: saved_attrs[name] = self._attr_scope_map[name] except KeyError: pass if attr is None: del self._attr_scope_map[name] else: self._attr_scope_map[name] = attr try: yield # The code within the context runs here. finally: # Remove the attributes set for this context, and restore any saved # attributes. for name, attr in attr_map.items(): try: self._attr_scope_map[name] = saved_attrs[name] except KeyError: del self._attr_scope_map[name] # pylint: enable=g-doc-return-or-yield # pylint: disable=g-doc-return-or-yield @tf_contextlib.contextmanager def _kernel_label_map(self, op_to_kernel_label_map): """EXPERIMENTAL: A context manager for setting kernel labels. 
This context manager can be used to select particular implementations of kernels within the scope of the context. For example: with ops.Graph().as_default() as g: f_1 = Foo() # Uses the default registered kernel for the Foo op. with g.kernel_label_map({"Foo": "v_2"}): f_2 = Foo() # Uses the registered kernel with label "v_2" # for the Foo op. with g.kernel_label_map({"Foo": "v_3"}): f_3 = Foo() # Uses the registered kernel with label "v_3" # for the Foo op. with g.kernel_label_map({"Foo": ""}): f_4 = Foo() # Uses the default registered kernel # for the Foo op. Args: op_to_kernel_label_map: A dictionary mapping op type strings to kernel label strings. Returns: A context manager that sets the kernel label to be used for one or more ops created in that context. Raises: TypeError: If op_to_kernel_label_map is not a dictionary mapping strings to strings. """ if not isinstance(op_to_kernel_label_map, dict): raise TypeError("op_to_kernel_label_map must be a dictionary mapping " "strings to strings") # The saved_labels dictionary stores any currently-set labels that # will be overridden by this context manager. saved_labels = {} # Install the given label for op_type, label in op_to_kernel_label_map.items(): if not (isinstance(op_type, six.string_types) and isinstance(label, six.string_types)): raise TypeError("op_to_kernel_label_map must be a dictionary mapping " "strings to strings") try: saved_labels[op_type] = self._op_to_kernel_label_map[op_type] except KeyError: pass self._op_to_kernel_label_map[op_type] = label try: yield # The code within the context runs here. finally: # Remove the labels set for this context, and restore any saved labels. 
      for op_type, label in op_to_kernel_label_map.items():
        try:
          # A saved label exists: restore the label that was in effect before
          # this context was entered.
          self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
        except KeyError:
          # No prior label for this op type: remove the one we installed.
          del self._op_to_kernel_label_map[op_type]

  # pylint: enable=g-doc-return-or-yield

  @tf_contextlib.contextmanager
  def _override_gradient_function(self, gradient_function_map):
    """Specify gradient function for the given op type."""

    # This is an internal API and we don't need nested context for this.
    assert not self._gradient_function_map
    self._gradient_function_map = gradient_function_map
    yield
    self._gradient_function_map = {}

  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def gradient_override_map(self, op_type_map):
    """EXPERIMENTAL: A context manager for overriding gradient functions.

    This context manager can be used to override the gradient function that
    will be used for ops within the scope of the context.

    For example:

    ```python
    @tf.RegisterGradient("CustomSquare")
    def _custom_square_grad(op, grad):
      # ...

    with tf.Graph().as_default() as g:
      c = tf.constant(5.0)
      s_1 = tf.square(c)  # Uses the default gradient for tf.square.
      with g.gradient_override_map({"Square": "CustomSquare"}):
        s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
                            # gradient of s_2.
    ```

    Args:
      op_type_map: A dictionary mapping op type strings to alternative op type
        strings.

    Returns:
      A context manager that sets the alternative op type to be used for one
      or more ops created in that context.

    Raises:
      TypeError: If `op_type_map` is not a dictionary mapping strings to
        strings.
    """
    if not isinstance(op_type_map, dict):
      raise TypeError("op_type_map must be a dictionary mapping "
                      "strings to strings")
    # The saved_mappings dictionary stores any currently-set mappings that
    # will be overridden by this context manager.
saved_mappings = {} # Install the given label for op_type, mapped_op_type in op_type_map.items(): if not (isinstance(op_type, six.string_types) and isinstance(mapped_op_type, six.string_types)): raise TypeError("op_type_map must be a dictionary mapping " "strings to strings") try: saved_mappings[op_type] = self._gradient_override_map[op_type] except KeyError: pass self._gradient_override_map[op_type] = mapped_op_type try: yield # The code within the context runs here. finally: # Remove the labels set for this context, and restore any saved labels. for op_type, mapped_op_type in op_type_map.items(): try: self._gradient_override_map[op_type] = saved_mappings[op_type] except KeyError: del self._gradient_override_map[op_type] # pylint: enable=g-doc-return-or-yield def prevent_feeding(self, tensor): """Marks the given `tensor` as unfeedable in this graph.""" self._unfeedable_tensors.add(tensor) def is_feedable(self, tensor): """Returns `True` if and only if `tensor` is feedable.""" return tensor not in self._unfeedable_tensors def prevent_fetching(self, op): """Marks the given `op` as unfetchable in this graph.""" self._unfetchable_ops.add(op) def is_fetchable(self, tensor_or_op): """Returns `True` if and only if `tensor_or_op` is fetchable.""" if isinstance(tensor_or_op, Tensor): return tensor_or_op.op not in self._unfetchable_ops else: return tensor_or_op not in self._unfetchable_ops def switch_to_thread_local(self): """Make device, colocation and dependencies stacks thread-local. Device, colocation and dependencies stacks are not thread-local be default. If multiple threads access them, then the state is shared. This means that one thread may affect the behavior of another thread. After this method is called, the stacks become thread-local. If multiple threads access them, then the state is not shared. Each thread uses its own value; a thread doesn't affect other threads by mutating such a stack. 
The initial value for every thread's stack is set to the current value of the stack when `switch_to_thread_local()` was first called. """ if not self._stack_state_is_thread_local: self._stack_state_is_thread_local = True @property def _device_function_stack(self): if self._stack_state_is_thread_local: # This may be called from a thread where device_function_stack doesn't yet # exist. # pylint: disable=protected-access if not hasattr(self._thread_local, "_device_function_stack"): stack_copy_for_this_thread = self._graph_device_function_stack.copy() self._thread_local._device_function_stack = stack_copy_for_this_thread return self._thread_local._device_function_stack # pylint: enable=protected-access else: return self._graph_device_function_stack @property def _device_functions_outer_to_inner(self): user_device_specs = self._device_function_stack.peek_objs() device_functions = [spec.function for spec in user_device_specs] device_functions_outer_to_inner = list(reversed(device_functions)) return device_functions_outer_to_inner def _snapshot_device_function_stack_metadata(self): """Return device function stack as a list of TraceableObjects. Returns: [traceable_stack.TraceableObject, ...] where each TraceableObject's .obj member is a displayable name for the user's argument to Graph.device, and the filename and lineno members point to the code location where Graph.device was called directly or indirectly by the user. 
""" snapshot = [] for obj in self._device_function_stack.peek_traceable_objs(): obj_copy = obj.copy_metadata() obj_copy.obj = obj.obj.display_name snapshot.append(obj_copy) return snapshot @_device_function_stack.setter def _device_function_stack(self, device_function_stack): if self._stack_state_is_thread_local: # pylint: disable=protected-access self._thread_local._device_function_stack = device_function_stack # pylint: enable=protected-access else: self._graph_device_function_stack = device_function_stack @property def _colocation_stack(self): """Return thread-local copy of colocation stack.""" if self._stack_state_is_thread_local: # This may be called from a thread where colocation_stack doesn't yet # exist. # pylint: disable=protected-access if not hasattr(self._thread_local, "_colocation_stack"): stack_copy_for_this_thread = self._graph_colocation_stack.copy() self._thread_local._colocation_stack = stack_copy_for_this_thread return self._thread_local._colocation_stack # pylint: enable=protected-access else: return self._graph_colocation_stack def _snapshot_colocation_stack_metadata(self): """Return colocation stack metadata as a dictionary.""" return { traceable_obj.obj.name: traceable_obj.copy_metadata() for traceable_obj in self._colocation_stack.peek_traceable_objs() } @_colocation_stack.setter def _colocation_stack(self, colocation_stack): if self._stack_state_is_thread_local: # pylint: disable=protected-access self._thread_local._colocation_stack = colocation_stack # pylint: enable=protected-access else: self._graph_colocation_stack = colocation_stack @property def _control_dependencies_stack(self): if self._stack_state_is_thread_local: # This may be called from a thread where control_dependencies_stack # doesn't yet exist. 
if not hasattr(self._thread_local, "_control_dependencies_stack"): self._thread_local._control_dependencies_stack = ( self._graph_control_dependencies_stack[:]) return self._thread_local._control_dependencies_stack else: return self._graph_control_dependencies_stack @_control_dependencies_stack.setter def _control_dependencies_stack(self, control_dependencies): if self._stack_state_is_thread_local: self._thread_local._control_dependencies_stack = control_dependencies else: self._graph_control_dependencies_stack = control_dependencies @property def _distribution_strategy_stack(self): """A stack to maintain distribution strategy context for each thread.""" if not hasattr(self._thread_local, "_distribution_strategy_stack"): self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access @_distribution_strategy_stack.setter def _distribution_strategy_stack(self, _distribution_strategy_stack): self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access _distribution_strategy_stack) @property def _global_distribute_strategy_scope(self): """For implementing `tf.distribute.set_strategy()`.""" if not hasattr(self._thread_local, "distribute_strategy_scope"): self._thread_local.distribute_strategy_scope = None return self._thread_local.distribute_strategy_scope @_global_distribute_strategy_scope.setter def _global_distribute_strategy_scope(self, distribute_strategy_scope): self._thread_local.distribute_strategy_scope = (distribute_strategy_scope) @property def _auto_cast_variable_read_dtype(self): """The dtype that instances of `AutoCastVariable` will be casted to. This is None if `AutoCastVariables` should not be casted. See `AutoCastVariable` for more information. Returns: The dtype that instances of `AutoCastVariable` will be casted to. 
""" if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"): self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access @_auto_cast_variable_read_dtype.setter def _auto_cast_variable_read_dtype(self, dtype): if dtype: dtype = dtypes.as_dtype(dtype) self._thread_local._auto_cast_variable_read_dtype = dtype # pylint: disable=protected-access @tf_contextlib.contextmanager def _enable_auto_casting_variables(self, dtype): """Context manager to automatically cast AutoCastVariables. If an AutoCastVariable `var` is used under this context manager, it will be casted to `dtype` before being used. See `AutoCastVariable` for more information. Args: dtype: The dtype that AutoCastVariables should be casted to. Yields: Nothing. """ prev_read_dtype = self._auto_cast_variable_read_dtype try: self._auto_cast_variable_read_dtype = dtype yield finally: self._auto_cast_variable_read_dtype = prev_read_dtype def _mutation_lock(self): """Returns a lock to guard code that creates & mutates ops. See the comment for self._group_lock for more info. """ return self._group_lock.group(_MUTATION_LOCK_GROUP) def _session_run_lock(self): """Returns a lock to guard code for Session.run. See the comment for self._group_lock for more info. """ return self._group_lock.group(_SESSION_RUN_LOCK_GROUP) # TODO(agarwal): currently device directives in an outer eager scope will not # apply to inner graph mode code. Fix that. @tf_export(v1=["device"]) def device(device_name_or_function): """Wrapper for `Graph.device()` using the default graph. See `tf.Graph.device` for more details. Args: device_name_or_function: The device name or function to use in the context. Returns: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If eager execution is enabled and a function is passed in. 
""" if context.executing_eagerly(): if callable(device_name_or_function): raise RuntimeError( "tf.device does not support functions when eager execution " "is enabled.") return context.device(device_name_or_function) elif executing_eagerly_outside_functions(): @tf_contextlib.contextmanager def combined(device_name_or_function): with get_default_graph().device(device_name_or_function): if not callable(device_name_or_function): with context.device(device_name_or_function): yield else: yield return combined(device_name_or_function) else: return get_default_graph().device(device_name_or_function) @tf_export("device", v1=[]) def device_v2(device_name): """Specifies the device for ops created/executed in this context. This function specifies the device to be used for ops created/executed in a particular context. Nested contexts will inherit and also create/execute their ops on the specified device. If a specific device is not required, consider not using this function so that a device can be automatically assigned. In general the use of this function is optional. `device_name` can be fully specified, as in "/job:worker/task:1/device:cpu:0", or partially specified, containing only a subset of the "/"-separated fields. Any fields which are specified will override device annotations from outer scopes. For example: ```python with tf.device('/job:foo'): # ops created here have devices with /job:foo with tf.device('/job:bar/task:0/device:gpu:2'): # ops created here have the fully specified device above with tf.device('/device:gpu:1'): # ops created here have the device '/job:foo/device:gpu:1' ``` Args: device_name: The device name to use in the context. Returns: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If a function is passed in. 
""" if callable(device_name): raise RuntimeError("tf.device does not support functions.") return device(device_name) @tf_export(v1=["container"]) def container(container_name): """Wrapper for `Graph.container()` using the default graph. Args: container_name: The container string to use in the context. Returns: A context manager that specifies the default container to use for newly created stateful ops. """ return get_default_graph().container(container_name) def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False): if context.executing_eagerly(): if op is not None: if not hasattr(op, "device"): op = internal_convert_to_tensor_or_indexed_slices(op) return device(op.device) else: return NullContextmanager() else: default_graph = get_default_graph() if isinstance(op, EagerTensor): if default_graph.building_function: return default_graph.device(op.device) else: raise ValueError("Encountered an Eager-defined Tensor during graph " "construction, but a function was not being built.") return default_graph._colocate_with_for_gradient( op, gradient_uid=gradient_uid, ignore_existing=ignore_existing) # Internal interface to colocate_with. colocate_with has been deprecated from # public API. There are still a few internal uses of colocate_with. Add internal # only API for those uses to avoid deprecation warning. def colocate_with(op, ignore_existing=False): return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing) @deprecation.deprecated( date=None, instructions="Colocations handled automatically by placer.") @tf_export(v1=["colocate_with"]) def _colocate_with(op, ignore_existing=False): return colocate_with(op, ignore_existing) @tf_export("control_dependencies") def control_dependencies(control_inputs): """Wrapper for `Graph.control_dependencies()` using the default graph. See `tf.Graph.control_dependencies` for more details. When eager execution is enabled, any callable object in the `control_inputs` list will be called. 
  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which
      must be executed or computed before running the operations
      defined in the context.  Can also be `None` to clear the control
      dependencies. If eager execution is enabled, any callable object in the
      `control_inputs` list will be called.

  Returns:
   A context manager that specifies control dependencies for all
   operations constructed within the context.
  """
  if context.executing_eagerly():
    if control_inputs:
      # Execute any pending callables.
      for control in control_inputs:
        if callable(control):
          control()
    return NullContextmanager()
  else:
    return get_default_graph().control_dependencies(control_inputs)


class _DefaultStack(threading.local):
  """A thread-local stack of objects for providing implicit defaults."""

  def __init__(self):
    super(_DefaultStack, self).__init__()
    # When True, get_controller() requires that defaults are exited in strict
    # LIFO order; when False, a default may be removed from the middle of the
    # stack on exit.
    self._enforce_nesting = True
    self.stack = []

  def get_default(self):
    """Returns the innermost default on this thread's stack, or None."""
    return self.stack[-1] if len(self.stack) >= 1 else None

  def reset(self):
    """Removes every default currently installed on this thread's stack."""
    self.stack = []

  def is_cleared(self):
    """Returns True if no default is currently installed."""
    return not self.stack

  @property
  def enforce_nesting(self):
    """Whether get_controller() raises if defaults are exited out of order."""
    return self._enforce_nesting

  @enforce_nesting.setter
  def enforce_nesting(self, value):
    self._enforce_nesting = value

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    """A context manager for manipulating a default stack.

    Pushes `default` onto the stack for the duration of the `with` block and
    pops (or removes) it on exit.

    Args:
      default: The object to install as the innermost default.

    Yields:
      `default`.

    Raises:
      AssertionError: On exit, if nesting is enforced and `default` is not
        the innermost entry (i.e. the `with` blocks were exited out of
        order).
    """
    self.stack.append(default)
    try:
      yield default
    finally:
      # stack may be empty if reset() was called
      if self.stack:
        if self._enforce_nesting:
          if self.stack[-1] is not default:
            raise AssertionError(
                "Nesting violated for default stack of %s objects" %
                type(default))
          self.stack.pop()
        else:
          # Non-nesting mode: remove `default` wherever it sits in the stack.
          self.stack.remove(default)


# Thread-local stack of default sessions; manipulated via default_session().
_default_session_stack = _DefaultStack()  # pylint: disable=protected-access


def default_session(session):
  """Python "with" handler for defining a default session.

  This function provides a means of registering a session for handling
  Tensor.eval() and Operation.run() calls.
It is primarily intended for use by session.Session, but can be used with any object that implements the Session.run() interface. Use with the "with" keyword to specify that Tensor.eval() and Operation.run() invocations within the scope of a block should be executed by a particular session. The default session applies to the current thread only, so it is always possible to inspect the call stack and determine the scope of a default session. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a "with ops.default_session(sess):" block in that thread's function. Example: The following code examples are equivalent: # 1. Using the Session object directly: sess = ... c = tf.constant(5.0) sess.run(c) # 2. Using default_session(): sess = ... with ops.default_session(sess): c = tf.constant(5.0) result = c.eval() # 3. Overriding default_session(): sess = ... with ops.default_session(sess): c = tf.constant(5.0) with ops.default_session(...): c.eval(session=sess) Args: session: The session to be installed as the default session. Returns: A context manager for the default session. """ return _default_session_stack.get_controller(session) @tf_export(v1=["get_default_session"]) def get_default_session(): """Returns the default session for the current thread. The returned `Session` will be the innermost session on which a `Session` or `Session.as_default()` context has been entered. NOTE: The default session is a property of the current thread. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a `with sess.as_default():` in that thread's function. Returns: The default `Session` being used in the current thread. """ return _default_session_stack.get_default() def _eval_using_default_session(tensors, feed_dict, graph, session=None): """Uses the default session to evaluate one or more tensors. Args: tensors: A single Tensor, or a list of Tensor objects. 
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which the tensors are defined. session: (Optional) A different session to use to evaluate "tensors". Returns: Either a single numpy ndarray if "tensors" is a single tensor; or a list of numpy ndarrays that each correspond to the respective element in "tensors". Raises: ValueError: If no default session is available; the default session does not have "graph" as its graph; or if "session" is specified, and it does not have "graph" as its graph. """ if session is None: session = get_default_session() if session is None: raise ValueError("Cannot evaluate tensor using `eval()`: No default " "session is registered. Use `with " "sess.as_default()` or pass an explicit session to " "`eval(session=sess)`") if session.graph is not graph: raise ValueError("Cannot use the default session to evaluate tensor: " "the tensor's graph is different from the session's " "graph. Pass an explicit session to " "`eval(session=sess)`.") else: if session.graph is not graph: raise ValueError("Cannot use the given session to evaluate tensor: " "the tensor's graph is different from the session's " "graph.") return session.run(tensors, feed_dict) def _run_using_default_session(operation, feed_dict, graph, session=None): """Uses the default session to run "operation". Args: operation: The Operation to be run. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which "operation" is defined. session: (Optional) A different session to use to run "operation". Raises: ValueError: If no default session is available; the default session does not have "graph" as its graph; or if "session" is specified, and it does not have "graph" as its graph. 
""" if session is None: session = get_default_session() if session is None: raise ValueError("Cannot execute operation using `run()`: No default " "session is registered. Use `with " "sess.as_default():` or pass an explicit session to " "`run(session=sess)`") if session.graph is not graph: raise ValueError("Cannot use the default session to execute operation: " "the operation's graph is different from the " "session's graph. Pass an explicit session to " "run(session=sess).") else: if session.graph is not graph: raise ValueError("Cannot use the given session to execute operation: " "the operation's graph is different from the session's " "graph.") session.run(operation, feed_dict) class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access """A thread-local stack of objects for providing an implicit default graph.""" def __init__(self): super(_DefaultGraphStack, self).__init__() self._global_default_graph = None def get_default(self): """Override that returns a global default if the stack is empty.""" ret = super(_DefaultGraphStack, self).get_default() if ret is None: ret = self._GetGlobalDefaultGraph() return ret def _GetGlobalDefaultGraph(self): if self._global_default_graph is None: # TODO(mrry): Perhaps log that the default graph is being used, or set # provide some other feedback to prevent confusion when a mixture of # the global default graph and an explicit graph are combined in the # same process. 
      self._global_default_graph = Graph()
    return self._global_default_graph

  def reset(self):
    """Clears the thread-local stack and drops the global default graph."""
    super(_DefaultGraphStack, self).reset()
    self._global_default_graph = None

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    """Installs `default` as the default graph and enters graph mode.

    In addition to the base-class stack manipulation, this records the
    context switch on the eager context's `context_switches` log (consumed
    by init_scope) and forces graph mode for the duration of the block.
    """
    context.context().context_switches.push(default.building_function,
                                            default.as_default,
                                            default._device_function_stack)
    try:
      with super(_DefaultGraphStack,
                 self).get_controller(default) as g, context.graph_mode():
        yield g
    finally:
      # If an exception is raised here it may be hiding a related exception in
      # the try-block (just above).
      context.context().context_switches.pop()


_default_graph_stack = _DefaultGraphStack()


# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non empty device stack.
def _get_outer_context_and_inner_device_stack():
  """Get the outermost context not building a function."""
  default_graph = get_default_graph()
  outer_context = None
  innermost_nonempty_device_stack = default_graph._device_function_stack  # pylint: disable=protected-access

  if not _default_graph_stack.stack:
    # If the default graph stack is empty, then we cannot be building a
    # function. Install the global graph (which, in this case, is also the
    # default graph) as the outer context.
    if default_graph.building_function:
      raise RuntimeError("The global graph is building a function.")
    outer_context = default_graph.as_default
  else:
    # Find a context that is not building a function.
for stack_entry in reversed(context.context().context_switches.stack): if not innermost_nonempty_device_stack: innermost_nonempty_device_stack = stack_entry.device_stack if not stack_entry.is_building_function: outer_context = stack_entry.enter_context_fn break if outer_context is None: # As a last resort, obtain the global default graph; this graph doesn't # necessarily live on the graph stack (and hence it doesn't necessarily # live on the context stack), but it is stored in the graph stack's # encapsulating object. outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access if outer_context is None: # Sanity check; this shouldn't be triggered. raise RuntimeError("All graphs are building functions, and no " "eager context was previously active.") return outer_context, innermost_nonempty_device_stack # pylint: disable=g-doc-return-or-yield,line-too-long @tf_export("init_scope") @tf_contextlib.contextmanager def init_scope(): """A context manager that lifts ops out of control-flow scopes and function-building graphs. There is often a need to lift variable initialization ops out of control-flow scopes, function-building graphs, and gradient tapes. Entering an `init_scope` is a mechanism for satisfying these desiderata. In particular, entering an `init_scope` has three effects: (1) All control dependencies are cleared the moment the scope is entered; this is equivalent to entering the context manager returned from `control_dependencies(None)`, which has the side-effect of exiting control-flow scopes like `tf.cond` and `tf.while_loop`. (2) All operations that are created while the scope is active are lifted into the lowest context on the `context_stack` that is not building a graph function. Here, a context is defined as either a graph or an eager context. 
Every context switch, i.e., every installation of a graph as the default graph and every switch into eager mode, is logged in a thread-local stack called `context_switches`; the log entry for a context switch is popped from the stack when the context is exited. Entering an `init_scope` is equivalent to crawling up `context_switches`, finding the first context that is not building a graph function, and entering it. A caveat is that if graph mode is enabled but the default graph stack is empty, then entering an `init_scope` will simply install a fresh graph as the default one. (3) The gradient tape is paused while the scope is active. When eager execution is enabled, code inside an init_scope block runs with eager execution enabled even when tracing a `tf.function`. For example: ```python tf.compat.v1.enable_eager_execution() @tf.function def func(): # A function constructs TensorFlow graphs, # it does not execute eagerly. assert not tf.executing_eagerly() with tf.init_scope(): # Initialization runs with eager execution enabled assert tf.executing_eagerly() ``` Raises: RuntimeError: if graph state is incompatible with this initialization. """ # pylint: enable=g-doc-return-or-yield,line-too-long if context.executing_eagerly(): # Fastpath. with tape.stop_recording(): yield else: # Retrieve the active name scope: entering an `init_scope` preserves # the name scope of the current context. scope = get_default_graph().get_name_scope() if scope and scope[-1] != "/": # Names that end with trailing slashes are treated by `name_scope` as # absolute. 
scope = scope + "/" outer_context, innermost_nonempty_device_stack = ( _get_outer_context_and_inner_device_stack()) outer_graph = None outer_device_stack = None try: with outer_context(), name_scope( scope, skip_on_eager=False), control_dependencies( None), tape.stop_recording(): context_manager = NullContextmanager context_manager_input = None if not context.executing_eagerly(): # The device stack is preserved when lifting into a graph. Eager # execution doesn't implement device stacks and in particular it # doesn't support device functions, so in general it's not possible # to do the same when lifting into the eager context. outer_graph = get_default_graph() outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access elif innermost_nonempty_device_stack is not None: for device_spec in innermost_nonempty_device_stack.peek_objs(): if device_spec.function is None: break if device_spec.raw_string: context_manager = context.device context_manager_input = device_spec.raw_string break # It is currently not possible to have a device function in V2, # but in V1 we are unable to apply device functions in eager mode. # This means that we will silently skip some of the entries on the # device stack in V1 + eager mode. with context_manager(context_manager_input): yield finally: # If an exception is raised here it may be hiding a related exception in # try-block (just above). if outer_graph is not None: outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access @tf_export(v1=["executing_eagerly_outside_functions"]) def executing_eagerly_outside_functions(): """Returns True if executing eagerly, even if inside a graph function. This function will check the outermost context for the program and see if it is in eager mode. 
  It is useful comparing to `tf.executing_eagerly()`, which checks the
  current context and will return `False` within a `tf.function` body. It can
  be used to build libraries that behave differently in eager runtime and v1
  session runtime (deprecated).

  Example:

  >>> tf.compat.v1.enable_eager_execution()
  >>> @tf.function
  ... def func():
  ...   # A function constructs TensorFlow graphs, it does not execute eagerly,
  ...   # but the outer most context is still eager.
  ...   assert not tf.executing_eagerly()
  ...   return tf.compat.v1.executing_eagerly_outside_functions()
  >>> func()
  <tf.Tensor: shape=(), dtype=bool, numpy=True>

  Returns:
    boolean, whether the outermost context is in eager mode.
  """
  if context.executing_eagerly():
    return True
  else:
    # Walk out to the outermost non-function-building context and check
    # whether *it* is executing eagerly.
    outer_context, _ = _get_outer_context_and_inner_device_stack()
    with outer_context():
      return context.executing_eagerly()


def inside_function():
  """Returns True if the current default graph is building a function."""
  return get_default_graph().building_function


@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None,
                           device_policy=None,
                           execution_mode=None):
  """Enables eager execution for the lifetime of this program.

  Eager execution provides an imperative interface to TensorFlow. With eager
  execution enabled, TensorFlow functions execute operations immediately (as
  opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)
  and return concrete values (as opposed to symbolic references to a node in a
  computational graph).

  For example:

  ```python
  tf.compat.v1.enable_eager_execution()

  # After eager execution is enabled, operations are executed as they are
  # defined and Tensor objects hold concrete values, which can be accessed as
  # numpy.ndarray`s through the numpy() method.
  assert tf.multiply(6, 7).numpy() == 42
  ```

  Eager execution cannot be enabled after TensorFlow APIs have been used to
  create or execute graphs.
It is typically recommended to invoke this function at program startup and not in a library (as most libraries should be usable both with and without eager execution). Args: config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the environment in which operations are executed. Note that `tf.compat.v1.ConfigProto` is also used to configure graph execution (via `tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto` are not implemented (or are irrelevant) when eager execution is enabled. device_policy: (Optional.) Policy controlling how operations requiring inputs on a specific device (e.g., a GPU 0) handle inputs on a different device (e.g. GPU 1 or CPU). When set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Valid values: - tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is not correct. - tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not on the right device but logs a warning. - tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors. Note that this may hide performance problems as there is no notification provided when operations are blocked on the tensor being copied between devices. - tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors, raising errors on the other ones. execution_mode: (Optional.) Policy controlling how operations dispatched are actually executed. When set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Valid values: - tf.contrib.eager.SYNC: executes each operation synchronously. - tf.contrib.eager.ASYNC: executes each operation asynchronously. These operations may return "non-ready" handles. Raises: ValueError: If eager execution is enabled after creating/executing a TensorFlow graph, or if options provided conflict with a previous call to this function. 
""" _api_usage_gauge.get_cell().set(True) if context.default_execution_mode != context.EAGER_MODE: return enable_eager_execution_internal( config=config, device_policy=device_policy, execution_mode=execution_mode, server_def=None) @tf_export(v1=["disable_eager_execution"]) def disable_eager_execution(): """Disables eager execution. This function can only be called before any Graphs, Ops, or Tensors have been created. It can be used at the beginning of the program for complex migration projects from TensorFlow 1.x to 2.x. """ _api_usage_gauge.get_cell().set(False) context.default_execution_mode = context.GRAPH_MODE c = context.context_safe() if c is not None: c._thread_local_data.is_eager = False # pylint: disable=protected-access def enable_eager_execution_internal(config=None, device_policy=None, execution_mode=None, server_def=None): """Enables eager execution for the lifetime of this program. Most of the doc string for enable_eager_execution is relevant here as well. Args: config: See enable_eager_execution doc string device_policy: See enable_eager_execution doc string execution_mode: See enable_eager_execution doc string server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on remote devices. GrpcServers need to be started by creating an identical server_def to this, and setting the appropriate task_indexes, so that the servers can communicate. It will then be possible to execute operations on remote devices. 
Raises: ValueError """ if config is not None and not isinstance(config, config_pb2.ConfigProto): raise TypeError("config must be a tf.ConfigProto, but got %s" % type(config)) if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT, context.DEVICE_PLACEMENT_WARN, context.DEVICE_PLACEMENT_SILENT, context.DEVICE_PLACEMENT_SILENT_FOR_INT32): raise ValueError( "device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*" ) if execution_mode not in (None, context.SYNC, context.ASYNC): raise ValueError( "execution_mode must be one of None, tf.contrib.eager.SYNC, " "tf.contrib.eager.ASYNC") if context.default_execution_mode == context.GRAPH_MODE: graph_mode_has_been_used = ( _default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access if graph_mode_has_been_used: raise ValueError( "tf.enable_eager_execution must be called at program startup.") context.default_execution_mode = context.EAGER_MODE # pylint: disable=protected-access with context._context_lock: if context._context is None: context._set_context_locked(context.Context( config=config, device_policy=device_policy, execution_mode=execution_mode, server_def=server_def)) elif ((config is not None and config is not context._context._config) or (device_policy is not None and device_policy is not context._context._device_policy) or (execution_mode is not None and execution_mode is not context._context._execution_mode)): raise ValueError( "Trying to change the options of an active eager" " execution. Context config: %s, specified config:" " %s. Context device policy: %s, specified device" " policy: %s. Context execution mode: %s, " " specified execution mode %s." % (context._context._config, config, context._context._device_policy, device_policy, context._context._execution_mode, execution_mode)) else: # We already created everything, so update the thread local data. 
context._context._thread_local_data.is_eager = True # Monkey patch to get rid of an unnecessary conditional since the context is # now initialized. context.context = context.context_safe def eager_run(main=None, argv=None): """Runs the program with an optional main function and argv list. The program will run with eager execution enabled. Example: ```python import tensorflow as tf # Import subject to future changes: from tensorflow.contrib.eager.python import tfe def main(_): u = tf.constant(6.0) v = tf.constant(7.0) print(u * v) if __name__ == "__main__": tfe.run() ``` Args: main: the main function to run. argv: the arguments to pass to it. """ enable_eager_execution() app.run(main, argv) @tf_export(v1=["reset_default_graph"]) def reset_default_graph(): """Clears the default graph stack and resets the global default graph. NOTE: The default graph is a property of the current thread. This function applies only to the current thread. Calling this function while a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will result in undefined behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects after calling this function will result in undefined behavior. Raises: AssertionError: If this function is called within a nested graph. """ if not _default_graph_stack.is_cleared(): raise AssertionError("Do not use tf.reset_default_graph() to clear " "nested graphs. If you need a cleared graph, " "exit the nesting and create a new graph.") _default_graph_stack.reset() @tf_export(v1=["get_default_graph"]) def get_default_graph(): """Returns the default graph for the current thread. The returned graph will be the innermost graph on which a `Graph.as_default()` context has been entered, or a global default graph if none has been explicitly created. NOTE: The default graph is a property of the current thread. 
If you create a new thread, and wish to use the default graph in that thread, you must explicitly add a `with g.as_default():` in that thread's function. Returns: The default `Graph` being used in the current thread. """ return _default_graph_stack.get_default() def has_default_graph(): """Returns True if there is a default graph.""" return len(_default_graph_stack.stack) >= 1 def get_name_scope(): """Returns the current name scope in the default_graph. For example: ```python with tf.name_scope('scope1'): with tf.name_scope('scope2'): print(tf.get_name_scope()) ``` would print the string `scope1/scope2`. Returns: A string representing the current name scope. """ if context.executing_eagerly(): return context.context().scope_name.rstrip("/") return get_default_graph().get_name_scope() def _assert_same_graph(original_item, item): """Fail if the 2 items are from different graphs. Args: original_item: Original item to check against. item: Item to check. Raises: ValueError: if graphs do not match. """ if original_item.graph is not item.graph: raise ValueError("%s must be from the same graph as %s." % (item, original_item)) def _get_graph_from_inputs(op_input_list, graph=None): """Returns the appropriate graph to use for the given inputs. This library method provides a consistent algorithm for choosing the graph in which an Operation should be constructed: 1. If the default graph is being used to construct a function, we use the default graph. 2. If the "graph" is specified explicitly, we validate that all of the inputs in "op_input_list" are compatible with that graph. 3. Otherwise, we attempt to select a graph from the first Operation- or Tensor-valued input in "op_input_list", and validate that all other such inputs are in the same graph. 4. If the graph was not specified and it could not be inferred from "op_input_list", we attempt to use the default graph. 
Args: op_input_list: A list of inputs to an operation, which may include `Tensor`, `Operation`, and other objects that may be converted to a graph element. graph: (Optional) The explicit graph to use. Raises: TypeError: If op_input_list is not a list or tuple, or if graph is not a Graph. ValueError: If a graph is explicitly passed and not all inputs are from it, or if the inputs are from multiple graphs, or we could not find a graph and there was no default graph. Returns: The appropriate graph to use for the given inputs. """ current_default_graph = get_default_graph() if current_default_graph.building_function: return current_default_graph op_input_list = tuple(op_input_list) # Handle generators correctly if graph and not isinstance(graph, Graph): raise TypeError("Input graph needs to be a Graph: %s" % graph) # 1. We validate that all of the inputs are from the same graph. This is # either the supplied graph parameter, or the first one selected from one # the graph-element-valued inputs. In the latter case, we hold onto # that input in original_graph_element so we can provide a more # informative error if a mismatch is found. original_graph_element = None for op_input in op_input_list: # Determine if this is a valid graph_element. # TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this # up. graph_element = None if (isinstance(op_input, (Operation, _TensorLike)) and ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck graph_element = op_input else: graph_element = _as_graph_element(op_input) if graph_element is not None: if not graph: original_graph_element = graph_element graph = graph_element.graph elif original_graph_element is not None: _assert_same_graph(original_graph_element, graph_element) elif graph_element.graph is not graph: raise ValueError("%s is not from the passed-in graph." % graph_element) # 2. If all else fails, we use the default graph, which is always there. 
return graph or current_default_graph @tf_export(v1=["GraphKeys"]) class GraphKeys(object): """Standard names to use for graph collections. The standard library uses various well-known names to collect and retrieve values associated with a graph. For example, the `tf.Optimizer` subclasses default to optimizing the variables collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is specified, but it is also possible to pass an explicit list of variables. The following standard keys are defined: * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared across distributed environment (model variables are subset of these). See `tf.compat.v1.global_variables` for more details. Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`, and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`. * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each machine. Usually used for temporarily variables, like counters. Note: use `tf.contrib.framework.local_variable` to add to this collection. * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the model for inference (feed forward). Note: use `tf.contrib.framework.model_variable` to add to this collection. * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will be trained by an optimizer. See `tf.compat.v1.trainable_variables` for more details. * `SUMMARIES`: the summary `Tensor` objects that have been created in the graph. See `tf.compat.v1.summary.merge_all` for more details. * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to produce input for a computation. See `tf.compat.v1.train.start_queue_runners` for more details. * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also keep moving averages. See `tf.compat.v1.moving_average_variables` for more details. * `REGULARIZATION_LOSSES`: regularization losses collected during graph construction. 
  The following standard keys are _defined_, but their collections are **not**
  automatically populated as many of the others are:

  * `WEIGHTS`
  * `BIASES`
  * `ACTIVATIONS`
  """

  # Key to collect Variable objects that are global (shared across machines).
  # Default collection for all variables, except local ones.
  GLOBAL_VARIABLES = "variables"
  # Key to collect local variables that are local to the machine and are not
  # saved/restored.
  LOCAL_VARIABLES = "local_variables"
  # Key to collect local variables which are used to accumulate internal state
  # to be used in tf.metrics.*.
  METRIC_VARIABLES = "metric_variables"
  # Key to collect model variables defined by layers.
  MODEL_VARIABLES = "model_variables"
  # Key to collect Variable objects that will be trained by the
  # optimizers.
  TRAINABLE_VARIABLES = "trainable_variables"
  # Key to collect summaries.
  SUMMARIES = "summaries"
  # Key to collect QueueRunners.
  QUEUE_RUNNERS = "queue_runners"
  # Key to collect table initializers.
  TABLE_INITIALIZERS = "table_initializer"
  # Key to collect asset filepaths. An asset represents an external resource
  # like a vocabulary file.
  ASSET_FILEPATHS = "asset_filepaths"
  # Key to collect Variable objects that keep moving averages.
  MOVING_AVERAGE_VARIABLES = "moving_average_variables"
  # Key to collect regularization losses at graph construction.
  REGULARIZATION_LOSSES = "regularization_losses"
  # Key to collect concatenated sharded variables.
  CONCATENATED_VARIABLES = "concatenated_variables"
  # Key to collect savers.
  SAVERS = "savers"
  # Key to collect weights
  WEIGHTS = "weights"
  # Key to collect biases
  BIASES = "biases"
  # Key to collect activations
  ACTIVATIONS = "activations"
  # Key to collect update_ops
  UPDATE_OPS = "update_ops"
  # Key to collect losses
  LOSSES = "losses"
  # Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
  SAVEABLE_OBJECTS = "saveable_objects"
  # Key to collect all shared resources used by the graph which need to be
  # initialized once per cluster.
RESOURCES = "resources" # Key to collect all shared resources used in this graph which need to be # initialized once per session. LOCAL_RESOURCES = "local_resources" # Trainable resource-style variables. TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables" # Key to indicate various ops. INIT_OP = "init_op" LOCAL_INIT_OP = "local_init_op" READY_OP = "ready_op" READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op" SUMMARY_OP = "summary_op" GLOBAL_STEP = "global_step" # Used to count the number of evaluations performed during a single evaluation # run. EVAL_STEP = "eval_step" TRAIN_OP = "train_op" # Key for control flow context. COND_CONTEXT = "cond_context" WHILE_CONTEXT = "while_context" # Used to store v2 summary names. _SUMMARY_COLLECTION = "_SUMMARY_V2" # List of all collections that keep track of variables. _VARIABLE_COLLECTIONS = [ GLOBAL_VARIABLES, LOCAL_VARIABLES, METRIC_VARIABLES, MODEL_VARIABLES, TRAINABLE_VARIABLES, MOVING_AVERAGE_VARIABLES, CONCATENATED_VARIABLES, TRAINABLE_RESOURCE_VARIABLES, ] # Key for streaming model ports. # NOTE(yuanbyu): internal and experimental. _STREAMING_MODEL_PORTS = "streaming_model_ports" @decorator_utils.classproperty @deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.") def VARIABLES(cls): # pylint: disable=no-self-argument return cls.GLOBAL_VARIABLES def dismantle_graph(graph): """Cleans up reference cycles from a `Graph`. Helpful for making sure the garbage collector doesn't need to run after a temporary `Graph` is no longer needed. Args: graph: A `Graph` object to destroy. Neither it nor any of its ops are usable after this function runs. """ memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access # Now clean up Operation<->Graph reference cycles by clearing all of the # attributes for the Graph and its ops. 
graph_operations = graph.get_operations() for op in graph_operations: op.__dict__ = {} graph.__dict__ = {} @tf_export(v1=["add_to_collection"]) def add_to_collection(name, value): """Wrapper for `Graph.add_to_collection()` using the default graph. See `tf.Graph.add_to_collection` for more details. Args: name: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. value: The value to add to the collection. @compatibility(eager) Collections are only supported in eager when variables are created inside an EagerVariableStore (e.g. as part of a layer or template). @end_compatibility """ get_default_graph().add_to_collection(name, value) @tf_export(v1=["add_to_collections"]) def add_to_collections(names, value): """Wrapper for `Graph.add_to_collections()` using the default graph. See `tf.Graph.add_to_collections` for more details. Args: names: The key for the collections. The `GraphKeys` class contains many standard names for collections. value: The value to add to the collections. @compatibility(eager) Collections are only supported in eager when variables are created inside an EagerVariableStore (e.g. as part of a layer or template). @end_compatibility """ get_default_graph().add_to_collections(names, value) @tf_export(v1=["get_collection_ref"]) def get_collection_ref(key): """Wrapper for `Graph.get_collection_ref()` using the default graph. See `tf.Graph.get_collection_ref` for more details. Args: key: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. Note that this returns the collection list itself, which can be modified in place to change the collection. @compatibility(eager) Collections are not supported when eager execution is enabled. 
@end_compatibility """ return get_default_graph().get_collection_ref(key) @tf_export(v1=["get_collection"]) def get_collection(key, scope=None): """Wrapper for `Graph.get_collection()` using the default graph. See `tf.Graph.get_collection` for more details. Args: key: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. scope: (Optional.) If supplied, the resulting list is filtered to include only items whose `name` attribute matches using `re.match`. Items without a `name` attribute are never returned if a scope is supplied and the choice or `re.match` means that a `scope` without special tokens filters by prefix. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. The list contains the values in the order under which they were collected. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility """ return get_default_graph().get_collection(key, scope) def get_all_collection_keys(): """Returns a list of collections used in the default graph.""" return get_default_graph().get_all_collection_keys() def name_scope(name, default_name=None, values=None, skip_on_eager=True): """Internal-only entry point for `name_scope*`. Internal ops do not use the public API and instead rely on `ops.name_scope` regardless of the execution mode. This function dispatches to the correct `name_scope*` implementation based on the arguments provided and the current mode. Specifically, * if `values` contains a graph tensor `Graph.name_scope` is used; * `name_scope_v1` is used in graph mode; * `name_scope_v2` -- in eager mode. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the `name` argument is `None`. values: The list of `Tensor` arguments that are passed to the op function. skip_on_eager: Indicates to return NullContextmanager if executing eagerly. 
By default this is True since naming tensors and operations in eager mode have little use and cause unnecessary performance overhead. However, it is important to preserve variable names since they are often useful for debugging and saved models. Returns: `name_scope*` context manager. """ ctx = context.context() in_eager_mode = ctx.executing_eagerly() if not in_eager_mode: return internal_name_scope_v1(name, default_name, values) if skip_on_eager: return NullContextmanager() name = default_name if name is None else name if values: # The presence of a graph tensor in `values` overrides the context. # TODO(slebedev): this is Keras-specific and should be removed. # pylint: disable=unidiomatic-typecheck graph_value = next((value for value in values if type(value) == Tensor), None) # pylint: enable=unidiomatic-typecheck if graph_value is not None: return graph_value.graph.name_scope(name) return name_scope_v2(name or "") class internal_name_scope_v1(object): # pylint: disable=invalid-name """Graph-only version of `name_scope_v1`.""" @property def name(self): return self._name def __init__(self, name, default_name=None, values=None): """Initialize the context manager. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the `name` argument is `None`. values: The list of `Tensor` arguments that are passed to the op function. Raises: TypeError: if `default_name` is passed in but not a string. """ if not (default_name is None or isinstance(default_name, six.string_types)): raise TypeError( "`default_name` type (%s) is not a string type. You likely meant to " "pass this into the `values` kwarg." % type(default_name)) self._name = default_name if name is None else name self._default_name = default_name self._values = values def __enter__(self): """Start the scope block. Returns: The scope name. Raises: ValueError: if neither `name` nor `default_name` is provided but `values` are. 
""" if self._name is None and self._values is not None: # We only raise an error if values is not None (provided) because # currently tf.name_scope(None) (values=None then) is sometimes used as # an idiom to reset to top scope. raise ValueError( "At least one of name (%s) and default_name (%s) must be provided." % (self._name, self._default_name)) g = get_default_graph() if self._values and not g.building_function: # Specialize based on the knowledge that `_get_graph_from_inputs()` # ignores `inputs` when building a function. g_from_inputs = _get_graph_from_inputs(self._values) if g_from_inputs is not g: g = g_from_inputs self._g_manager = g.as_default() self._g_manager.__enter__() else: self._g_manager = None else: self._g_manager = None try: self._name_scope = g.name_scope(self._name) return self._name_scope.__enter__() except: if self._g_manager is not None: self._g_manager.__exit__(*sys.exc_info()) raise def __exit__(self, *exc_info): self._name_scope.__exit__(*exc_info) if self._g_manager is not None: self._g_manager.__exit__(*exc_info) # Named like a function for backwards compatibility with the # @tf_contextlib.contextmanager version, which was switched to a class to avoid # some object creation overhead. @tf_export(v1=["name_scope"]) class name_scope_v1(object): # pylint: disable=invalid-name """A context manager for use when defining a Python op. This context manager validates that the given `values` are from the same graph, makes that graph the default graph, and pushes a name scope in that graph (see `tf.Graph.name_scope` for more details on that). For example, to define a new Python op called `my_op`: ```python def my_op(a, b, c, name=None): with tf.name_scope(name, "MyOp", [a, b, c]) as scope: a = tf.convert_to_tensor(a, name="a") b = tf.convert_to_tensor(b, name="b") c = tf.convert_to_tensor(c, name="c") # Define some computation that uses `a`, `b`, and `c`. 
return foo_op(..., name=scope) ``` """ @property def name(self): return self._name def __init__(self, name, default_name=None, values=None): """Initialize the context manager. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the `name` argument is `None`. values: The list of `Tensor` arguments that are passed to the op function. Raises: TypeError: if `default_name` is passed in but not a string. """ self._name_scope = name_scope( name, default_name, values, skip_on_eager=False) self._name = default_name if name is None else name def __enter__(self): return self._name_scope.__enter__() def __exit__(self, *exc_info): return self._name_scope.__exit__(*exc_info) def enter_eager_name_scope(ctx, name): """Updates the eager context to enter the given name scope.""" old_name = ctx.scope_name if not name: scope_name = "" else: if name.endswith("/"): # A trailing slash breaks out of nested name scopes, indicating a # fully specified scope name, for compatibility with Graph.name_scope. scope_name = name else: scope_name = name + "/" if old_name: scope_name = old_name + scope_name ctx.scope_name = scope_name return scope_name, old_name @tf_export("name_scope", v1=[]) class name_scope_v2(object): """A context manager for use when defining a Python op. This context manager pushes a name scope, which will make the name of all operations added within it have a prefix. For example, to define a new Python op called `my_op`: ```python def my_op(a, b, c, name=None): with tf.name_scope("MyOp") as scope: a = tf.convert_to_tensor(a, name="a") b = tf.convert_to_tensor(b, name="b") c = tf.convert_to_tensor(c, name="c") # Define some computation that uses `a`, `b`, and `c`. return foo_op(..., name=scope) ``` When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`, and `MyOp/c`. If the scope name already exists, the name will be made unique by appending `_n`. 
For example, calling `my_op` the second time will generate `MyOp_1/a`, etc. """ def __init__(self, name): """Initialize the context manager. Args: name: The prefix to use on all names created within the name scope. Raises: ValueError: If name is None, or not a string. """ if name is None or not isinstance(name, six.string_types): raise ValueError("name for name_scope must be a string.") self._name = name self._exit_fns = [] @property def name(self): return self._name def __enter__(self): """Start the scope block. Returns: The scope name. Raises: ValueError: if neither `name` nor `default_name` is provided but `values` are. """ ctx = context.context() if ctx.executing_eagerly(): scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name) self._exit_fns.append( lambda *a: setattr(ctx, "scope_name", old_scope_name)) else: scope = get_default_graph().name_scope(self._name) scope_name = scope.__enter__() self._exit_fns.append(scope.__exit__) return scope_name def __exit__(self, type_arg, value_arg, traceback_arg): exit_fn = self._exit_fns.pop() exit_fn(type_arg, value_arg, traceback_arg) return False # False values do not suppress exceptions def strip_name_scope(name, export_scope): """Removes name scope from a name. Args: name: A `string` name. export_scope: Optional `string`. Name scope to remove. Returns: Name with name scope removed, or the original name if export_scope is None. """ if export_scope: if export_scope[-1] == "/": export_scope = export_scope[:-1] try: # Strips export_scope/, export_scope///, # ^export_scope/, loc:@export_scope/. str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)" return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name def prepend_name_scope(name, import_scope): """Prepends name scope to a name. Args: name: A `string` name. import_scope: Optional `string`. 
Name scope to add. Returns: Name with name scope added, or the original name if import_scope is None. """ if import_scope: if import_scope[-1] == "/": import_scope = import_scope[:-1] try: str_to_replace = r"([\^]|loc:@|^)(.*)" return re.sub(str_to_replace, r"\1" + import_scope + r"/\2", compat.as_str(name)) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name # pylint: disable=g-doc-return-or-yield # pylint: disable=not-context-manager @tf_export(v1=["op_scope"]) @tf_contextlib.contextmanager def op_scope(values, name, default_name=None): """DEPRECATED. Same as name_scope above, just different argument order.""" logging.warn("tf.op_scope(values, name, default_name) is deprecated," " use tf.name_scope(name, default_name, values)") with name_scope(name, default_name=default_name, values=values) as scope: yield scope _proto_function_registry = registry.Registry("proto functions") def register_proto_function(collection_name, proto_type=None, to_proto=None, from_proto=None): """Registers `to_proto` and `from_proto` functions for collection_name. `to_proto` function converts a Python object to the corresponding protocol buffer, and returns the protocol buffer. `from_proto` function converts protocol buffer into a Python object, and returns the object.. Args: collection_name: Name of the collection. proto_type: Protobuf type, such as `saver_pb2.SaverDef`, `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.. to_proto: Function that implements Python object to protobuf conversion. from_proto: Function that implements protobuf to Python object conversion. 
""" if to_proto and not callable(to_proto): raise TypeError("to_proto must be callable.") if from_proto and not callable(from_proto): raise TypeError("from_proto must be callable.") _proto_function_registry.register((proto_type, to_proto, from_proto), collection_name) def get_collection_proto_type(collection_name): """Returns the proto_type for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[0] except LookupError: return None def get_to_proto_function(collection_name): """Returns the to_proto function for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[1] except LookupError: return None def get_from_proto_function(collection_name): """Returns the from_proto function for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[2] except LookupError: return None def _operation_conversion_error(op, dtype=None, name=None, as_ref=False): """Produce a nice error if someone converts an Operation to a Tensor.""" raise TypeError(("Can't convert Operation '%s' to Tensor " "(target dtype=%r, name=%r, as_ref=%r)") % (op.name, dtype, name, as_ref)) def _op_to_colocate_with(v, graph): """Operation object corresponding to v to use for colocation constraints.""" if v is None: return None if isinstance(v, Operation): return v # We always want to colocate with the reference op. # When 'v' is a ResourceVariable, the reference op is the handle creating op. # # What this should be is: # if isinstance(v, ResourceVariable): # return v.handle.op # However, that would require a circular import dependency. # As of October 2018, there were attempts underway to remove # colocation constraints altogether. Assuming that will # happen soon, perhaps this hack to work around the circular # import dependency is acceptable. 
if hasattr(v, "handle") and isinstance(v.handle, Tensor): if graph.building_function: return graph.capture(v.handle).op else: return v.handle.op return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op def _is_keras_symbolic_tensor(x): return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph" tensor_conversion_registry.register_tensor_conversion_function( Operation, _operation_conversion_error) # These symbols were originally defined in this module; import them for # backwards compatibility until all references have been updated to access # them from the indexed_slices.py module. IndexedSlices = indexed_slices.IndexedSlices IndexedSlicesValue = indexed_slices.IndexedSlicesValue convert_to_tensor_or_indexed_slices = \ indexed_slices.convert_to_tensor_or_indexed_slices convert_n_to_tensor_or_indexed_slices = \ indexed_slices.convert_n_to_tensor_or_indexed_slices internal_convert_to_tensor_or_indexed_slices = \ indexed_slices.internal_convert_to_tensor_or_indexed_slices internal_convert_n_to_tensor_or_indexed_slices = \ indexed_slices.internal_convert_n_to_tensor_or_indexed_slices register_tensor_conversion_function = \ tensor_conversion_registry.register_tensor_conversion_function # Helper functions for op wrapper modules generated by `python_op_gen`. def to_raw_op(f): """Make a given op wrapper function `f` raw. Raw op wrappers can only be called with keyword arguments. Args: f: An op wrapper function to make raw. Returns: Raw `f`. """ # Copy `f` to get a new `__dict__`, otherwise `tf_export` will fail # due to double-registration. 
f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__, f.__closure__) return kwarg_only(f) def raise_from_not_ok_status(e, name): message = e.message + (" name: " + name if name is not None else "") # pylint: disable=protected-access six.raise_from(core._status_to_exception(e.code, message), None) # pylint: enable=protected-access def add_exit_callback_to_default_func_graph(fn): """Add a callback to run when the default function graph goes out of scope. Usage: ```python @tf.function def fn(x, v): expensive = expensive_object(v) add_exit_callback_to_default_func_graph(lambda: expensive.release()) return g(x, expensive) fn(x=tf.constant(...), v=...) # `expensive` has been released. ``` Args: fn: A callable that takes no arguments and whose output is ignored. To be executed when exiting func graph scope. Raises: RuntimeError: If executed when the current default graph is not a FuncGraph, or not currently executing in function creation mode (e.g., if inside an init_scope). """ default_graph = get_default_graph() if not default_graph._building_function: # pylint: disable=protected-access raise RuntimeError( "Cannot add scope exit callbacks when not building a function. " "Default graph: {}".format(default_graph)) default_graph._add_scope_exit_callback(fn) # pylint: disable=protected-access def _reconstruct_sequence_inputs(op_def, inputs, attrs): """Regroups a flat list of input tensors into scalar and sequence inputs. Args: op_def: The `op_def_pb2.OpDef` (for knowing the input types) inputs: a list of input `Tensor`s to the op. attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define how long each sequence is) Returns: A list of `Tensor`s (corresponding to scalar inputs) and lists of `Tensor`s (corresponding to sequence inputs). 
""" grouped_inputs = [] i = 0 for input_arg in op_def.input_arg: if input_arg.number_attr: input_len = attrs[input_arg.number_attr].i is_sequence = True elif input_arg.type_list_attr: input_len = len(attrs[input_arg.type_list_attr].list.type) is_sequence = True else: input_len = 1 is_sequence = False if is_sequence: grouped_inputs.append(inputs[i:i + input_len]) else: grouped_inputs.append(inputs[i]) i += input_len assert i == len(inputs) return grouped_inputs class _TensorIterator(object): """Iterates over the leading dim of a Tensor. Performs no error checks.""" def __init__(self, tensor, dim0): self._tensor = tensor self._index = 0 self._limit = dim0 def __iter__(self): return self def __next__(self): if self._index == self._limit: raise StopIteration result = self._tensor[self._index] self._index += 1 return result next = __next__ # python2.x compatibility.
36.68432
115
0.692594
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re import sys import threading import types import numpy as np import six from six.moves import map from six.moves import xrange from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import function_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.core.framework import op_def_pb2 from tensorflow.core.framework import versions_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python import pywrap_tfe from tensorflow.python import tf2 from tensorflow.python.client import pywrap_tf_session from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import monitoring from tensorflow.python.eager import tape from tensorflow.python.framework import c_api_util from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import device as pydev from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import indexed_slices from tensorflow.python.framework import registry from tensorflow.python.framework import tensor_conversion_registry from tensorflow.python.framework import tensor_like from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import traceable_stack from tensorflow.python.framework import versions from tensorflow.python.ops import control_flow_util from tensorflow.python.platform import app from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat from tensorflow.python.util import decorator_utils from tensorflow.python.util import deprecation from tensorflow.python.util import function_utils from tensorflow.python.util import lock_util 
from tensorflow.python.util import memory from tensorflow.python.util import object_identity from tensorflow.python.util import tf_contextlib from tensorflow.python.util import tf_stack from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.deprecation import deprecated_args from tensorflow.python.util.lazy_loader import LazyLoader from tensorflow.python.util.tf_export import kwarg_only from tensorflow.python.util.tf_export import tf_export ag_ctx = LazyLoader( "ag_ctx", globals(), "tensorflow.python.autograph.core.ag_ctx") _USE_C_API = True _USE_C_SHAPES = True _api_usage_gauge = monitoring.BoolGauge( "/tensorflow/api/ops_eager_execution", "Whether ops.enable_eager_execution() is called.") _TensorLike = tensor_like._TensorLike _DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE def tensor_id(tensor): return tensor._id class _UserDeviceSpec(object): def __init__(self, device_name_or_function): self._device_name_or_function = device_name_or_function self.display_name = str(self._device_name_or_function) self.function = device_name_or_function self.raw_string = None if isinstance(device_name_or_function, pydev.MergeDevice): self.is_null_merge = device_name_or_function.is_null_merge elif callable(device_name_or_function): self.is_null_merge = False dev_func = self._device_name_or_function func_name = function_utils.get_func_name(dev_func) func_code = function_utils.get_func_code(dev_func) if func_code: fname = func_code.co_filename lineno = func_code.co_firstlineno else: fname = "unknown" lineno = -1 self.display_name = "%s<%s, %d>" % (func_name, fname, lineno) elif device_name_or_function is None: self.is_null_merge = False else: self.raw_string = device_name_or_function self.function = pydev.merge_device(device_name_or_function) self.is_null_merge = self.function.is_null_merge self.fast_string_merge = isinstance(self.function, pydev.MergeDevice) def string_merge(self, node_def): if self.fast_string_merge: return 
self.function.shortcut_string_merge(node_def) return compat.as_str(_device_string(self.function(node_def))) class NullContextmanager(object): def __init__(self, *args, **kwargs): pass def __enter__(self): pass def __exit__(self, type_arg, value_arg, traceback_arg): return False def _override_helper(clazz_object, operator, func): existing = getattr(clazz_object, operator, None) if existing is not None: if not isinstance(existing, type(object.__lt__)): raise ValueError("operator %s cannot be overwritten again on class %s." % (operator, clazz_object)) if operator not in Tensor.OVERLOADABLE_OPERATORS: raise ValueError("Overriding %s is disallowed" % operator) setattr(clazz_object, operator, func) def _as_graph_element(obj): conv_fn = getattr(obj, "_as_graph_element", None) if conv_fn and callable(conv_fn): return conv_fn() return None _TENSOR_LIKE_TYPES = tuple() def is_dense_tensor_like(t): return isinstance(t, _TENSOR_LIKE_TYPES) def register_dense_tensor_like_type(tensor_type): if not (hasattr(tensor_type, "name") and isinstance(tensor_type.name, property)): raise TypeError("Type %s does not define a `name` property" % tensor_type.__name__) if not (hasattr(tensor_type, "dtype") and isinstance(tensor_type.dtype, property)): raise TypeError("Type %s does not define a `dtype` property" % tensor_type.__name__) if not (hasattr(tensor_type, "shape") and isinstance(tensor_type.shape, property)): raise TypeError("Type %s does not define a `shape` property" % tensor_type.__name__) global _TENSOR_LIKE_TYPES _TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type]) def uid(): return pywrap_tfe.TFE_Py_UID() def numpy_text(tensor, is_repr=False): if tensor.dtype.is_numpy_compatible: text = repr(tensor._numpy()) if is_repr else str(tensor._numpy()) else: text = "<unprintable>" if "\n" in text: text = "\n" + text return text @tf_export(v1=["enable_tensor_equality"]) def enable_tensor_equality(): Tensor._USE_EQUALITY = True @tf_export(v1=["disable_tensor_equality"]) def 
disable_tensor_equality(): Tensor._USE_EQUALITY = False @tf_export("Tensor") class Tensor(_TensorLike): OVERLOADABLE_OPERATORS = { "__add__", "__radd__", "__sub__", "__rsub__", "__mul__", "__rmul__", "__div__", "__rdiv__", "__truediv__", "__rtruediv__", "__floordiv__", "__rfloordiv__", "__mod__", "__rmod__", "__lt__", "__le__", "__gt__", "__ge__", "__ne__", "__eq__", "__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__", "__getitem__", "__pow__", "__rpow__", "__invert__", "__neg__", "__abs__", "__matmul__", "__rmatmul__" } _USE_EQUALITY = tf2.enabled() def __init__(self, op, value_index, dtype): if not isinstance(op, Operation): raise TypeError("op needs to be an Operation: %s" % op) self._op = op self._value_index = value_index self._dtype = dtypes.as_dtype(dtype) self._tf_output = None self._shape_val = None self._consumers = [] self._id = uid() self._name = None @staticmethod def _create_with_tf_output(op, value_index, dtype, tf_output): ret = Tensor(op, value_index, dtype) ret._tf_output = tf_output return ret @property def op(self): return self._op @property def dtype(self): return self._dtype @property def graph(self): return self._op.graph @property def name(self): if self._name is None: if not self._op.name: raise ValueError("Operation was not named: %s" % self._op) self._name = "%s:%d" % (self._op.name, self._value_index) return self._name @property def device(self): return self._op.device @property def shape(self): if self._shape_val is None: self._shape_val = self._c_api_shape() return self._shape_val def _c_api_shape(self): c_graph = self._op._graph._c_graph shape_vec, unknown_shape = pywrap_tf_session.TF_GraphGetTensorShapeHelper( c_graph, self._as_tf_output()) if unknown_shape: return tensor_shape.unknown_shape() else: shape_vec = [None if d == -1 else d for d in shape_vec] return tensor_shape.TensorShape(shape_vec) @property def _shape(self): logging.warning("Tensor._shape is private, use Tensor.shape " "instead. 
Tensor._shape will eventually be removed.") return self.shape @_shape.setter def _shape(self, value): raise ValueError( "Tensor._shape cannot be assigned, use Tensor.set_shape instead.") def _disallow_when_autograph_disabled(self, task): raise errors.OperatorNotAllowedInGraphError( "{} is not allowed: AutoGraph is disabled in this function." " Try decorating it directly with @tf.function.".format(task)) def _disallow_when_autograph_enabled(self, task): raise errors.OperatorNotAllowedInGraphError( "{} is not allowed: AutoGraph did not convert this function. Try" " decorating it directly with @tf.function.".format(task)) def _disallow_in_graph_mode(self, task): raise errors.OperatorNotAllowedInGraphError( "{} is not allowed in Graph execution. Use Eager execution or decorate" " this function with @tf.function.".format(task)) def _disallow_bool_casting(self): if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED: self._disallow_when_autograph_disabled( "using a `tf.Tensor` as a Python `bool`") elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED: self._disallow_when_autograph_enabled( "using a `tf.Tensor` as a Python `bool`") else: self._disallow_in_graph_mode("using a `tf.Tensor` as a Python `bool`") def _disallow_iteration(self): if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED: self._disallow_when_autograph_disabled("iterating over `tf.Tensor`") elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED: self._disallow_when_autograph_enabled("iterating over `tf.Tensor`") else: self._disallow_in_graph_mode("iterating over `tf.Tensor`") def __iter__(self): if not context.executing_eagerly(): self._disallow_iteration() shape = self._shape_tuple() if shape is None: raise TypeError("Cannot iterate over a tensor with unknown shape.") if not shape: raise TypeError("Cannot iterate over a scalar tensor.") if shape[0] is None: raise TypeError( "Cannot iterate over a tensor with unknown first dimension.") return _TensorIterator(self, 
shape[0]) def _shape_as_list(self): if self.shape.ndims is not None: return [dim.value for dim in self.shape.dims] else: return None def _shape_tuple(self): shape = self._shape_as_list() if shape is None: return None return tuple(shape) def _rank(self): return self.shape.ndims def get_shape(self): return self.shape def set_shape(self, shape): self._shape_val = None if not isinstance(shape, tensor_shape.TensorShape): shape = tensor_shape.TensorShape(shape) dim_list = [] if shape.dims is None: unknown_shape = True else: unknown_shape = False for dim in shape.dims: if dim.value is None: dim_list.append(-1) else: dim_list.append(dim.value) try: pywrap_tf_session.TF_GraphSetTensorShape_wrapper( self._op._graph._c_graph, self._as_tf_output(), dim_list, unknown_shape) except errors.InvalidArgumentError as e: raise ValueError(str(e)) @property def value_index(self): return self._value_index def consumers(self): consumer_names = pywrap_tf_session.TF_OperationOutputConsumers_wrapper( self._as_tf_output()) return [ self.graph._get_operation_by_name_unsafe(name) for name in consumer_names ] def _as_node_def_input(self): if not self._op.name: raise ValueError("Operation was not named: %s" % self._op) if self._value_index == 0: return self._op.name else: return "%s:%d" % (self._op.name, self._value_index) def _as_tf_output(self): if self._tf_output is None: self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index) return self._tf_output def __str__(self): return "Tensor(\"%s\"%s%s%s)" % ( self.name, (", shape=%s" % self.get_shape()) if self.get_shape().ndims is not None else "", (", dtype=%s" % self._dtype.name) if self._dtype else "", (", device=%s" % self.device) if self.device else "") def __repr__(self): return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(), self._dtype.name) def __hash__(self): g = getattr(self, "graph", None) if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and (g is None or g.building_function)): raise 
TypeError("Tensor is unhashable. " "Instead, use tensor.ref() as the key.") else: return id(self) def __copy__(self): cls = self.__class__ result = cls.__new__(cls) result.__dict__.update(self.__dict__) return result # operators to run when the left operand is an ndarray, because it # accords the Tensor class higher priority than an ndarray, or a # numpy matrix. # TODO(mrry): Convert this to using numpy's __numpy_ufunc__ __array_priority__ = 100 def __array__(self): raise NotImplementedError("Cannot convert a symbolic Tensor ({}) to a numpy" " array.".format(self.name)) def __len__(self): raise TypeError("len is not well defined for symbolic Tensors. ({}) " "Please call `x.shape` rather than `len(x)` for " "shape information.".format(self.name)) @staticmethod def _override_operator(operator, func): _override_helper(Tensor, operator, func) def __bool__(self): self._disallow_bool_casting() def __nonzero__(self): self._disallow_bool_casting() def eval(self, feed_dict=None, session=None): return _eval_using_default_session(self, feed_dict, self.graph, session) @deprecation.deprecated(None, "Use ref() instead.") def experimental_ref(self): return self.ref() def ref(self): return object_identity.Reference(self) class _EagerTensorBase(Tensor): def __complex__(self): return complex(self._numpy()) def __int__(self): return int(self._numpy()) def __long__(self): return long(self._numpy()) def __float__(self): return float(self._numpy()) def __index__(self): return self._numpy().__index__() def __bool__(self): return bool(self._numpy()) __nonzero__ = __bool__ def __format__(self, format_spec): return self._numpy().__format__(format_spec) def __reduce__(self): return convert_to_tensor, (self._numpy(),) def __copy__(self): return self def __deepcopy__(self, memo): # Eager Tensors are immutable so it's safe to return themselves as a copy. 
del memo return self def __str__(self): return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape, self.dtype.name) def __repr__(self): return "<tf.Tensor: shape=%s, dtype=%s, numpy=%s>" % ( self.shape, self.dtype.name, numpy_text(self, is_repr=True)) def __len__(self): if not self.shape.ndims: raise TypeError("Scalar tensor has no `len()`") try: return self._shape_tuple()[0] except core._NotOkStatusException as e: six.raise_from(core._status_to_exception(e.code, e.message), None) def _numpy_internal(self): raise NotImplementedError() def _numpy(self): try: return self._numpy_internal() except core._NotOkStatusException as e: six.raise_from(core._status_to_exception(e.code, e.message), None) @property def dtype(self): return dtypes._INTERN_TABLE[self._datatype_enum()] def numpy(self): maybe_arr = self._numpy() return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr @property def backing_device(self): raise NotImplementedError() def _datatype_enum(self): raise NotImplementedError() def _shape_tuple(self): raise NotImplementedError() def _rank(self): raise NotImplementedError() def _num_elements(self): raise NotImplementedError() def _copy_to_device(self, device_name): raise NotImplementedError() @staticmethod def _override_operator(name, func): setattr(_EagerTensorBase, name, func) def _copy_nograd(self, ctx=None, device_name=None): if ctx is None: ctx = context.context() if device_name is None: device_name = ctx.device_name try: ctx.ensure_initialized() new_tensor = self._copy_to_device(device_name) except core._NotOkStatusException as e: six.raise_from(core._status_to_exception(e.code, e.message), None) return new_tensor def _copy(self, ctx=None, device_name=None): new_tensor = self._copy_nograd(ctx, device_name) if context.executing_eagerly(): self_device = self.device def grad_fun(dresult): return [ dresult._copy(device_name=self_device) if hasattr(dresult, "_copy") else dresult ] tape.record_operation("_copy", [new_tensor], 
[self], grad_fun) return new_tensor @property def shape(self): if self._tensor_shape is None: try: self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple()) except core._NotOkStatusException as e: six.raise_from(core._status_to_exception(e.code, e.message), None) return self._tensor_shape def get_shape(self): return self.shape def _shape_as_list(self): return list(self._shape_tuple()) @property def ndim(self): return self.shape.ndims @deprecation.deprecated(None, "Use tf.identity instead.") def cpu(self): return self._copy(context.context(), "CPU:0") @deprecation.deprecated(None, "Use tf.identity instead.") def gpu(self, gpu_index=0): return self._copy(context.context(), "GPU:" + str(gpu_index)) def set_shape(self, shape): if not self.shape.is_compatible_with(shape): raise ValueError( "Tensor's shape %s is not compatible with supplied shape %s" % (self.shape, shape)) # Methods not supported / implemented for Eager Tensors. @property def op(self): raise AttributeError( "Tensor.op is meaningless when eager execution is enabled.") @property def graph(self): raise AttributeError( "Tensor.graph is meaningless when eager execution is enabled.") @property def name(self): raise AttributeError( "Tensor.name is meaningless when eager execution is enabled.") @property def value_index(self): raise AttributeError( "Tensor.value_index is meaningless when eager execution is enabled.") def consumers(self): raise NotImplementedError( "Tensor.consumers is meaningless when eager execution is enabled.") def _add_consumer(self, consumer): raise NotImplementedError( "_add_consumer not supported when eager execution is enabled.") def _as_node_def_input(self): raise NotImplementedError( "_as_node_def_input not supported when eager execution is enabled.") def _as_tf_output(self): raise NotImplementedError( "_as_tf_output not supported when eager execution is enabled.") def eval(self, feed_dict=None, session=None): raise NotImplementedError( "eval is not supported when eager 
execution is enabled, " "is .numpy() what you're looking for?") EagerTensor = pywrap_tfe.TFE_Py_InitEagerTensor(_EagerTensorBase) register_dense_tensor_like_type(Tensor) @tf_export(v1=["convert_to_tensor"]) def convert_to_tensor_v1(value, dtype=None, name=None, preferred_dtype=None, dtype_hint=None): preferred_dtype = deprecation.deprecated_argument_lookup( "dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype) return convert_to_tensor_v2(value, dtype, preferred_dtype, name) @tf_export("convert_to_tensor", v1=[]) def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None): return convert_to_tensor( value=value, dtype=dtype, name=name, preferred_dtype=dtype_hint, as_ref=False) def _error_prefix(name): return "" if name is None else "%s: " % name def convert_to_tensor(value, dtype=None, name=None, as_ref=False, preferred_dtype=None, dtype_hint=None, ctx=None, accepted_result_types=(Tensor,)): preferred_dtype = preferred_dtype or dtype_hint if isinstance(value, EagerTensor): if ctx is None: ctx = context.context() if not ctx.executing_eagerly(): graph = get_default_graph() if not graph.building_function: raise RuntimeError("Attempting to capture an EagerTensor without " "building a function.") return graph.capture(value, name=name) if dtype is not None: dtype = dtypes.as_dtype(dtype) if isinstance(value, Tensor): if dtype is not None and not dtype.is_compatible_with(value.dtype): raise ValueError( "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" % (dtype.name, value.dtype.name, value)) return value if preferred_dtype is not None: preferred_dtype = dtypes.as_dtype(preferred_dtype) for base_type, conversion_func in tensor_conversion_registry.get(type(value)): ret = None if dtype is None and preferred_dtype is not None: try: ret = conversion_func( value, dtype=preferred_dtype, name=name, as_ref=as_ref) except (TypeError, ValueError): pass else: if (ret is not NotImplemented and ret.dtype.base_dtype != preferred_dtype.base_dtype): 
raise TypeError("convert_to_tensor did not convert to " "the preferred dtype: %s vs %s " % (ret.dtype.base_dtype, preferred_dtype.base_dtype)) if ret is None: ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref) if ret is NotImplemented: continue if not isinstance(ret, accepted_result_types): raise RuntimeError( "%sConversion function %r for type %s returned non-Tensor: %r" % (_error_prefix(name), conversion_func, base_type, ret)) if dtype and not dtype.is_compatible_with(ret.dtype): raise RuntimeError( "%sConversion function %r for type %s returned incompatible " "dtype: requested = %s, actual = %s" % (_error_prefix(name), conversion_func, base_type, dtype.name, ret.dtype.name)) return ret raise TypeError("%sCannot convert %r with type %s to Tensor: " "no conversion function registered." % (_error_prefix(name), value, type(value))) internal_convert_to_tensor = convert_to_tensor def internal_convert_n_to_tensor(values, dtype=None, name=None, as_ref=False, preferred_dtype=None, ctx=None): if not isinstance(values, collections_abc.Sequence): raise TypeError("values must be a sequence.") ret = [] if ctx is None: ctx = context.context() for i, value in enumerate(values): n = None if name is None else "%s_%d" % (name, i) ret.append( convert_to_tensor( value, dtype=dtype, name=n, as_ref=as_ref, preferred_dtype=preferred_dtype, ctx=ctx)) return ret def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None): return internal_convert_n_to_tensor( values=values, dtype=dtype, name=name, preferred_dtype=preferred_dtype, as_ref=False) def convert_to_tensor_or_composite(value, dtype=None, name=None): return internal_convert_to_tensor_or_composite( value=value, dtype=dtype, name=name, as_ref=False) def internal_convert_to_tensor_or_composite(value, dtype=None, name=None, as_ref=False): if isinstance(value, composite_tensor.CompositeTensor): value_dtype = getattr(value, "dtype", None) if dtype and not 
dtypes.as_dtype(dtype).is_compatible_with(value_dtype): raise ValueError( "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" % (dtypes.as_dtype(dtype).name, value.dtype.name, str(value))) return value else: return convert_to_tensor( value, dtype=dtype, name=name, as_ref=as_ref, accepted_result_types=(Tensor, composite_tensor.CompositeTensor)) def internal_convert_n_to_tensor_or_composite(values, dtype=None, name=None, as_ref=False): if not isinstance(values, collections_abc.Sequence): raise TypeError("values must be a sequence.") ret = [] for i, value in enumerate(values): if value is None: ret.append(value) else: n = None if name is None else "%s_%d" % (name, i) ret.append( internal_convert_to_tensor_or_composite( value, dtype=dtype, name=n, as_ref=as_ref)) return ret def convert_n_to_tensor_or_composite(values, dtype=None, name=None): return internal_convert_n_to_tensor_or_composite( values=values, dtype=dtype, name=name, as_ref=False) def _device_string(dev_spec): if pydev.is_device_spec(dev_spec): return dev_spec.to_string() else: return dev_spec def _NodeDef(op_type, name, attrs=None): node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type), name=compat.as_bytes(name)) if attrs: for k, v in six.iteritems(attrs): node_def.attr[k].CopyFrom(v) return node_def _VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/>]*$") _VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/>]*$") def _create_c_op(graph, node_def, inputs, control_inputs, op_def=None): if op_def is None: op_def = graph._get_op_def(node_def.op) inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr) # pylint: disable=protected-access op_desc = pywrap_tf_session.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op), compat.as_str(node_def.name)) if node_def.device: pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device)) # Add inputs for op_input in inputs: if isinstance(op_input, (list, tuple)): 
pywrap_tf_session.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input]) else: pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output()) # Add control inputs for control_input in control_inputs: pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op) # pylint: enable=protected-access # Add attrs for name, attr_value in node_def.attr.items(): serialized = attr_value.SerializeToString() # TODO(skyewm): this creates and deletes a new TF_Status for every attr. # It might be worth creating a convenient way to re-use the same status. pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized) try: c_op = pywrap_tf_session.TF_FinishOperation(op_desc) except errors.InvalidArgumentError as e: # Convert to ValueError for backwards compatibility. raise ValueError(str(e)) return c_op @tf_export("Operation") class Operation(object): def __init__(self, node_def, g, inputs=None, output_types=None, control_inputs=None, input_types=None, original_op=None, op_def=None): # For internal use only: `node_def` can be set to a TF_Operation to create # an Operation for that op. This is useful for creating Operations for ops # indirectly created by C API methods, e.g. the ops created by # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields # should be None. 
if isinstance(node_def, node_def_pb2.NodeDef): if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0: raise ValueError( "Cannot create a tensor proto whose content is larger than 2GB.") if not _VALID_OP_NAME_REGEX.match(node_def.name): raise ValueError("'%s' is not a valid node name" % node_def.name) c_op = None elif type(node_def).__name__ == "TF_Operation": assert inputs is None assert output_types is None assert control_inputs is None assert input_types is None assert original_op is None assert op_def is None c_op = node_def else: raise TypeError("node_def needs to be a NodeDef: %s" % node_def) if not isinstance(g, Graph): raise TypeError("g needs to be a Graph: %s" % g) self._graph = g if inputs is None: inputs = [] elif not isinstance(inputs, list): raise TypeError("inputs needs to be a list of Tensors: %s" % inputs) for a in inputs: if not isinstance(a, Tensor): raise TypeError("input needs to be a Tensor: %s" % a) if input_types is None: input_types = [i.dtype.base_dtype for i in inputs] else: if not all( x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)): raise TypeError("In op '%s', input types (%s) are not compatible " "with expected types (%s)" % (node_def.name, [i.dtype for i in inputs], input_types)) # Build the list of control inputs. control_input_ops = [] if control_inputs: for c in control_inputs: control_op = None if isinstance(c, Operation): control_op = c elif isinstance(c, (Tensor, IndexedSlices)): control_op = c.op else: raise TypeError("Control input must be an Operation, " "a Tensor, or IndexedSlices: %s" % c) control_input_ops.append(control_op) # This will be set by self.inputs. self._inputs_val = None # pylint: disable=protected-access self._original_op = original_op self._traceback = tf_stack.extract_stack() # List of _UserDevSpecs holding code location of device context manager # invocations and the users original argument to them. 
self._device_code_locations = None # Dict mapping op name to file and line information for op colocation # context managers. self._colocation_code_locations = None self._control_flow_context = self.graph._get_control_flow_context() # Gradient function for this op. There are three ways to specify gradient # function, and first available gradient gets used, in the following order. # 1. self._gradient_function # 2. Gradient name registered by "_gradient_op_type" attribute. # 3. Gradient name registered by op.type. self._gradient_function = None # Initialize self._c_op. if c_op: self._c_op = c_op op_def = g._get_op_def(pywrap_tf_session.TF_OperationOpType(c_op)) name = self.name else: if op_def is None: op_def = self._graph._get_op_def(node_def.op) self._c_op = _create_c_op(self._graph, node_def, inputs, control_input_ops, op_def) name = compat.as_str(node_def.name) # pylint: enable=protected-access self._is_stateful = op_def.is_stateful # Initialize self._outputs. num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op) self._outputs = [] for i in range(num_outputs): tf_output = c_api_util.tf_output(self._c_op, i) output_type = pywrap_tf_session.TF_OperationOutputType(tf_output) tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output) # pylint: disable=protected-access self._outputs.append(tensor) self._id_value = self._graph._add_op(self, name) # pylint: disable=protected-access if not c_op: self._control_flow_post_processing(input_tensors=inputs) def _control_flow_post_processing(self, input_tensors=None): if input_tensors is None: input_tensors = self.inputs for input_tensor in input_tensors: control_flow_util.CheckInputFromValidContext(self, input_tensor.op) if self._control_flow_context is not None: self._control_flow_context.AddOp(self) def colocation_groups(self): default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)] try: class_attr = self.get_attr("_class") except ValueError: # This op has no explicit colocation group, so 
it is itself its # own root of a colocation group. return default_colocation_group attr_groups = [ class_name for class_name in class_attr if class_name.startswith(b"loc:@") ] # If there are no colocation groups in the explicit _class field, # return the default colocation group. return attr_groups if attr_groups else default_colocation_group def values(self): return tuple(self.outputs) def _get_control_flow_context(self): return self._control_flow_context def _set_control_flow_context(self, ctx): self._control_flow_context = ctx @property def name(self): return pywrap_tf_session.TF_OperationName(self._c_op) @property def _id(self): return self._id_value @property def device(self): return pywrap_tf_session.TF_OperationDevice(self._c_op) @property def _device_assignments(self): return self._device_code_locations or [] @property def _colocation_dict(self): locations_dict = self._colocation_code_locations or {} return locations_dict.copy() @property def _output_types(self): num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op) output_types = [ int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i))) for i in xrange(num_outputs) ] return output_types def _tf_output(self, output_idx): tf_output = pywrap_tf_session.TF_Output() tf_output.oper = self._c_op tf_output.index = output_idx return tf_output def _tf_input(self, input_idx): tf_input = pywrap_tf_session.TF_Input() tf_input.oper = self._c_op tf_input.index = input_idx return tf_input def _set_device(self, device): # pylint: disable=redefined-outer-name self._set_device_from_string(compat.as_str(_device_string(device))) def _set_device_from_string(self, device_str): pywrap_tf_session.SetRequestedDevice( self._graph._c_graph, # pylint: disable=protected-access self._c_op, # pylint: disable=protected-access device_str) def _update_input(self, index, tensor): if not isinstance(tensor, Tensor): raise TypeError("tensor must be a Tensor: %s" % tensor) _assert_same_graph(self, tensor) # Reset cached 
inputs. self._inputs_val = None pywrap_tf_session.UpdateEdge( self._graph._c_graph, # pylint: disable=protected-access tensor._as_tf_output(), # pylint: disable=protected-access self._tf_input(index)) def _add_while_inputs(self, tensors): for tensor in tensors: if not isinstance(tensor, Tensor): raise TypeError("tensor must be a Tensor: %s" % tensor) _assert_same_graph(self, tensor) # Reset cached inputs. self._inputs_val = None pywrap_tf_session.AddWhileInputHack( self._graph._c_graph, # pylint: disable=protected-access tensor._as_tf_output(), # pylint: disable=protected-access self._c_op) def _add_control_inputs(self, ops): for op in ops: if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) pywrap_tf_session.AddControlInput( self._graph._c_graph, # pylint: disable=protected-access self._c_op, # pylint: disable=protected-access op._c_op) # pylint: disable=protected-access def _add_control_input(self, op): if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) pywrap_tf_session.AddControlInput( self._graph._c_graph, # pylint: disable=protected-access self._c_op, # pylint: disable=protected-access op._c_op) # pylint: disable=protected-access def _remove_all_control_inputs(self): pywrap_tf_session.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access def _add_outputs(self, types, shapes): assert len(types) == len(shapes) orig_num_outputs = len(self.outputs) for i in range(len(types)): t = Tensor(self, orig_num_outputs + i, types[i]) self._outputs.append(t) t.set_shape(shapes[i]) def __str__(self): return str(self.node_def) def __repr__(self): return "<tf.Operation '%s' type=%s>" % (self.name, self.type) @property def outputs(self): return self._outputs @property def inputs(self): if self._inputs_val is None: # pylint: disable=protected-access self._inputs_val = tuple( map(self.graph._get_tensor_by_tf_output, pywrap_tf_session.GetOperationInputs(self._c_op))) # 
pylint: enable=protected-access return self._inputs_val @property def _input_types(self): num_inputs = pywrap_tf_session.TF_OperationNumInputs(self._c_op) input_types = [ dtypes.as_dtype( pywrap_tf_session.TF_OperationInputType(self._tf_input(i))) for i in xrange(num_inputs) ] return input_types @property def control_inputs(self): control_c_ops = pywrap_tf_session.TF_OperationGetControlInputs_wrapper( self._c_op) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe( pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops ] # pylint: enable=protected-access @property def _control_outputs(self): control_c_ops = pywrap_tf_session.TF_OperationGetControlOutputs_wrapper( self._c_op) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe( pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops ] # pylint: enable=protected-access @property def type(self): return pywrap_tf_session.TF_OperationOpType(self._c_op) @property def graph(self): return self._graph @property def node_def(self): # pylint: disable=line-too-long # pylint: enable=line-too-long with c_api_util.tf_buffer() as buf: pywrap_tf_session.TF_OperationToNodeDef(self._c_op, buf) data = pywrap_tf_session.TF_GetBuffer(buf) node_def = node_def_pb2.NodeDef() node_def.ParseFromString(compat.as_bytes(data)) return node_def @property def op_def(self): # pylint: disable=line-too-long # pylint: enable=line-too-long return self._graph._get_op_def(self.type) @property def traceback(self): return self._traceback def _set_attr(self, attr_name, attr_value): buf = pywrap_tf_session.TF_NewBufferFromString( compat.as_bytes(attr_value.SerializeToString())) try: self._set_attr_with_buf(attr_name, buf) finally: pywrap_tf_session.TF_DeleteBuffer(buf) def _set_attr_with_buf(self, attr_name, attr_buf): # pylint: disable=protected-access pywrap_tf_session.SetAttr(self._graph._c_graph, self._c_op, attr_name, attr_buf) # pylint: enable=protected-access 
  def _set_func_attr(self, attr_name, func_name):
    """Set the attr `attr_name` of this op to the function named `func_name`."""
    func = attr_value_pb2.NameAttrList(name=func_name)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))

  def _set_func_list_attr(self, attr_name, func_names):
    """Set the attr `attr_name` to a list of functions given by `func_names`."""
    funcs = [attr_value_pb2.NameAttrList(name=func_name)
             for func_name in func_names]
    funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))

  def _set_type_list_attr(self, attr_name, types):
    """Set the attr `attr_name` to a list of types.

    `types` may be a list of `DType`s or a list of datatype enum ints; an
    empty list is a no-op (the attr is left unset).
    """
    if not types:
      return
    if isinstance(types[0], dtypes.DType):
      # Convert DType objects to their wire-format enum values.
      types = [dt.as_datatype_enum for dt in types]
    types_list = attr_value_pb2.AttrValue.ListValue(type=types)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))

  def _set_shape_list_attr(self, attr_name, shapes):
    """Set the attr `attr_name` to a list of `TensorShape`s (as protos)."""
    shapes = [s.as_proto() for s in shapes]
    shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))

  def _clear_attr(self, attr_name):
    """Remove the attr `attr_name` from this op's underlying C operation."""
    # pylint: disable=protected-access
    pywrap_tf_session.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
    # pylint: enable=protected-access

  def get_attr(self, name):
    """Returns the value of the attr of this op with the given `name`.

    Args:
      name: The name of the attr to fetch.

    Returns:
      The value of the attr, converted to a Python value where possible
      (e.g. `type` attrs become `DType`s, list attrs become Python lists).
      An unset oneof or an empty list attr returns `[]`.

    Raises:
      ValueError: If this op does not have an attr with the given `name`.
    """
    # Fields of AttrValue.ListValue that we know how to unpack below.
    fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
    try:
      with c_api_util.tf_buffer() as buf:
        pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)
        data = pywrap_tf_session.TF_GetBuffer(buf)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))
    x = attr_value_pb2.AttrValue()
    x.ParseFromString(data)

    oneof_value = x.WhichOneof("value")
    if oneof_value is None:
      # The attr exists but carries no value; treat as an empty list.
      return []
    if oneof_value == "list":
      for f in fields:
        if getattr(x.list, f):
          if f == "type":
            return [dtypes.as_dtype(t) for t in x.list.type]
          else:
            return list(getattr(x.list, f))
      # All list fields empty: the attr is an empty list of some type.
      return []
    if oneof_value == "type":
      return dtypes.as_dtype(x.type)
    assert oneof_value in fields, "Unsupported field type in " + str(x)
    return getattr(x, oneof_value)

  def _get_attr_type(self, name):
    """Returns the `DType` value of the attr of this op with the given `name`.

    Fast path that avoids a full AttrValue proto round-trip by asking the
    C API for the type enum directly.
    """
    try:
      dtype_enum = pywrap_tf_session.TF_OperationGetAttrType(self._c_op, name)
      return _DTYPES_INTERN_TABLE[dtype_enum]
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))

  def _get_attr_bool(self, name):
    """Returns the `bool` value of the attr of this op with the given `name`.

    Raises:
      ValueError: If no attr with `name` exists (or it is not a bool).
    """
    try:
      return pywrap_tf_session.TF_OperationGetAttrBool(self._c_op, name)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))

  def _get_attr_int(self, name):
    """Returns the `int` value of the attr of this op with the given `name`.

    Raises:
      ValueError: If no attr with `name` exists (or it is not an int).
    """
    try:
      return pywrap_tf_session.TF_OperationGetAttrInt(self._c_op, name)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))

  def run(self, feed_dict=None, session=None):
    """Runs this operation in a `Session`.

    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values.
      session: (Optional.) The `Session` to use; if `None`, the default
        session is used.
    """
    _run_using_default_session(self, feed_dict, self.graph, session)


# Registry mapping op type name -> gradient function (or None for "no
# gradient"); populated via `RegisterGradient` / `no_gradient` below.
_gradient_registry = registry.Registry("gradient")


@tf_export("RegisterGradient")
class RegisterGradient(object):
  """Decorator that registers a gradient function for an op type.

  Usage: `@RegisterGradient("OpTypeName")` above a function; the function is
  stored in `_gradient_registry` under that op type and returned unchanged.
  """

  def __init__(self, op_type):
    """Saves the `op_type` (the string name of the op) to register under.

    Raises:
      TypeError: If `op_type` is not a string.
    """
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string")
    self._op_type = op_type

  def __call__(self, f):
    """Registers `f` as the gradient function for the saved op type."""
    _gradient_registry.register(f, self._op_type)
    return f


@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
  """Marks ops of type `op_type` as not differentiable.

  Registers `None` as the gradient function, which gradient code treats as
  "this op has no gradient".

  Args:
    op_type: The string name of the op type.

  Raises:
    TypeError: If `op_type` is not a string.
  """
  if not isinstance(op_type, six.string_types):
    raise TypeError("op_type must be a string")
  _gradient_registry.register(None, op_type)


# Aliases for the old names, will be eventually removed.
NoGradient = no_gradient
NotDifferentiable = no_gradient


def get_gradient_function(op):
  """Returns the gradient function registered for `op`, or None.

  Lookup order: ops with no inputs have no gradient; an explicitly attached
  `op._gradient_function` wins; otherwise the "_gradient_op_type" attr (set
  via `Graph.gradient_override_map`) overrides the op's own type for the
  registry lookup.
  """
  if not op.inputs:
    return None

  gradient_function = op._gradient_function  # pylint: disable=protected-access
  if gradient_function:
    return gradient_function

  try:
    op_type = op.get_attr("_gradient_op_type")
  except ValueError:
    op_type = op.type
  return _gradient_registry.lookup(op_type)


def set_shape_and_handle_data_for_outputs(_):
  """No-op; presumably retained for API compatibility — TODO confirm callers."""
  pass


class OpStats(object):
  """A holder for one statistic about an op (e.g. a flops or weights count).

  Pairs a `statistic_type` string with a numeric `value`; instances of the
  same statistic type can be accumulated with `+=`.
  """

  def __init__(self, statistic_type, value=None):
    """Sets up the initial placeholders for the statistics.

    Args:
      statistic_type: A string identifying what kind of statistic this is.
      value: (Optional.) The initial value; `None` means "not yet counted".
    """
    self.statistic_type = statistic_type
    self.value = value

  @property
  def statistic_type(self):
    return self._statistic_type

  @statistic_type.setter
  def statistic_type(self, statistic_type):
    self._statistic_type = statistic_type

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, value):
    self._value = value

  def __iadd__(self, other):
    """Accumulates another `OpStats` of the same type into this one.

    A `None` value on either side is treated as "no data": it is replaced by
    (or does not affect) the other side's value.

    Raises:
      ValueError: If `other` has a different `statistic_type`.
    """
    if other.statistic_type != self.statistic_type:
      raise ValueError("Can't add an OpStat of type %s to one of %s."
                       % (self.statistic_type, other.statistic_type))
    if self.value is None:
      self.value = other.value
    elif other.value is not None:
      self._value += other.value
    return self


# Registry keyed by "<op_type>,<statistic_type>" -> statistics function;
# populated via `RegisterStatistics` below.
_stats_registry = registry.Registry("statistical functions")


class RegisterStatistics(object):
  """Decorator that registers a statistics function for an op type.

  The registry key is "<op_type>,<statistic_type>", hence neither part may
  contain a comma.
  """

  def __init__(self, op_type, statistic_type):
    """Saves the `op_type` and `statistic_type` to register under.

    Raises:
      TypeError: If either argument is not a comma-free string.
    """
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string.")
    if "," in op_type:
      raise TypeError("op_type must not contain a comma.")
    self._op_type = op_type
    if not isinstance(statistic_type, six.string_types):
      raise TypeError("statistic_type must be a string.")
    if "," in statistic_type:
      raise TypeError("statistic_type must not contain a comma.")
    self._statistic_type = statistic_type

  def __call__(self, f):
    """Registers `f` under the combined "<op_type>,<statistic_type>" key."""
    _stats_registry.register(f, self._op_type + "," + self._statistic_type)
    return f


def get_stats_for_node_def(graph, node, statistic_type):
  """Looks up and runs the statistics function for `node`.

  Args:
    graph: The graph the node belongs to.
    node: A `NodeDef` whose `op` field selects the statistics function.
    statistic_type: The kind of statistic to compute.

  Returns:
    The `OpStats` produced by the registered function, or an empty
    `OpStats(statistic_type)` if no function is registered for this op.
  """
  try:
    stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
    result = stats_func(graph, node)
  except LookupError:
    result = OpStats(statistic_type)
  return result


def name_from_scope_name(name):
  """Returns the name of an op given the name of its scope.

  Strips the single trailing "/" that marks a scope name; any other name is
  returned unchanged.
  """
  return name[:-1] if (name and name[-1] == "/") else name


# Group ids for the two-group `lock_util.GroupLock` created in
# `Graph.__init__`: presumably graph-mutation vs. `Session.run` exclusion —
# see the `_mutation_lock()` usage elsewhere in this file; TODO confirm.
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1


@tf_export("Graph")
class Graph(object):
  """A TensorFlow computation, represented as a dataflow graph.

  Wraps a C-API `TF_Graph` and tracks the Python-side bookkeeping for it:
  nodes by id/name, collections, name scopes, device/colocation stacks,
  control dependencies, gradient overrides, and registered functions.
  """

  def __init__(self):
    """Creates a new, empty Graph."""
    # Protects core state accessed from multiple threads (nodes, versions,
    # collections).
    self._lock = threading.RLock()
    # Two-group lock; see _MUTATION_LOCK_GROUP / _SESSION_RUN_LOCK_GROUP.
    self._group_lock = lock_util.GroupLock(num_groups=2)
    self._nodes_by_id = {}
    self._next_id_counter = 0
    self._nodes_by_name = {}
    self._version = 0
    # Maps lowercased name -> count, for `unique_name` deduplication.
    self._names_in_use = {}
    self._stack_state_is_thread_local = False
    self._thread_local = threading.local()
    # Default device function stack (graph-wide; may be shadowed per-thread).
    self._graph_device_function_stack = traceable_stack.TraceableStack()
    self._default_original_op = None
    self._control_flow_context = None
    self._graph_control_dependencies_stack = []
    # Arbitrary collections of objects, keyed by collection name.
    self._collections = {}
    self._seed = None
    # Scoped attr / kernel-label / gradient override maps consulted when
    # creating ops (see `_create_op_helper`).
    self._attr_scope_map = {}
    self._op_to_kernel_label_map = {}
    self._gradient_override_map = {}
    self._gradient_function_map = {}
    # True once `finalize()` has been called; blocks further mutation.
    self._finalized = False
    # Functions defined in this graph, keyed by name (insertion-ordered).
    self._functions = collections.OrderedDict()
    self._graph_def_versions = versions_pb2.VersionDef(
        producer=versions.GRAPH_DEF_VERSION,
        min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
    self._building_function = False
    self._graph_colocation_stack = traceable_stack.TraceableStack()
    # Tensors/ops excluded from feeding/fetching in `Session.run`.
    self._unfeedable_tensors = object_identity.ObjectIdentitySet()
    self._unfetchable_ops = set()
    # Bookkeeping for persistent-tensor handles.
    self._handle_feeders = {}
    self._handle_readers = {}
    self._handle_movers = {}
    self._handle_deleters = {}
    # NOTE: "grap-key" (sic) — this string is part of the key format other
    # code may depend on; do not "fix" the spelling.
    self._graph_key = "grap-key-%d/" % (uid(),)
    self._last_loss_reduction = None
    self._is_loss_scaled_by_optimizer = False
    self._container = ""
    self._add_control_dependencies = False
    # Per-graph caches.
    self._op_def_cache = {}
    self._bcast_grad_args_cache = {}
    self._reduced_shape_cache = {}

    # The owned C-API graph object; freed when this wrapper is deleted.
    self._scoped_c_graph = c_api_util.ScopedTFGraph()
    # Shape-inference functions are not required for every op (original
    # rationale truncated in this copy: presumably to avoid breaking existing
    # graphs whose ops lack shape functions — TODO confirm upstream comment).
    # want to break these existing cases).
    pywrap_tf_session.SetRequireShapeInferenceFns(self._c_graph, False)
    if tf2.enabled():
      self.switch_to_thread_local()

  # Note: this method is private because the API of tf.Graph() is public and
  # frozen, and this functionality is still not ready for public visibility.
  @tf_contextlib.contextmanager
  def _variable_creator_scope(self, creator, priority=100):
    """Scope which defines a variable creation function.

    Temporarily pushes `(priority, creator)` onto the thread-local variable
    creator stack; the stack is kept sorted by priority. Restores the prior
    stack on exit and errors if scopes were exited out of order.

    Args:
      creator: A variable creator callable.
      priority: Sort key; lower priorities come first in the stack.

    Raises:
      RuntimeError: If the scope is exited without proper nesting.
    """
    # This step keeps a reference to the existing stack, and it also initializes
    # self._thread_local._variable_creator_stack if it doesn't exist yet.
    old = self._variable_creator_stack
    new = list(old)
    new.append((priority, creator))
    # Sort by priority; the sort is stable, so entries with equal priority
    # otherwise maintain registration order.
    new.sort(key=lambda item: item[0])
    self._thread_local._variable_creator_stack = new  # pylint: disable=protected-access
    try:
      yield
    finally:
      if self._thread_local._variable_creator_stack is not new:  # pylint: disable=protected-access
        raise RuntimeError(
            "Exiting variable_creator_scope without proper nesting.")
      self._thread_local._variable_creator_stack = old  # pylint: disable=protected-access

  # Note: this method is private because the API of tf.Graph() is public and
  # frozen, and this functionality is still not ready for public visibility.
@property def _variable_creator_stack(self): if not hasattr(self._thread_local, "_variable_creator_stack"): self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access # This previously returned a copy of the stack instead of the stack itself, # to guard against accidental mutation. Consider, however, code that wants # to save and restore the variable creator stack: # def f(): # original_stack = graph._variable_creator_stack # graph._variable_creator_stack = new_stack # ... # Some code # graph._variable_creator_stack = original_stack # # And lets say you have some code that calls this function with some # variable_creator: # def g(): # with variable_scope.variable_creator_scope(creator): # f() # When exiting the variable creator scope, it would see a different stack # object than it expected leading to a "Exiting variable_creator_scope # without proper nesting" error. return self._thread_local._variable_creator_stack # pylint: disable=protected-access @_variable_creator_stack.setter def _variable_creator_stack(self, variable_creator_stack): self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access def _check_not_finalized(self): if self._finalized: raise RuntimeError("Graph is finalized and cannot be modified.") def _add_op(self, op, op_name): self._check_not_finalized() with self._lock: self._next_id_counter += 1 op_id = self._next_id_counter self._nodes_by_id[op_id] = op self._nodes_by_name[op_name] = op self._version = max(self._version, op_id) return op_id @property def _c_graph(self): if self._scoped_c_graph: return self._scoped_c_graph.graph return None @property def version(self): if self._finalized: return self._version with self._lock: return self._version @property def graph_def_versions(self): # pylint: disable=line-too-long # pylint: enable=line-too-long with c_api_util.tf_buffer() as buf: pywrap_tf_session.TF_GraphVersions(self._c_graph, buf) data = pywrap_tf_session.TF_GetBuffer(buf) 
version_def = versions_pb2.VersionDef() version_def.ParseFromString(compat.as_bytes(data)) return version_def @property def seed(self): return self._seed @seed.setter def seed(self, seed): self._seed = seed @property def finalized(self): return self._finalized def finalize(self): self._finalized = True def _unsafe_unfinalize(self): self._finalized = False def _get_control_flow_context(self): return self._control_flow_context def _set_control_flow_context(self, ctx): self._control_flow_context = ctx def _copy_functions_to_graph_def(self, graph_def, starting_bytesize): bytesize = starting_bytesize for f in self._functions.values(): bytesize += f.definition.ByteSize() if bytesize >= (1 << 31) or bytesize < 0: raise ValueError("GraphDef cannot be larger than 2GB.") graph_def.library.function.extend([f.definition]) if f.grad_func_name: grad_def = function_pb2.GradientDef() grad_def.function_name = f.name grad_def.gradient_func = f.grad_func_name graph_def.library.gradient.extend([grad_def]) def _as_graph_def(self, from_version=None, add_shapes=False): # pylint: disable=line-too-long # pylint: enable=line-too-long with self._lock: with c_api_util.tf_buffer() as buf: pywrap_tf_session.TF_GraphToGraphDef(self._c_graph, buf) data = pywrap_tf_session.TF_GetBuffer(buf) graph = graph_pb2.GraphDef() graph.ParseFromString(compat.as_bytes(data)) # Strip the experimental library field iff it's empty. if not graph.library.function: graph.ClearField("library") if add_shapes: for node in graph.node: op = self._nodes_by_name[node.name] if op.outputs: node.attr["_output_shapes"].list.shape.extend( [output.get_shape().as_proto() for output in op.outputs]) for function_def in graph.library.function: defined_function = self._functions[function_def.signature.name] try: func_graph = defined_function.graph except AttributeError: # does. 
Both rely on ops.py, so we can't really isinstance check continue input_shapes = function_def.attr["_input_shapes"] try: func_graph_inputs = func_graph.inputs except AttributeError: continue for input_tensor, _ in zip(func_graph_inputs, function_def.signature.input_arg): if input_tensor.dtype == dtypes.resource: # confused if we set the shape of the resource placeholder (to a # scalar of course) and there isn't any handle data. input_shapes.list.shape.add().CopyFrom( tensor_shape.TensorShape(None).as_proto()) else: input_shapes.list.shape.add().CopyFrom( input_tensor.get_shape().as_proto()) for node in function_def.node_def: try: op = func_graph.get_operation_by_name(node.name) except KeyError: continue outputs = op.outputs if op.type == "StatefulPartitionedCall": num_outputs = len(node.attr["Tout"].list.type) outputs = outputs[:num_outputs] node.attr["_output_shapes"].list.shape.extend( [output.get_shape().as_proto() for output in outputs]) return graph, self._version def as_graph_def(self, from_version=None, add_shapes=False): result, _ = self._as_graph_def(from_version, add_shapes) return result def _is_function(self, name): return compat.as_str(name) in self._functions def _get_function(self, name): return self._functions.get(compat.as_str(name), None) def _add_function(self, function): name = function.name if (function.grad_func_name is not None) and (function.python_grad_func is not None): raise ValueError("Gradient defined twice for function %s" % name) gradient = ( function._grad_func._c_func.func if function._grad_func else None) pywrap_tf_session.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient) self._functions[compat.as_str(name)] = function if self._graph_def_versions.min_consumer < 12: self._graph_def_versions.min_consumer = 12 @property def building_function(self): return self._building_function @deprecated_args(None, "Shapes are always computed; don't use the compute_shapes " "as it has no effect.", "compute_shapes") def create_op( 
self, op_type, inputs, dtypes=None, # pylint: disable=redefined-outer-name input_types=None, name=None, attrs=None, op_def=None, compute_shapes=True, compute_device=True): del compute_shapes for idx, a in enumerate(inputs): if not isinstance(a, Tensor): raise TypeError("Input #%d is not a tensor: %s" % (idx, a)) return self._create_op_internal(op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_device) def _create_op_internal( self, op_type, inputs, dtypes=None, # pylint: disable=redefined-outer-name input_types=None, name=None, attrs=None, op_def=None, compute_device=True): self._check_not_finalized() if name is None: name = op_type # If a names ends with a '/' it is a "name scope" and we use it as-is, # after removing the trailing '/'. if name and name[-1] == "/": name = name_from_scope_name(name) else: name = self.unique_name(name) node_def = _NodeDef(op_type, name, attrs) input_ops = set(t.op for t in inputs) control_inputs = self._control_dependencies_for_inputs(input_ops) # _create_op_helper mutates the new Operation. `_mutation_lock` ensures a # Session.run call cannot occur between creating and mutating the op. 
with self._mutation_lock(): ret = Operation( node_def, self, inputs=inputs, output_types=dtypes, control_inputs=control_inputs, input_types=input_types, original_op=self._default_original_op, op_def=op_def) self._create_op_helper(ret, compute_device=compute_device) return ret def _create_op_from_tf_operation(self, c_op, compute_device=True): self._check_not_finalized() ret = Operation(c_op, self) # If a name_scope was created with ret.name but no nodes were created in it, # the name will still appear in _names_in_use even though the name hasn't name_key = ret.name.lower() if name_key not in self._names_in_use: self._names_in_use[name_key] = 1 self._create_op_helper(ret, compute_device=compute_device) return ret def _create_op_helper(self, op, compute_device=True): for key, value in self._attr_scope_map.items(): try: op.get_attr(key) except ValueError: if callable(value): value = value(op.node_def) if not isinstance(value, (type(None), attr_value_pb2.AttrValue)): raise TypeError( "Callable for scope map key '%s' must return either None or " "an AttrValue protocol buffer; but it returned: %s" % (key, value)) if value: op._set_attr(key, value) try: kernel_label = self._op_to_kernel_label_map[op.type] op._set_attr("_kernel", attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label))) except KeyError: pass op._gradient_function = self._gradient_function_map.get(op.type) try: mapped_op_type = self._gradient_override_map[op.type] op._set_attr("_gradient_op_type", attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type))) except KeyError: pass self._record_op_seen_by_control_dependencies(op) if compute_device: self._apply_device_functions(op) # pylint: disable=protected-access op._colocation_code_locations = self._snapshot_colocation_stack_metadata() # pylint: enable=protected-access if self._colocation_stack: all_colocation_groups = [] for colocation_op in self._colocation_stack.peek_objs(): all_colocation_groups.extend(colocation_op.colocation_groups()) if 
colocation_op.device: # pylint: disable=protected-access op._set_device(colocation_op.device) # pylint: enable=protected-access all_colocation_groups = sorted(set(all_colocation_groups)) # pylint: disable=protected-access op._set_attr( "_class", attr_value_pb2.AttrValue( list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups))) # pylint: enable=protected-access # Sets "container" attribute if # (1) self._container is not None # (2) "is_stateful" is set in OpDef # (3) "container" attribute is in OpDef # (4) "container" attribute is None if self._container and op._is_stateful: # pylint: disable=protected-access try: container_attr = op.get_attr("container") except ValueError: # "container" attribute is not in OpDef pass else: if not container_attr: op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access s=compat.as_bytes(self._container))) def _add_new_tf_operations(self, compute_devices=True): # Create all Operation objects before accessing their inputs since an op may # be created before its inputs. new_ops = [ self._create_op_from_tf_operation(c_op, compute_device=compute_devices) for c_op in c_api_util.new_tf_operations(self) ] # pylint: disable=protected-access for op in new_ops: new_control_inputs = self._control_dependencies_for_inputs(op.inputs) op._add_control_inputs(new_control_inputs) op._control_flow_post_processing() # pylint: enable=protected-access return new_ops def as_graph_element(self, obj, allow_tensor=True, allow_operation=True): if self._finalized: return self._as_graph_element_locked(obj, allow_tensor, allow_operation) with self._lock: return self._as_graph_element_locked(obj, allow_tensor, allow_operation) def _as_graph_element_locked(self, obj, allow_tensor, allow_operation): # The vast majority of this function is figuring # out what an API user might be doing wrong, so # that we can give helpful error messages. 
# # Ideally, it would be nice to split it up, but we # need context to generate nice error messages. if allow_tensor and allow_operation: types_str = "Tensor or Operation" elif allow_tensor: types_str = "Tensor" elif allow_operation: types_str = "Operation" else: raise ValueError("allow_tensor and allow_operation can't both be False.") temp_obj = _as_graph_element(obj) if temp_obj is not None: obj = temp_obj if isinstance(obj, compat.bytes_or_text_types): name = compat.as_str(obj) if ":" in name and allow_tensor: try: op_name, out_n = name.split(":") out_n = int(out_n) except: raise ValueError("The name %s looks a like a Tensor name, but is " "not a valid one. Tensor names must be of the " "form \"<op_name>:<output_index>\"." % repr(name)) if op_name in self._nodes_by_name: op = self._nodes_by_name[op_name] else: raise KeyError("The name %s refers to a Tensor which does not " "exist. The operation, %s, does not exist in the " "graph." % (repr(name), repr(op_name))) try: return op.outputs[out_n] except: raise KeyError("The name %s refers to a Tensor which does not " "exist. The operation, %s, exists but only has " "%s outputs." % (repr(name), repr(op_name), len(op.outputs))) elif ":" in name and not allow_tensor: raise ValueError("Name %s appears to refer to a Tensor, not a %s." % (repr(name), types_str)) elif ":" not in name and allow_operation: # Looks like an Operation name and can be an Operation. if name not in self._nodes_by_name: raise KeyError("The name %s refers to an Operation not in the " "graph." % repr(name)) return self._nodes_by_name[name] elif ":" not in name and not allow_operation: # Looks like an Operation name but can't be an Operation. if name in self._nodes_by_name: err_msg = ("The name %s refers to an Operation, not a %s." % (repr(name), types_str)) else: err_msg = ("The name %s looks like an (invalid) Operation name, " "not a %s." 
% (repr(name), types_str)) err_msg += (" Tensor names must be of the form " "\"<op_name>:<output_index>\".") raise ValueError(err_msg) elif isinstance(obj, Tensor) and allow_tensor: # Actually obj is just the object it's referring to. if obj.graph is not self: raise ValueError("Tensor %s is not an element of this graph." % obj) return obj elif isinstance(obj, Operation) and allow_operation: if obj.graph is not self: raise ValueError("Operation %s is not an element of this graph." % obj) return obj else: # We give up! raise TypeError("Can not convert a %s into a %s." % (type(obj).__name__, types_str)) def get_operations(self): if self._finalized: return list(self._nodes_by_id.values()) with self._lock: return list(self._nodes_by_id.values()) def get_operation_by_name(self, name): if not isinstance(name, six.string_types): raise TypeError("Operation names are strings (or similar), not %s." % type(name).__name__) return self.as_graph_element(name, allow_tensor=False, allow_operation=True) def _get_operation_by_name_unsafe(self, name): if self._finalized: return self._nodes_by_name[name] with self._lock: return self._nodes_by_name[name] def _get_operation_by_tf_operation(self, tf_oper): op_name = pywrap_tf_session.TF_OperationName(tf_oper) return self._get_operation_by_name_unsafe(op_name) def get_tensor_by_name(self, name): # Names should be strings. if not isinstance(name, six.string_types): raise TypeError("Tensor names are strings (or similar), not %s." % type(name).__name__) return self.as_graph_element(name, allow_tensor=True, allow_operation=False) def _get_tensor_by_tf_output(self, tf_output): op = self._get_operation_by_tf_operation(tf_output.oper) return op.outputs[tf_output.index] @property def _last_id(self): return self._next_id_counter def _get_op_def(self, type): # pylint: disable=redefined-builtin # NOTE: No locking is required because the lookup and insertion operations # on Python dictionaries are atomic. 
try: return self._op_def_cache[type] except KeyError: with c_api_util.tf_buffer() as buf: # pylint: disable=protected-access pywrap_tf_session.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf) # pylint: enable=protected-access data = pywrap_tf_session.TF_GetBuffer(buf) op_def = op_def_pb2.OpDef() op_def.ParseFromString(compat.as_bytes(data)) self._op_def_cache[type] = op_def return op_def def as_default(self): return _default_graph_stack.get_controller(self) @property def collections(self): return list(self._collections) def add_to_collection(self, name, value): self._check_not_finalized() with self._lock: if name not in self._collections: self._collections[name] = [value] else: self._collections[name].append(value) def add_to_collections(self, names, value): # Make sure names are unique, but treat strings as a single collection name names = (names,) if isinstance(names, six.string_types) else set(names) for name in names: self.add_to_collection(name, value) def get_collection_ref(self, name): with self._lock: coll_list = self._collections.get(name, None) if coll_list is None: coll_list = [] self._collections[name] = coll_list return coll_list def get_collection(self, name, scope=None): with self._lock: collection = self._collections.get(name, None) if collection is None: return [] if scope is None: return list(collection) else: c = [] regex = re.compile(scope) for item in collection: try: if regex.match(item.name): c.append(item) except AttributeError: # Collection items with no name are ignored. 
pass return c def get_all_collection_keys(self): with self._lock: return [x for x in self._collections if isinstance(x, six.string_types)] def clear_collection(self, name): self._check_not_finalized() with self._lock: if name in self._collections: del self._collections[name] @tf_contextlib.contextmanager def _original_op(self, op): old_original_op = self._default_original_op self._default_original_op = op try: yield finally: self._default_original_op = old_original_op @property def _name_stack(self): # This may be called from a thread where name_stack doesn't yet exist. if not hasattr(self._thread_local, "_name_stack"): self._thread_local._name_stack = "" return self._thread_local._name_stack @_name_stack.setter def _name_stack(self, name_stack): self._thread_local._name_stack = name_stack @tf_contextlib.contextmanager def name_scope(self, name): if name: if isinstance(name, compat.bytes_or_text_types): name = compat.as_str(name) if self._name_stack: if not _VALID_SCOPE_NAME_REGEX.match(name): raise ValueError("'%s' is not a valid scope name" % name) else: if not _VALID_OP_NAME_REGEX.match(name): raise ValueError("'%s' is not a valid scope name" % name) old_stack = self._name_stack if not name: new_stack = None elif name[-1] == "/": new_stack = name_from_scope_name(name) else: new_stack = self.unique_name(name) self._name_stack = new_stack try: yield "" if new_stack is None else new_stack + "/" finally: self._name_stack = old_stack def unique_name(self, name, mark_as_used=True): if self._name_stack: name = self._name_stack + "/" + name name_key = name.lower() i = self._names_in_use.get(name_key, 0) if mark_as_used: self._names_in_use[name_key] = i + 1 if i > 0: base_name_key = name_key while name_key in self._names_in_use: name_key = "%s_%d" % (base_name_key, i) i += 1 if mark_as_used: self._names_in_use[name_key] = 1 name = "%s_%d" % (name, i - 1) return name def get_name_scope(self): return self._name_stack @tf_contextlib.contextmanager def 
_colocate_with_for_gradient(self, op, gradient_uid, ignore_existing=False): with self.colocate_with(op, ignore_existing): if gradient_uid is not None and self._control_flow_context is not None: self._control_flow_context.EnterGradientColocation(op, gradient_uid) try: yield finally: self._control_flow_context.ExitGradientColocation(op, gradient_uid) else: yield @tf_contextlib.contextmanager def colocate_with(self, op, ignore_existing=False): if op is None and not ignore_existing: raise ValueError("Trying to reset colocation (op is None) but " "ignore_existing is not True") op = _op_to_colocate_with(op, self) device_fn_tmp = self._device_function_stack self._device_function_stack = traceable_stack.TraceableStack() if ignore_existing: current_stack = self._colocation_stack self._colocation_stack = traceable_stack.TraceableStack() if op is not None: # to jump over layers of context managers above us. self._colocation_stack.push_obj(op, offset=4) try: yield finally: # Restore device function stack self._device_function_stack = device_fn_tmp if op is not None: self._colocation_stack.pop_obj() # Reset the colocation stack if requested. 
if ignore_existing: self._colocation_stack = current_stack def _add_device_to_stack(self, device_name_or_function, offset=0): total_offset = 1 + offset spec = _UserDeviceSpec(device_name_or_function) self._device_function_stack.push_obj(spec, offset=total_offset) return spec @tf_contextlib.contextmanager def device(self, device_name_or_function): # pylint: disable=line-too-long self._add_device_to_stack(device_name_or_function, offset=2) old_top_of_stack = self._device_function_stack.peek_top_obj() try: yield finally: new_top_of_stack = self._device_function_stack.peek_top_obj() if old_top_of_stack is not new_top_of_stack: raise RuntimeError("Exiting device scope without proper scope nesting.") self._device_function_stack.pop_obj() def _apply_device_functions(self, op): # Apply any device functions in LIFO order, so that the most recently # pushed function has the first chance to apply a device to the op. # We apply here because the result can depend on the Operation's prior_device_string = None for device_spec in self._device_function_stack.peek_objs(): if device_spec.is_null_merge: continue if device_spec.function is None: break device_string = device_spec.string_merge(op) if device_string is not prior_device_string: op._set_device_from_string(device_string) prior_device_string = device_string op._device_code_locations = self._snapshot_device_function_stack_metadata() @tf_contextlib.contextmanager def container(self, container_name): original_container = self._container self._container = container_name try: yield self._container finally: self._container = original_container class _ControlDependenciesController(object): def __init__(self, graph, control_inputs): self._graph = graph if control_inputs is None: self._control_inputs_val = [] self._new_stack = True else: self._control_inputs_val = control_inputs self._new_stack = False self._seen_nodes = set() self._old_stack = None self._old_control_flow_context = None def __enter__(self): if self._new_stack: 
        self._old_stack = self._graph._control_dependencies_stack
        self._graph._control_dependencies_stack = []
        # Also suspend the active control-flow context (if any) so that ops
        # created under this controller are not captured by it.
        self._old_control_flow_context = self._graph._get_control_flow_context()
        self._graph._set_control_flow_context(None)
      self._graph._push_control_dependencies_controller(self)

    def __exit__(self, unused_type, unused_value, unused_traceback):
      """Pops this controller; restores the saved stack/context if needed."""
      self._graph._pop_control_dependencies_controller(self)
      if self._new_stack:
        # Restore the stack and control-flow context saved in __enter__.
        self._graph._control_dependencies_stack = self._old_stack
        self._graph._set_control_flow_context(self._old_control_flow_context)

    @property
    def control_inputs(self):
      # The ops this controller adds as control inputs; None means the
      # controller clears all control dependencies instead of adding any.
      return self._control_inputs_val

    def add_op(self, op):
      """Records that `op` was created inside this controller's scope."""
      if isinstance(op, Tensor):
        op = op.ref()  # Tensors are unhashable; track a stable reference.
      self._seen_nodes.add(op)

    def op_in_group(self, op):
      """Returns True if `op` was created inside this controller's scope."""
      if isinstance(op, Tensor):
        op = op.ref()
      return op in self._seen_nodes

  def _push_control_dependencies_controller(self, controller):
    self._control_dependencies_stack.append(controller)

  def _pop_control_dependencies_controller(self, controller):
    # Controllers must nest properly: only the innermost one may be popped.
    assert self._control_dependencies_stack[-1] is controller
    self._control_dependencies_stack.pop()

  def _current_control_dependencies(self):
    """Returns the set of control inputs from all active controllers."""
    ret = set()
    for controller in self._control_dependencies_stack:
      for op in controller.control_inputs:
        ret.add(op)
    return ret

  def _control_dependencies_for_inputs(self, input_ops):
    """Returns the control inputs to attach to a newly created op.

    Args:
      input_ops: The data inputs of the op about to be created.

    Returns:
      A list of `Operation`s collected from the active controllers, omitting
      any controller in whose scope one of `input_ops` was created (the new
      op is then already ordered after that controller via data flow).
    """
    ret = []
    for controller in self._control_dependencies_stack:
      # If any data input was created inside this controller's scope, the
      # new op is dominated by it and needs no extra control edges from it.
      dominated = False
      for op in input_ops:
        if controller.op_in_group(op):
          dominated = True
          break
      if not dominated:
        # Don't add a control input if we already have a data dependency on it.
        ret.extend(c for c in controller.control_inputs if c not in input_ops)
    return ret

  def _record_op_seen_by_control_dependencies(self, op):
    """Records that `op` was created under every active controller."""
    for controller in self._control_dependencies_stack:
      controller.add_op(op)

  def control_dependencies(self, control_inputs):
    """Returns a context manager that specifies control dependencies.

    See `tf.control_dependencies` for the public documentation. Passing
    `None` returns a controller that clears all current control dependencies
    (and the control-flow context) for ops created within it.

    Args:
      control_inputs: A list of `Operation`/`Tensor` objects (or anything
        accepted by `as_graph_element`), or None.

    Returns:
      A `_ControlDependenciesController` context manager.

    Raises:
      TypeError: If an element of `control_inputs` resolves to neither an
        `Operation` nor a `Tensor`.
    """
    if control_inputs is None:
      return self._ControlDependenciesController(self, None)
    # Convert inputs to ops, deduplicating against already-active controllers.
    control_ops = []
    current = self._current_control_dependencies()
    for c in control_inputs:
      # Resource-backed values (anything exposing both `_handle` and `op`,
      # e.g. resource variables) and IndexedSlices stand in for their
      # producing op, so that depending on a variable does not itself
      # trigger reads.
      if (isinstance(c, IndexedSlices) or
          (hasattr(c, "_handle") and hasattr(c, "op"))):
        c = c.op
      c = self.as_graph_element(c)
      if isinstance(c, Tensor):
        c = c.op
      elif not isinstance(c, Operation):
        raise TypeError("Control input must be Operation or Tensor: %s" % c)
      if c not in current:
        control_ops.append(c)
        current.add(c)
    return self._ControlDependenciesController(self, control_ops)

  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def _attr_scope(self, attr_map):
    """Context manager that installs node attrs on ops created within it.

    Args:
      attr_map: A dict mapping attr names to `AttrValue` protos, callables
        that produce `AttrValue` protos, or None (which removes the attr
        from the scope map).

    Raises:
      TypeError: If `attr_map` is not a dict with the expected value types.
    """
    if not isinstance(attr_map, dict):
      raise TypeError("attr_map must be a dictionary mapping "
                      "strings to AttrValue protocol buffers")
    # The saved_attrs dictionary stores any currently-set attrs that
    # will be overridden by this context manager.
    saved_attrs = {}
    # Install the given attributes.
    for name, attr in attr_map.items():
      if not (isinstance(name, six.string_types) and
              (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
               callable(attr))):
        raise TypeError("attr_map must be a dictionary mapping "
                        "strings to AttrValue protocol buffers or "
                        "callables that emit AttrValue protocol buffers")
      try:
        saved_attrs[name] = self._attr_scope_map[name]
      except KeyError:
        pass
      if attr is None:
        del self._attr_scope_map[name]
      else:
        self._attr_scope_map[name] = attr
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the attributes set for this context, and restore any saved
      # attributes.
      for name, attr in attr_map.items():
        try:
          self._attr_scope_map[name] = saved_attrs[name]
        except KeyError:
          del self._attr_scope_map[name]

  # pylint: enable=g-doc-return-or-yield

  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def _kernel_label_map(self, op_to_kernel_label_map):
    """Context manager that maps op types to kernel labels.

    Ops of the given types created within the context are associated with
    the mapped label via `self._op_to_kernel_label_map` (consumed at op
    creation time, outside this method).

    Args:
      op_to_kernel_label_map: A dict mapping op type strings to kernel
        label strings.

    Raises:
      TypeError: If the argument is not a dict of strings to strings.
    """
    if not isinstance(op_to_kernel_label_map, dict):
      raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                      "strings to strings")
    # The saved_labels dictionary stores any currently-set labels that
    # will be overridden by this context manager.
    saved_labels = {}
    # Install the given labels.
    for op_type, label in op_to_kernel_label_map.items():
      if not (isinstance(op_type, six.string_types) and
              isinstance(label, six.string_types)):
        raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                        "strings to strings")
      try:
        saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
      except KeyError:
        pass
      self._op_to_kernel_label_map[op_type] = label
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the labels set for this context, and restore any saved labels.
      for op_type, label in op_to_kernel_label_map.items():
        try:
          self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
        except KeyError:
          del self._op_to_kernel_label_map[op_type]

  # pylint: enable=g-doc-return-or-yield

  @tf_contextlib.contextmanager
  def _override_gradient_function(self, gradient_function_map):
    """Temporarily installs `self._gradient_function_map`.

    NOTE(review): there is no try/finally here, so if the body raises, the
    map is left installed. Confirm whether callers rely on that or whether a
    finally-clause reset is wanted.
    """
    # This is an internal API and we don't need nested context for this.
    assert not self._gradient_function_map
    self._gradient_function_map = gradient_function_map
    yield
    self._gradient_function_map = {}

  @tf_contextlib.contextmanager
  def gradient_override_map(self, op_type_map):
    """EXPERIMENTAL: A context manager for overriding gradient functions.

    Records overrides in `self._gradient_override_map`; ops created within
    the context are expected (by the gradient machinery, outside this file)
    to use the gradient registered for the mapped op type instead of their
    own.

    Args:
      op_type_map: A dict mapping op type strings to alternative op type
        strings.

    Raises:
      TypeError: If `op_type_map` is not a dict of strings to strings.
    """
    if not isinstance(op_type_map, dict):
      raise TypeError("op_type_map must be a dictionary mapping "
                      "strings to strings")
    # Stores any currently-set overrides that this context will shadow.
    saved_mappings = {}
    for op_type, mapped_op_type in op_type_map.items():
      if not (isinstance(op_type, six.string_types) and
              isinstance(mapped_op_type, six.string_types)):
        raise TypeError("op_type_map must be a dictionary mapping "
                        "strings to strings")
      try:
        saved_mappings[op_type] = self._gradient_override_map[op_type]
      except KeyError:
        pass
      self._gradient_override_map[op_type] = mapped_op_type
    try:
      yield
    finally:
      # Restore the shadowed overrides (or remove ones we introduced).
      for op_type, mapped_op_type in op_type_map.items():
        try:
          self._gradient_override_map[op_type] = saved_mappings[op_type]
        except KeyError:
          del self._gradient_override_map[op_type]

  def prevent_feeding(self, tensor):
    """Marks the given `tensor` as unfeedable in this graph."""
    self._unfeedable_tensors.add(tensor)

  def is_feedable(self, tensor):
    """Returns `True` if and only if `tensor` is feedable."""
    return tensor not in self._unfeedable_tensors

  def prevent_fetching(self, op):
    """Marks the given `op` as unfetchable in this graph."""
    self._unfetchable_ops.add(op)

  def is_fetchable(self, tensor_or_op):
    """Returns `True` if and only if `tensor_or_op` is fetchable."""
    if isinstance(tensor_or_op, Tensor):
      # A tensor is fetchable iff its producing op is.
      return tensor_or_op.op not in self._unfetchable_ops
    else:
      return tensor_or_op not in self._unfetchable_ops

  def switch_to_thread_local(self):
    """Makes the device/colocation/control-dependency stacks thread-local.

    One-way, idempotent switch. After the switch, each thread lazily copies
    the graph-wide stack state on first access (see the properties below in
    this class).
    """
    if not self._stack_state_is_thread_local:
      self._stack_state_is_thread_local = True

  @property
  def _device_function_stack(self):
    # Returns the thread-local device function stack once
    # switch_to_thread_local() has been called, else the graph-wide one.
    if self._stack_state_is_thread_local:
      # This may be called from a thread where the stack doesn't yet
      # exist.
      # pylint: disable=protected-access
      if not hasattr(self._thread_local, "_device_function_stack"):
        stack_copy_for_this_thread = self._graph_device_function_stack.copy()
        self._thread_local._device_function_stack = stack_copy_for_this_thread
      return self._thread_local._device_function_stack
      # pylint: enable=protected-access
    else:
      return self._graph_device_function_stack

  @property
  def _device_functions_outer_to_inner(self):
    """Device functions ordered outermost-first (reverses the stack peek)."""
    user_device_specs = self._device_function_stack.peek_objs()
    device_functions = [spec.function for spec in user_device_specs]
    device_functions_outer_to_inner = list(reversed(device_functions))
    return device_functions_outer_to_inner

  def _snapshot_device_function_stack_metadata(self):
    """Returns a lightweight snapshot of the device function stack.

    Each entry is a metadata-only copy whose `.obj` is replaced by the
    device spec's `display_name` string, so the snapshot holds no live
    device-function objects.
    """
    snapshot = []
    for obj in self._device_function_stack.peek_traceable_objs():
      obj_copy = obj.copy_metadata()
      obj_copy.obj = obj.obj.display_name
      snapshot.append(obj_copy)
    return snapshot

  @_device_function_stack.setter
  def _device_function_stack(self, device_function_stack):
    # Mirrors the getter: writes go to the thread-local or graph-wide slot.
    if self._stack_state_is_thread_local:
      # pylint: disable=protected-access
      self._thread_local._device_function_stack = device_function_stack
      # pylint: enable=protected-access
    else:
      self._graph_device_function_stack = device_function_stack

  @property
  def _colocation_stack(self):
    # Returns the thread-local colocation stack once switch_to_thread_local()
    # has been called, else the graph-wide one.
    if self._stack_state_is_thread_local:
      # This may be called from a thread where the colocation stack doesn't
      # yet exist.
      if not hasattr(self._thread_local, "_colocation_stack"):
        stack_copy_for_this_thread = self._graph_colocation_stack.copy()
        self._thread_local._colocation_stack = stack_copy_for_this_thread
      return self._thread_local._colocation_stack
    else:
      return self._graph_colocation_stack

  def _snapshot_colocation_stack_metadata(self):
    """Returns a mapping of colocation op names to metadata-only copies."""
    return {
        traceable_obj.obj.name: traceable_obj.copy_metadata()
        for traceable_obj in self._colocation_stack.peek_traceable_objs()
    }

  @_colocation_stack.setter
  def _colocation_stack(self, colocation_stack):
    # Mirrors the getter: writes go to the thread-local or graph-wide slot.
    if self._stack_state_is_thread_local:
      self._thread_local._colocation_stack = colocation_stack
    else:
      self._graph_colocation_stack = colocation_stack

  @property
  def _control_dependencies_stack(self):
    # Returns the thread-local control-dependencies stack once
    # switch_to_thread_local() has been called, else the graph-wide one.
    if self._stack_state_is_thread_local:
      # Lazily copy (shallow) the graph-wide stack into this thread.
      if not hasattr(self._thread_local, "_control_dependencies_stack"):
        self._thread_local._control_dependencies_stack = (
            self._graph_control_dependencies_stack[:])
      return self._thread_local._control_dependencies_stack
    else:
      return self._graph_control_dependencies_stack

  @_control_dependencies_stack.setter
  def _control_dependencies_stack(self, control_dependencies):
    # Mirrors the getter: writes go to the thread-local or graph-wide slot.
    if self._stack_state_is_thread_local:
      self._thread_local._control_dependencies_stack = control_dependencies
    else:
      self._graph_control_dependencies_stack = control_dependencies

  @property
  def _distribution_strategy_stack(self):
    # Per-thread stack of distribution-strategy context (always
    # thread-local, unlike the stacks above); created lazily and empty by
    # default.
    if not hasattr(self._thread_local, "_distribution_strategy_stack"):
      self._thread_local._distribution_strategy_stack = []  # pylint: disable=protected-access
    return self._thread_local._distribution_strategy_stack  # pylint: disable=protected-access

  @_distribution_strategy_stack.setter
  def _distribution_strategy_stack(self, _distribution_strategy_stack):
    self._thread_local._distribution_strategy_stack = (  # pylint: disable=protected-access
        _distribution_strategy_stack)

  @property
  def _global_distribute_strategy_scope(self):
    # Per-thread holder for the active distribute-strategy scope object;
    # None until a scope is installed.
    if not hasattr(self._thread_local, "distribute_strategy_scope"):
      self._thread_local.distribute_strategy_scope = None
    return self._thread_local.distribute_strategy_scope

  @_global_distribute_strategy_scope.setter
  def _global_distribute_strategy_scope(self, distribute_strategy_scope):
    self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)

  @property
  def _auto_cast_variable_read_dtype(self):
    # Per-thread dtype used when reading auto-cast variables; None disables
    # casting. Consumed outside this file (presumably by AutoCastVariable).
    if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"):
      self._thread_local._auto_cast_variable_read_dtype = None  # pylint: disable=protected-access
    return self._thread_local._auto_cast_variable_read_dtype  # pylint: disable=protected-access

  @_auto_cast_variable_read_dtype.setter
  def _auto_cast_variable_read_dtype(self, dtype):
    # Normalize any truthy dtype-like value; None (and other falsy values)
    # pass through unchanged to mean "no casting".
    if dtype:
      dtype = dtypes.as_dtype(dtype)
    self._thread_local._auto_cast_variable_read_dtype = dtype  # pylint: disable=protected-access

  @tf_contextlib.contextmanager
  def _enable_auto_casting_variables(self, dtype):
    """Context manager that temporarily sets the auto-cast read dtype."""
    prev_read_dtype = self._auto_cast_variable_read_dtype
    try:
      self._auto_cast_variable_read_dtype = dtype
      yield
    finally:
      self._auto_cast_variable_read_dtype = prev_read_dtype

  def _mutation_lock(self):
    """Returns a lock to guard code that creates & mutates ops.

    See the comment for self._group_lock for more info.
    """
    return self._group_lock.group(_MUTATION_LOCK_GROUP)

  def _session_run_lock(self):
    """Returns a lock to guard code for Session.run.

    See the comment for self._group_lock for more info.
    """
    return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)


# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
  """Wrapper for `Graph.device()` using the default graph.

  See `tf.Graph.device` for more details.

  Args:
    device_name_or_function: The device name or function to use in the
      context.

  Returns:
    A context manager that specifies the default device to use for newly
    created ops.

  Raises:
    RuntimeError: If eager execution is enabled and a function is passed in.
  """
  if context.executing_eagerly():
    if callable(device_name_or_function):
      raise RuntimeError(
          "tf.device does not support functions when eager execution "
          "is enabled.")
    return context.device(device_name_or_function)
  elif executing_eagerly_outside_functions():
    @tf_contextlib.contextmanager
    def combined(device_name_or_function):
      """Applies both the graph-level and eager-context device scopes."""
      with get_default_graph().device(device_name_or_function):
        if not callable(device_name_or_function):
          # Plain device names also apply to eager ops executed while the
          # scope is active; device functions only affect graph ops.
          with context.device(device_name_or_function):
            yield
        else:
          yield
    return combined(device_name_or_function)
  else:
    return get_default_graph().device(device_name_or_function)


@tf_export("device", v1=[])
def device_v2(device_name):
  """Specifies the device for ops created/executed in this context.

  Unlike the v1 `tf.device`, `device_name` must be a device name string;
  device functions are rejected.

  Args:
    device_name: The device name to use in the context.

  Returns:
    A context manager that specifies the default device for newly created
    ops.

  Raises:
    RuntimeError: If `device_name` is a function.
  """
  if callable(device_name):
    raise RuntimeError("tf.device does not support functions.")
  return device(device_name)


@tf_export(v1=["container"])
def container(container_name):
  """Wrapper for `Graph.container()` using the default graph.

  Args:
    container_name: The container string to use in the context.

  Returns:
    A context manager that specifies the default container to use for newly
    created stateful ops.
  """
  return get_default_graph().container(container_name)


def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
  """Colocation context shared by the public API and the gradient code.

  In eager mode this degrades to a plain device scope on `op`'s device (or a
  no-op context when `op` is None). In graph mode it defers to the default
  graph's `_colocate_with_for_gradient`, forwarding `gradient_uid`.

  Args:
    op: An op/tensor to colocate with, or None.
    gradient_uid: Opaque identifier forwarded to the graph (None for the
      non-gradient entry points).
    ignore_existing: Forwarded to `Graph._colocate_with_for_gradient`.

  Returns:
    A context manager.

  Raises:
    ValueError: If an eager tensor is encountered during graph construction
      while no function is being built.
  """
  if context.executing_eagerly():
    if op is not None:
      if not hasattr(op, "device"):
        # Convert plain values so that a device can be read off them.
        op = internal_convert_to_tensor_or_indexed_slices(op)
      return device(op.device)
    else:
      return NullContextmanager()
  else:
    default_graph = get_default_graph()
    if isinstance(op, EagerTensor):
      # Eager tensors may only appear here while a function is being built
      # (e.g. as captured values); anywhere else it is an error.
      if default_graph.building_function:
        return default_graph.device(op.device)
      else:
        raise ValueError("Encountered an Eager-defined Tensor during graph "
                         "construction, but a function was not being built.")
    return default_graph._colocate_with_for_gradient(
        op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)


# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False): return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing) @deprecation.deprecated( date=None, instructions="Colocations handled automatically by placer.") @tf_export(v1=["colocate_with"]) def _colocate_with(op, ignore_existing=False): return colocate_with(op, ignore_existing) @tf_export("control_dependencies") def control_dependencies(control_inputs): if context.executing_eagerly(): if control_inputs: # Execute any pending callables. for control in control_inputs: if callable(control): control() return NullContextmanager() else: return get_default_graph().control_dependencies(control_inputs) class _DefaultStack(threading.local): def __init__(self): super(_DefaultStack, self).__init__() self._enforce_nesting = True self.stack = [] def get_default(self): return self.stack[-1] if len(self.stack) >= 1 else None def reset(self): self.stack = [] def is_cleared(self): return not self.stack @property def enforce_nesting(self): return self._enforce_nesting @enforce_nesting.setter def enforce_nesting(self, value): self._enforce_nesting = value @tf_contextlib.contextmanager def get_controller(self, default): self.stack.append(default) try: yield default finally: # stack may be empty if reset() was called if self.stack: if self._enforce_nesting: if self.stack[-1] is not default: raise AssertionError( "Nesting violated for default stack of %s objects" % type(default)) self.stack.pop() else: self.stack.remove(default) _default_session_stack = _DefaultStack() # pylint: disable=protected-access def default_session(session): return _default_session_stack.get_controller(session) @tf_export(v1=["get_default_session"]) def get_default_session(): return _default_session_stack.get_default() def _eval_using_default_session(tensors, feed_dict, graph, session=None): if session is None: session = get_default_session() if session is None: raise ValueError("Cannot evaluate tensor using `eval()`: No default " "session is 
registered. Use `with " "sess.as_default()` or pass an explicit session to " "`eval(session=sess)`") if session.graph is not graph: raise ValueError("Cannot use the default session to evaluate tensor: " "the tensor's graph is different from the session's " "graph. Pass an explicit session to " "`eval(session=sess)`.") else: if session.graph is not graph: raise ValueError("Cannot use the given session to evaluate tensor: " "the tensor's graph is different from the session's " "graph.") return session.run(tensors, feed_dict) def _run_using_default_session(operation, feed_dict, graph, session=None): if session is None: session = get_default_session() if session is None: raise ValueError("Cannot execute operation using `run()`: No default " "session is registered. Use `with " "sess.as_default():` or pass an explicit session to " "`run(session=sess)`") if session.graph is not graph: raise ValueError("Cannot use the default session to execute operation: " "the operation's graph is different from the " "session's graph. Pass an explicit session to " "run(session=sess).") else: if session.graph is not graph: raise ValueError("Cannot use the given session to execute operation: " "the operation's graph is different from the session's " "graph.") session.run(operation, feed_dict) class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access def __init__(self): super(_DefaultGraphStack, self).__init__() self._global_default_graph = None def get_default(self): ret = super(_DefaultGraphStack, self).get_default() if ret is None: ret = self._GetGlobalDefaultGraph() return ret def _GetGlobalDefaultGraph(self): if self._global_default_graph is None: # TODO(mrry): Perhaps log that the default graph is being used, or set # provide some other feedback to prevent confusion when a mixture of # the global default graph and an explicit graph are combined in the # same process. 
self._global_default_graph = Graph() return self._global_default_graph def reset(self): super(_DefaultGraphStack, self).reset() self._global_default_graph = None @tf_contextlib.contextmanager def get_controller(self, default): context.context().context_switches.push(default.building_function, default.as_default, default._device_function_stack) try: with super(_DefaultGraphStack, self).get_controller(default) as g, context.graph_mode(): yield g finally: # If an exception is raised here it may be hiding a related exception in # the try-block (just above). context.context().context_switches.pop() _default_graph_stack = _DefaultGraphStack() # Shared helper used in init_scope and executing_eagerly_outside_functions # to obtain the outermost context that is not building a function, and the # innermost non empty device stack. def _get_outer_context_and_inner_device_stack(): default_graph = get_default_graph() outer_context = None innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access if not _default_graph_stack.stack: # If the default graph stack is empty, then we cannot be building a # function. Install the global graph (which, in this case, is also the # default graph) as the outer context. if default_graph.building_function: raise RuntimeError("The global graph is building a function.") outer_context = default_graph.as_default else: # Find a context that is not building a function. 
for stack_entry in reversed(context.context().context_switches.stack): if not innermost_nonempty_device_stack: innermost_nonempty_device_stack = stack_entry.device_stack if not stack_entry.is_building_function: outer_context = stack_entry.enter_context_fn break if outer_context is None: # As a last resort, obtain the global default graph; this graph doesn't # live on the context stack), but it is stored in the graph stack's outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default if outer_context is None: raise RuntimeError("All graphs are building functions, and no " "eager context was previously active.") return outer_context, innermost_nonempty_device_stack # pylint: disable=g-doc-return-or-yield,line-too-long @tf_export("init_scope") @tf_contextlib.contextmanager def init_scope(): # pylint: enable=g-doc-return-or-yield,line-too-long if context.executing_eagerly(): # Fastpath. with tape.stop_recording(): yield else: # Retrieve the active name scope: entering an `init_scope` preserves # the name scope of the current context. scope = get_default_graph().get_name_scope() if scope and scope[-1] != "/": # Names that end with trailing slashes are treated by `name_scope` as # absolute. scope = scope + "/" outer_context, innermost_nonempty_device_stack = ( _get_outer_context_and_inner_device_stack()) outer_graph = None outer_device_stack = None try: with outer_context(), name_scope( scope, skip_on_eager=False), control_dependencies( None), tape.stop_recording(): context_manager = NullContextmanager context_manager_input = None if not context.executing_eagerly(): # The device stack is preserved when lifting into a graph. 
Eager # execution doesn't implement device stacks and in particular it outer_graph = get_default_graph() outer_device_stack = outer_graph._device_function_stack outer_graph._device_function_stack = innermost_nonempty_device_stack elif innermost_nonempty_device_stack is not None: for device_spec in innermost_nonempty_device_stack.peek_objs(): if device_spec.function is None: break if device_spec.raw_string: context_manager = context.device context_manager_input = device_spec.raw_string break with context_manager(context_manager_input): yield finally: if outer_graph is not None: outer_graph._device_function_stack = outer_device_stack @tf_export(v1=["executing_eagerly_outside_functions"]) def executing_eagerly_outside_functions(): if context.executing_eagerly(): return True else: outer_context, _ = _get_outer_context_and_inner_device_stack() with outer_context(): return context.executing_eagerly() def inside_function(): return get_default_graph().building_function @tf_export(v1=["enable_eager_execution"]) def enable_eager_execution(config=None, device_policy=None, execution_mode=None): _api_usage_gauge.get_cell().set(True) if context.default_execution_mode != context.EAGER_MODE: return enable_eager_execution_internal( config=config, device_policy=device_policy, execution_mode=execution_mode, server_def=None) @tf_export(v1=["disable_eager_execution"]) def disable_eager_execution(): _api_usage_gauge.get_cell().set(False) context.default_execution_mode = context.GRAPH_MODE c = context.context_safe() if c is not None: c._thread_local_data.is_eager = False def enable_eager_execution_internal(config=None, device_policy=None, execution_mode=None, server_def=None): if config is not None and not isinstance(config, config_pb2.ConfigProto): raise TypeError("config must be a tf.ConfigProto, but got %s" % type(config)) if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT, context.DEVICE_PLACEMENT_WARN, context.DEVICE_PLACEMENT_SILENT, 
context.DEVICE_PLACEMENT_SILENT_FOR_INT32): raise ValueError( "device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*" ) if execution_mode not in (None, context.SYNC, context.ASYNC): raise ValueError( "execution_mode must be one of None, tf.contrib.eager.SYNC, " "tf.contrib.eager.ASYNC") if context.default_execution_mode == context.GRAPH_MODE: graph_mode_has_been_used = ( _default_graph_stack._global_default_graph is not None) if graph_mode_has_been_used: raise ValueError( "tf.enable_eager_execution must be called at program startup.") context.default_execution_mode = context.EAGER_MODE with context._context_lock: if context._context is None: context._set_context_locked(context.Context( config=config, device_policy=device_policy, execution_mode=execution_mode, server_def=server_def)) elif ((config is not None and config is not context._context._config) or (device_policy is not None and device_policy is not context._context._device_policy) or (execution_mode is not None and execution_mode is not context._context._execution_mode)): raise ValueError( "Trying to change the options of an active eager" " execution. Context config: %s, specified config:" " %s. Context device policy: %s, specified device" " policy: %s. Context execution mode: %s, " " specified execution mode %s." % (context._context._config, config, context._context._device_policy, device_policy, context._context._execution_mode, execution_mode)) else: context._context._thread_local_data.is_eager = True context.context = context.context_safe def eager_run(main=None, argv=None): enable_eager_execution() app.run(main, argv) @tf_export(v1=["reset_default_graph"]) def reset_default_graph(): if not _default_graph_stack.is_cleared(): raise AssertionError("Do not use tf.reset_default_graph() to clear " "nested graphs. 
If you need a cleared graph, " "exit the nesting and create a new graph.") _default_graph_stack.reset() @tf_export(v1=["get_default_graph"]) def get_default_graph(): return _default_graph_stack.get_default() def has_default_graph(): return len(_default_graph_stack.stack) >= 1 def get_name_scope(): if context.executing_eagerly(): return context.context().scope_name.rstrip("/") return get_default_graph().get_name_scope() def _assert_same_graph(original_item, item): if original_item.graph is not item.graph: raise ValueError("%s must be from the same graph as %s." % (item, original_item)) def _get_graph_from_inputs(op_input_list, graph=None): current_default_graph = get_default_graph() if current_default_graph.building_function: return current_default_graph op_input_list = tuple(op_input_list) if graph and not isinstance(graph, Graph): raise TypeError("Input graph needs to be a Graph: %s" % graph) original_graph_element = None for op_input in op_input_list: graph_element = None if (isinstance(op_input, (Operation, _TensorLike)) and ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): graph_element = op_input else: graph_element = _as_graph_element(op_input) if graph_element is not None: if not graph: original_graph_element = graph_element graph = graph_element.graph elif original_graph_element is not None: _assert_same_graph(original_graph_element, graph_element) elif graph_element.graph is not graph: raise ValueError("%s is not from the passed-in graph." 
% graph_element) return graph or current_default_graph @tf_export(v1=["GraphKeys"]) class GraphKeys(object): GLOBAL_VARIABLES = "variables" LOCAL_VARIABLES = "local_variables" METRIC_VARIABLES = "metric_variables" MODEL_VARIABLES = "model_variables" TRAINABLE_VARIABLES = "trainable_variables" SUMMARIES = "summaries" QUEUE_RUNNERS = "queue_runners" TABLE_INITIALIZERS = "table_initializer" ASSET_FILEPATHS = "asset_filepaths" MOVING_AVERAGE_VARIABLES = "moving_average_variables" REGULARIZATION_LOSSES = "regularization_losses" CONCATENATED_VARIABLES = "concatenated_variables" SAVERS = "savers" WEIGHTS = "weights" BIASES = "biases" ACTIVATIONS = "activations" UPDATE_OPS = "update_ops" LOSSES = "losses" SAVEABLE_OBJECTS = "saveable_objects" RESOURCES = "resources" LOCAL_RESOURCES = "local_resources" TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables" INIT_OP = "init_op" LOCAL_INIT_OP = "local_init_op" READY_OP = "ready_op" READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op" SUMMARY_OP = "summary_op" GLOBAL_STEP = "global_step" EVAL_STEP = "eval_step" TRAIN_OP = "train_op" COND_CONTEXT = "cond_context" WHILE_CONTEXT = "while_context" _SUMMARY_COLLECTION = "_SUMMARY_V2" _VARIABLE_COLLECTIONS = [ GLOBAL_VARIABLES, LOCAL_VARIABLES, METRIC_VARIABLES, MODEL_VARIABLES, TRAINABLE_VARIABLES, MOVING_AVERAGE_VARIABLES, CONCATENATED_VARIABLES, TRAINABLE_RESOURCE_VARIABLES, ] _STREAMING_MODEL_PORTS = "streaming_model_ports" @decorator_utils.classproperty @deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.") def VARIABLES(cls): return cls.GLOBAL_VARIABLES def dismantle_graph(graph): memory.dismantle_ordered_dict(graph._functions) graph_operations = graph.get_operations() for op in graph_operations: op.__dict__ = {} graph.__dict__ = {} @tf_export(v1=["add_to_collection"]) def add_to_collection(name, value): get_default_graph().add_to_collection(name, value) @tf_export(v1=["add_to_collections"]) def add_to_collections(names, value): 
get_default_graph().add_to_collections(names, value) @tf_export(v1=["get_collection_ref"]) def get_collection_ref(key): return get_default_graph().get_collection_ref(key) @tf_export(v1=["get_collection"]) def get_collection(key, scope=None): return get_default_graph().get_collection(key, scope) def get_all_collection_keys(): return get_default_graph().get_all_collection_keys() def name_scope(name, default_name=None, values=None, skip_on_eager=True): ctx = context.context() in_eager_mode = ctx.executing_eagerly() if not in_eager_mode: return internal_name_scope_v1(name, default_name, values) if skip_on_eager: return NullContextmanager() name = default_name if name is None else name if values: graph_value = next((value for value in values if type(value) == Tensor), None) if graph_value is not None: return graph_value.graph.name_scope(name) return name_scope_v2(name or "") class internal_name_scope_v1(object): @property def name(self): return self._name def __init__(self, name, default_name=None, values=None): if not (default_name is None or isinstance(default_name, six.string_types)): raise TypeError( "`default_name` type (%s) is not a string type. You likely meant to " "pass this into the `values` kwarg." % type(default_name)) self._name = default_name if name is None else name self._default_name = default_name self._values = values def __enter__(self): if self._name is None and self._values is not None: raise ValueError( "At least one of name (%s) and default_name (%s) must be provided." 
% (self._name, self._default_name)) g = get_default_graph() if self._values and not g.building_function: g_from_inputs = _get_graph_from_inputs(self._values) if g_from_inputs is not g: g = g_from_inputs self._g_manager = g.as_default() self._g_manager.__enter__() else: self._g_manager = None else: self._g_manager = None try: self._name_scope = g.name_scope(self._name) return self._name_scope.__enter__() except: if self._g_manager is not None: self._g_manager.__exit__(*sys.exc_info()) raise def __exit__(self, *exc_info): self._name_scope.__exit__(*exc_info) if self._g_manager is not None: self._g_manager.__exit__(*exc_info) @tf_export(v1=["name_scope"]) class name_scope_v1(object): @property def name(self): return self._name def __init__(self, name, default_name=None, values=None): self._name_scope = name_scope( name, default_name, values, skip_on_eager=False) self._name = default_name if name is None else name def __enter__(self): return self._name_scope.__enter__() def __exit__(self, *exc_info): return self._name_scope.__exit__(*exc_info) def enter_eager_name_scope(ctx, name): old_name = ctx.scope_name if not name: scope_name = "" else: if name.endswith("/"): scope_name = name else: scope_name = name + "/" if old_name: scope_name = old_name + scope_name ctx.scope_name = scope_name return scope_name, old_name @tf_export("name_scope", v1=[]) class name_scope_v2(object): def __init__(self, name): if name is None or not isinstance(name, six.string_types): raise ValueError("name for name_scope must be a string.") self._name = name self._exit_fns = [] @property def name(self): return self._name def __enter__(self): ctx = context.context() if ctx.executing_eagerly(): scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name) self._exit_fns.append( lambda *a: setattr(ctx, "scope_name", old_scope_name)) else: scope = get_default_graph().name_scope(self._name) scope_name = scope.__enter__() self._exit_fns.append(scope.__exit__) return scope_name def __exit__(self, 
type_arg, value_arg, traceback_arg): exit_fn = self._exit_fns.pop() exit_fn(type_arg, value_arg, traceback_arg) return False def strip_name_scope(name, export_scope): if export_scope: if export_scope[-1] == "/": export_scope = export_scope[:-1] try: str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)" return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1) except TypeError as e: logging.warning(e) return name else: return name def prepend_name_scope(name, import_scope): if import_scope: if import_scope[-1] == "/": import_scope = import_scope[:-1] try: str_to_replace = r"([\^]|loc:@|^)(.*)" return re.sub(str_to_replace, r"\1" + import_scope + r"/\2", compat.as_str(name)) except TypeError as e: logging.warning(e) return name else: return name @tf_export(v1=["op_scope"]) @tf_contextlib.contextmanager def op_scope(values, name, default_name=None): logging.warn("tf.op_scope(values, name, default_name) is deprecated," " use tf.name_scope(name, default_name, values)") with name_scope(name, default_name=default_name, values=values) as scope: yield scope _proto_function_registry = registry.Registry("proto functions") def register_proto_function(collection_name, proto_type=None, to_proto=None, from_proto=None): if to_proto and not callable(to_proto): raise TypeError("to_proto must be callable.") if from_proto and not callable(from_proto): raise TypeError("from_proto must be callable.") _proto_function_registry.register((proto_type, to_proto, from_proto), collection_name) def get_collection_proto_type(collection_name): try: return _proto_function_registry.lookup(collection_name)[0] except LookupError: return None def get_to_proto_function(collection_name): try: return _proto_function_registry.lookup(collection_name)[1] except LookupError: return None def get_from_proto_function(collection_name): try: return _proto_function_registry.lookup(collection_name)[2] except LookupError: return None def _operation_conversion_error(op, dtype=None, name=None, 
as_ref=False): raise TypeError(("Can't convert Operation '%s' to Tensor " "(target dtype=%r, name=%r, as_ref=%r)") % (op.name, dtype, name, as_ref)) def _op_to_colocate_with(v, graph): if v is None: return None if isinstance(v, Operation): return v # We always want to colocate with the reference op. # When 'v' is a ResourceVariable, the reference op is the handle creating op. # # What this should be is: # if isinstance(v, ResourceVariable): # return v.handle.op # However, that would require a circular import dependency. # As of October 2018, there were attempts underway to remove # colocation constraints altogether. Assuming that will # happen soon, perhaps this hack to work around the circular # import dependency is acceptable. if hasattr(v, "handle") and isinstance(v.handle, Tensor): if graph.building_function: return graph.capture(v.handle).op else: return v.handle.op return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op def _is_keras_symbolic_tensor(x): return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph" tensor_conversion_registry.register_tensor_conversion_function( Operation, _operation_conversion_error) # These symbols were originally defined in this module; import them for # backwards compatibility until all references have been updated to access # them from the indexed_slices.py module. 
IndexedSlices = indexed_slices.IndexedSlices IndexedSlicesValue = indexed_slices.IndexedSlicesValue convert_to_tensor_or_indexed_slices = \ indexed_slices.convert_to_tensor_or_indexed_slices convert_n_to_tensor_or_indexed_slices = \ indexed_slices.convert_n_to_tensor_or_indexed_slices internal_convert_to_tensor_or_indexed_slices = \ indexed_slices.internal_convert_to_tensor_or_indexed_slices internal_convert_n_to_tensor_or_indexed_slices = \ indexed_slices.internal_convert_n_to_tensor_or_indexed_slices register_tensor_conversion_function = \ tensor_conversion_registry.register_tensor_conversion_function # Helper functions for op wrapper modules generated by `python_op_gen`. def to_raw_op(f): # Copy `f` to get a new `__dict__`, otherwise `tf_export` will fail # due to double-registration. f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__, f.__closure__) return kwarg_only(f) def raise_from_not_ok_status(e, name): message = e.message + (" name: " + name if name is not None else "") # pylint: disable=protected-access six.raise_from(core._status_to_exception(e.code, message), None) # pylint: enable=protected-access def add_exit_callback_to_default_func_graph(fn): default_graph = get_default_graph() if not default_graph._building_function: # pylint: disable=protected-access raise RuntimeError( "Cannot add scope exit callbacks when not building a function. 
" "Default graph: {}".format(default_graph)) default_graph._add_scope_exit_callback(fn) # pylint: disable=protected-access def _reconstruct_sequence_inputs(op_def, inputs, attrs): grouped_inputs = [] i = 0 for input_arg in op_def.input_arg: if input_arg.number_attr: input_len = attrs[input_arg.number_attr].i is_sequence = True elif input_arg.type_list_attr: input_len = len(attrs[input_arg.type_list_attr].list.type) is_sequence = True else: input_len = 1 is_sequence = False if is_sequence: grouped_inputs.append(inputs[i:i + input_len]) else: grouped_inputs.append(inputs[i]) i += input_len assert i == len(inputs) return grouped_inputs class _TensorIterator(object): def __init__(self, tensor, dim0): self._tensor = tensor self._index = 0 self._limit = dim0 def __iter__(self): return self def __next__(self): if self._index == self._limit: raise StopIteration result = self._tensor[self._index] self._index += 1 return result next = __next__ # python2.x compatibility.
true
true
f716e0b7ca7df97d982aaf86ed718db537ad482b
49,994
py
Python
research/compression/distillation/resnet.py
ragavvenkatesan/models
420a88c7af20dae8d79dbc1b4351fef41be361c8
[ "Apache-2.0" ]
null
null
null
research/compression/distillation/resnet.py
ragavvenkatesan/models
420a88c7af20dae8d79dbc1b4351fef41be361c8
[ "Apache-2.0" ]
null
null
null
research/compression/distillation/resnet.py
ragavvenkatesan/models
420a88c7af20dae8d79dbc1b4351fef41be361c8
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains definitions for the preactivation form of Residual Networks (also known as ResNet v2). Residual networks (ResNets) were originally proposed in: [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Deep Residual Learning for Image Recognition. arXiv:1512.03385 The full preactivation 'v2' ResNet variant implemented in this module was introduced by: [2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Identity Mappings in Deep Residual Networks. arXiv: 1603.05027 The key difference of the full preactivation 'v2' variant compared to the 'v1' variant in [1] is the use of batch normalization before every weight layer rather than after. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import tensorflow as tf _BATCH_NORM_DECAY = 0.997 _BATCH_NORM_EPSILON = 1e-5 ################################################################################ # Functions for input processing. ################################################################################ def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer, parse_record_fn, num_epochs=1, num_parallel_calls=1): """Given a Dataset with raw records, parse each record into images and labels, and return an iterator over the records. 
Args: dataset: A Dataset representing raw records is_training: A boolean denoting whether the input is for training. batch_size: The number of samples per batch. shuffle_buffer: The buffer size to use when shuffling records. A larger value results in better randomness, but smaller values reduce startup time and use less memory. parse_record_fn: A function that takes a raw record and returns the corresponding (image, label) pair. num_epochs: The number of epochs to repeat the dataset. num_parallel_calls: The number of records that are processed in parallel. This can be optimized per data set but for generally homogeneous data sets, should be approximately the number of available CPU cores. Returns: Dataset of (image, label) pairs ready for iteration. """ # We prefetch a batch at a time, This can help smooth out the time taken to # load input files as we go through shuffling and processing. dataset = dataset.prefetch(buffer_size=batch_size) if is_training: # Shuffle the records. Note that we shuffle before repeating to ensure # that the shuffling respects epoch boundaries. dataset = dataset.shuffle(buffer_size=shuffle_buffer) # If we are training over multiple epochs before evaluating, repeat the # dataset for the appropriate number of epochs. dataset = dataset.repeat(num_epochs) # Parse the raw records into images and labels dataset = dataset.map(lambda value: parse_record_fn(value, is_training), num_parallel_calls=num_parallel_calls) dataset = dataset.batch(batch_size) # Operations between the final prefetch and the get_next call to the iterator # will happen synchronously during run time. We prefetch here again to # background all of the above processing work and keep it out of the # critical training path. dataset = dataset.prefetch(1) return dataset ################################################################################ # Functions building the ResNet model. 
################################################################################ def batch_norm_relu(inputs, training, data_format): """Performs a batch normalization followed by a ReLU.""" # We set fused=True for a significant performance boost. See # https://www.tensorflow.org/performance/performance_guide#common_fused_ops inputs = tf.layers.batch_normalization( inputs=inputs, axis=1 if data_format == 'channels_first' else 3, momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True, scale=True, training=training, fused=True) inputs = tf.nn.relu(inputs) return inputs def fixed_padding(inputs, kernel_size, data_format): """Pads the input along the spatial dimensions independently of input size. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. kernel_size: The kernel to be used in the conv2d or max_pool2d operation. Should be a positive integer. data_format: The input format ('channels_last' or 'channels_first'). Returns: A tensor with the same format as the input with the data either intact (if kernel_size == 1) or padded (if kernel_size > 1). """ pad_total = kernel_size - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg if data_format == 'channels_first': padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]) else: padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) return padded_inputs def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format): """Strided 2-D convolution with explicit padding.""" # The padding is consistent and is based only on `kernel_size`, not on the # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). 
if strides > 1: inputs = fixed_padding(inputs, kernel_size, data_format) return tf.layers.conv2d( inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides, padding=('SAME' if strides == 1 else 'VALID'), use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format) def building_block(inputs, filters, training, projection_shortcut, strides, data_format): """Standard building block for residual networks with BN before convolutions. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. filters: The number of filters for the convolutions. training: A Boolean for whether the model is in training or inference mode. Needed for batch normalization. projection_shortcut: The function to use for projection shortcuts (typically a 1x1 convolution when downsampling the input). strides: The block's stride. If greater than 1, this block will ultimately downsample the input. data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block. """ shortcut = inputs inputs = batch_norm_relu(inputs, training, data_format) # The projection shortcut should come after the first batch norm and ReLU # since it performs a 1x1 convolution. if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format) inputs = batch_norm_relu(inputs, training, data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format) return inputs + shortcut def bottleneck_block(inputs, filters, training, projection_shortcut, strides, data_format): """Bottleneck block variant for residual networks with BN before convolutions. 
Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. filters: The number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. training: A Boolean for whether the model is in training or inference mode. Needed for batch normalization. projection_shortcut: The function to use for projection shortcuts (typically a 1x1 convolution when downsampling the input). strides: The block's stride. If greater than 1, this block will ultimately downsample the input. data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block. """ shortcut = inputs inputs = batch_norm_relu(inputs, training, data_format) # The projection shortcut should come after the first batch norm and ReLU # since it performs a 1x1 convolution. if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=1, strides=1, data_format=data_format) inputs = batch_norm_relu(inputs, training, data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format) inputs = batch_norm_relu(inputs, training, data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=4 * filters, kernel_size=1, strides=1, data_format=data_format) return inputs + shortcut def block_layer(inputs, filters, block_fn, blocks, strides, training, name, data_format): """Creates one layer of blocks for the ResNet model. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. filters: The number of filters for the first convolution of the layer. block_fn: The block to use within the model, either `building_block` or `bottleneck_block`. blocks: The number of blocks contained in the layer. 
strides: The stride to use for the first convolution of the layer. If greater than 1, this layer will ultimately downsample the input. training: Either True or False, whether we are currently training the model. Needed for batch norm. name: A string name for the tensor output of the block layer. data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block layer. """ # Bottleneck blocks end with 4x the number of filters as they start with filters_out = 4 * filters if block_fn is bottleneck_block else filters def projection_shortcut(inputs): return conv2d_fixed_padding( inputs=inputs, filters=filters_out, kernel_size=1, strides=strides, data_format=data_format) # Only the first block per block_layer uses projection_shortcut and strides inputs = block_fn(inputs, filters, training, projection_shortcut, strides, data_format) for _ in range(1, blocks): inputs = block_fn(inputs, filters, training, None, 1, data_format) return tf.identity(inputs, name) class Model(object): """Base class for building the Resnet v2 Model. """ def __init__(self, resnet_size, num_classes, num_filters, kernel_size, conv_stride, first_pool_size, first_pool_stride, probe_pool_size, second_pool_size, second_pool_stride, probe_pool_stride, block_fn, block_sizes, pool_type, num_probes, block_strides, final_size, data_format=None): """Creates a model for classifying an image. Args: resnet_size: A single integer for the size of the ResNet model. probe_pool_size: Number to pool the probes by. probe_pool_stride: stride size for the probe pooling layer num_classes: The number of classes used as labels. num_filters: The number of filters to use for the first block layer of the model. This number is then doubled for each subsequent block layer. kernel_size: The kernel size to use for convolution. conv_stride: stride size for the initial convolutional layer first_pool_size: Pool size to be used for the first pooling layer. 
If none, the first pooling layer is skipped. first_pool_stride: stride size for the first pooling layer. Not used if first_pool_size is None. second_pool_size: Pool size to be used for the second pooling layer. second_pool_stride: stride size for the final pooling layer block_fn: Which block layer function should be used? Pass in one of the two functions defined above: building_block or bottleneck_block block_sizes: A list containing n values, where n is the number of sets of block layers desired. Each value should be the number of blocks in the i-th set. pool_type: 'max' or 'mean'. block_strides: List of integers representing the desired stride size for each of the sets of block layers. Should be same length as block_sizes. final_size: The expected size of the model after the second pooling. data_format: Input format ('channels_last', 'channels_first', or None). If set to None, the format is dependent on whether a GPU is available. """ self.resnet_size = resnet_size if not data_format: data_format = ( 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last') self.data_format = data_format self.num_classes = num_classes self.num_filters = num_filters self.kernel_size = kernel_size self.conv_stride = conv_stride self.first_pool_size = first_pool_size self.first_pool_stride = first_pool_stride self.second_pool_size = second_pool_size self.second_pool_stride = second_pool_stride self.probe_pool_size = probe_pool_size self.probe_pool_stride = probe_pool_stride self.block_fn = block_fn self.block_sizes = block_sizes self.block_strides = block_strides self.final_size = final_size self.pool_type = pool_type self.num_probes = num_probes def __call__(self, inputs, training): """Add operations to classify a batch of input images. Args: inputs: A Tensor representing a batch of input images. training: A boolean. Set to True to add operations required only when training the classifier. Returns: A logits Tensor with shape [<batch_size>, self.num_classes]. 
""" with tf.variable_scope('input_transforms'): if self.data_format == 'channels_first': # Convert the inputs from channels_last (NHWC) to channels_first (NCHW). # This provides a large performance boost on GPU. See # https://www.tensorflow.org/performance/performance_guide#data_formats inputs = tf.transpose(inputs, [0, 3, 1, 2]) with tf.variable_scope('mentor') as scope: # mentor mentor = conv2d_fixed_padding( inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size, strides=self.conv_stride, data_format=self.data_format) mentor = tf.identity(mentor, 'mentor_' + 'initial_conv') if self.first_pool_size: mentor = tf.layers.max_pooling2d( inputs=mentor, pool_size=self.first_pool_size, strides=self.first_pool_stride, padding='SAME', data_format=self.data_format) mentor = tf.identity(mentor, 'mentor_' + 'initial_max_pool') mentor_probes = [] probe_count = 0 for i, num_blocks in enumerate(self.block_sizes[0]): num_filters = self.num_filters * (2**i) mentor = block_layer( inputs=mentor, filters=num_filters, block_fn=self.block_fn, blocks=num_blocks, strides=self.block_strides[i], training=training, name='mentor_' + 'block_layer{}'.format(i + 1), data_format=self.data_format) if probe_count < self.num_probes: if self.probe_pool_size > 0: if self.pool_type == 'max': mentor_probe = tf.layers.max_pooling2d( inputs=mentor, pool_size=self.probe_pool_size, strides=self.probe_pool_stride, padding='SAME', data_format=self.data_format) mentor_probe = tf.identity(mentor, 'mentor_'+'probe_max_pool_' \ + str(i)) elif self.pool_type == 'mean': mentor_probe = tf.layers.average_pooling2d( inputs=mentor, pool_size=self.probe_pool_size, strides=self.probe_pool_stride, padding='SAME', data_format=self.data_format) mentor_probe = tf.identity(mentor, 'mentor_'+'probe_mean_pool_' \ + str(i)) else: mentor_probe = mentor mentor_probes.append(mentor_probe) probe_count+=1 mentor = batch_norm_relu(mentor, training, self.data_format) mentor = tf.layers.average_pooling2d( inputs=mentor, 
pool_size=self.second_pool_size, strides=self.second_pool_stride, padding='VALID', data_format=self.data_format) mentor = tf.identity(mentor, 'mentor_' + 'final_avg_pool') mentor = tf.reshape(mentor, [-1, self.final_size]) mentor = tf.layers.dense(inputs=mentor, units=self.num_classes) mentor = tf.identity(mentor, 'mentor_' + 'final_dense') mentor_probes.append(mentor) with tf.variable_scope('mentee') as scope: # mentee mentee = conv2d_fixed_padding( inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size, strides=self.conv_stride, data_format=self.data_format) mentee = tf.identity(mentee, 'mentee_' + 'initial_conv') if self.first_pool_size: mentee = tf.layers.max_pooling2d( inputs=mentee, pool_size=self.first_pool_size, strides=self.first_pool_stride, padding='SAME', data_format=self.data_format) mentee = tf.identity(mentee, 'mentee_' + 'initial_max_pool') probe_count = 0 mentee_probes = [] for i, num_blocks in enumerate(self.block_sizes[1]): num_filters = self.num_filters * (2**i) mentee = block_layer( inputs=mentee, filters=num_filters, block_fn=self.block_fn, blocks=num_blocks, strides=self.block_strides[i], training=training, name='mentee_' + 'block_layer{}'.format(i + 1), data_format=self.data_format) if probe_count < self.num_probes: if self.probe_pool_size > 0: if self.pool_type == 'max': mentee_probe = tf.layers.max_pooling2d( inputs=mentee, pool_size=self.probe_pool_size, strides=self.probe_pool_stride, padding='SAME', data_format=self.data_format) mentee_probe = tf.identity(mentee, 'mentee_'+'probe_max_pool_' \ + str(i)) elif self.pool_type == 'mean': mentee_probe = tf.layers.average_pooling2d( inputs=mentee, pool_size=self.probe_pool_size, strides=self.probe_pool_stride, padding='SAME', data_format=self.data_format) mentee_probe = tf.identity(mentee, 'mentee_'+'probe_max_pool_' \ + str(i)) else: mentee_probe=mentee mentee_probes.append(mentee_probe) probe_count+=1 mentee = batch_norm_relu(mentee, training, self.data_format) mentee = 
tf.layers.average_pooling2d( inputs=mentee, pool_size=self.second_pool_size, strides=self.second_pool_stride, padding='VALID', data_format=self.data_format) mentee = tf.identity(mentee, 'mentee_' + 'final_avg_pool') mentee = tf.reshape(mentee, [-1, self.final_size]) mentee = tf.layers.dense(inputs=mentee, units=self.num_classes) mentee = tf.identity(mentee, 'mentee_' + 'final_dense') mentee_probes.append(mentee) probe_cost = tf.constant(0.) for mentor_feat, mentee_feat in zip(mentor_probes, mentee_probes): probe_cost = probe_cost + tf.losses.mean_squared_error ( mentor_feat, mentee_feat) return (mentor, mentee, probe_cost) ################################################################################ # Functions for running training/eval/validation loops for the model. ################################################################################ def learning_rate_with_decay( batch_size, batch_denom, num_images, boundary_epochs, decay_rates): """Get a learning rate that decays step-wise as training progresses. Args: batch_size: the number of examples processed in each training batch. batch_denom: this value will be used to scale the base learning rate. `0.1 * batch size` is divided by this number, such that when batch_denom == batch_size, the initial learning rate will be 0.1. num_images: total number of images that will be used for training. boundary_epochs: list of ints representing the epochs at which we decay the learning rate. decay_rates: list of floats representing the decay rates to be used for scaling the learning rate. Should be the same length as boundary_epochs. Returns: Returns a function that takes a single argument - the number of batches trained so far (global_step)- and returns the learning rate to be used for training the next batch. """ with tf.variable_scope('learning_rate'): initial_learning_rate = 0.01 * batch_size / batch_denom batches_per_epoch = num_images / batch_size # Multiply the learning rate by 0.1 at 100, 150, and 200 epochs. 
boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs] vals = [initial_learning_rate * decay for decay in decay_rates] def learning_rate_fn(global_step): global_step = tf.cast(global_step, tf.int32) rval = tf.train.piecewise_constant(global_step, boundaries, vals) return rval return learning_rate_fn def learning_rate_with_decay_2( initial_learning_rate, batch_size, batch_denom, num_images, boundary_epochs, decay_rates): """Get a learning rate that decays step-wise as training progresses. Args: batch_size: the number of examples processed in each training batch. batch_denom: this value will be used to scale the base learning rate. `0.1 * batch size` is divided by this number, such that when batch_denom == batch_size, the initial learning rate will be 0.1. num_images: total number of images that will be used for training. boundary_epochs: list of ints representing the epochs at which we decay the learning rate. decay_rates: list of floats representing the decay rates to be used for scaling the learning rate. Should be the same length as boundary_epochs. Returns: Returns a function that takes a single argument - the number of batches trained so far (global_step)- and returns the learning rate to be used for training the next batch. 
""" with tf.variable_scope('learning_rate'): batches_per_epoch = num_images / batch_size boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs] vals = [initial_learning_rate * decay for decay in decay_rates] def learning_rate_fn(global_step): global_step = tf.cast(global_step, tf.int32) rval = tf.train.piecewise_constant(global_step, boundaries, vals) return rval return learning_rate_fn def distillation_coeff_fn(intital_distillation, global_step): global_step = tf.cast(global_step, tf.int32) rval = tf.train.exponential_decay ( intital_distillation, global_step, 100000, 0.55, staircase = False) return rval def resnet_model_fn(features, labels, mode, model_class, trainee, distillation_coeff, probes_coeff, resnet_size, num_probes, weight_decay_coeff, learning_rate_fn_mentor, learning_rate_fn_mentee, learning_rate_fn_finetune, momentum, data_format, pool_probes, pool_type, temperature=1, optimizer='momentum', loss_filter_fn=None): """Shared functionality for different resnet model_fns. Initializes the ResnetModel representing the model layers and uses that model to build the necessary EstimatorSpecs for the `mode` in question. For training, this means building losses, the optimizer, and the train op that get passed into the EstimatorSpec. For evaluation and prediction, the EstimatorSpec is returned without a train op, but with the necessary parameters for the given mode. Args: features: tensor representing input images labels: tensor representing class labels for all input images mode: current estimator mode; should be one of `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT` model_class: a class representing a TensorFlow model that has a __call__ function. We assume here that this is a subclass of ResnetModel. trainee: A string either `'mentee'` or `'mentor`'. resnet_size: A list of two integers for the size of the ResNet model for mentor followed by mentee. weight_decay_coeff: weight decay rate used to regularize learned variables. 
distillation_coeff: Weight for distillation. probes_coeff: weight for probes. learning_rate_fn_mentor: function that returns the current learning rate given the current global_step learning_rate_fn_mentee: function that returns the current learning rate given the current global_step learning_rate_fn_finetune: function that returns the current learning rate given the current global_step num_probes: How many equally spaced probes do we need. momentum: momentum term used for optimization. data_format: Input format ('channels_last', 'channels_first', or None). If set to None, the format is dependent on whether a GPU is available. temperature: A value of temperature to use for distillation. Defaults to 1 so that it will remain backward compatible. loss_filter_fn: function that takes a string variable name and returns True if the var should be included in loss calculation, and False otherwise. If None, batch_normalization variables will be excluded from the loss. pool_probes: Downsampling for probes. pool_type: 'max' or 'mean'. optimizer: 'adam', 'adadelta' and 'momentum' are options. Returns: EstimatorSpec parameterized according to the input params and the current mode. 
""" with tf.variable_scope('inputs'): # Generate a summary node for the images tf.summary.image('images', features, max_outputs=6) model = model_class(resnet_size = resnet_size, pool_probes = pool_probes, pool_type = pool_type, num_probes = num_probes, data_format = data_format) logits_mentor, logits_mentee, probe_cost = model(features, mode == tf.estimator.ModeKeys.TRAIN) predictions_mentor = { 'classes': tf.argmax(logits_mentor, axis=1), 'probabilities': tf.nn.softmax(logits_mentor, name='softmax_tensor_mentor'), } predictions_mentee = { 'classes': tf.argmax(logits_mentee, axis=1), 'probabilities': tf.nn.softmax(logits_mentee, name='softmax_tensor_mentee'), } if mode == tf.estimator.ModeKeys.PREDICT: if trainee == 'mentor': return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions_mentor) elif trainee == 'mentee' or trainee == 'finetune': return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions_mentee) with tf.variable_scope('distillery'): temperature_softmax_mentor = tf.nn.softmax((tf.div(logits_mentor, temperature)), name ='softmax_temperature_tensor_mentor') distillation_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( logits = tf.div(logits_mentee,temperature), labels = temperature_softmax_mentor)) tf.identity(distillation_loss, name='distillation_loss') tf.summary.scalar('distillation_loss', distillation_loss) tf.summary.scalar('scaled_distillation_loss', distillation_coeff * distillation_loss) with tf.variable_scope('cross_entropy'): # Calculate loss, which includes softmax cross entropy and L2 regularization. cross_entropy_mentor = tf.losses.softmax_cross_entropy( logits=logits_mentor, onehot_labels=labels) # Create a tensor named cross_entropy for logging purposes. 
tf.identity(cross_entropy_mentor, name='cross_entropy_mentor') tf.summary.scalar('cross_entropy_mentor', cross_entropy_mentor) cross_entropy_mentee = tf.losses.softmax_cross_entropy( logits=logits_mentee, onehot_labels=labels) tf.identity(cross_entropy_mentee, name='cross_entropy_mentee') tf.summary.scalar('cross_entropy_mentee', cross_entropy_mentee) # If no loss_filter_fn is passed, assume we want the default behavior, # which is that batch_normalization variables are excluded from loss. if not loss_filter_fn: def loss_filter_fn(name): return 'batch_normalization' not in name mentor_variables=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='mentor') mentee_variables=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='mentee') with tf.variable_scope('regularizers'): if weight_decay_coeff > 0: l2_mentor = weight_decay_coeff * tf.add_n( [tf.nn.l2_loss(v) for v in mentor_variables if loss_filter_fn(v.name)]) l2_mentee = weight_decay_coeff * tf.add_n( [tf.nn.l2_loss(v) for v in mentee_variables if loss_filter_fn(v.name)]) else: l2_mentor = tf.constant(0.) l2_mentee = tf.constant(0.) if mode == tf.estimator.ModeKeys.TRAIN: with tf.variable_scope('learning_rates'): global_step = tf.train.get_or_create_global_step() learning_rate_mentor = learning_rate_fn_mentor(global_step) learning_rate_mentee = learning_rate_fn_mentee(global_step) learning_rate_finetune = learning_rate_fn_finetune(global_step) tf.identity(learning_rate_mentor, name='learning_rate_mentor' ) tf.summary.scalar('learning_rate_mentor', learning_rate_mentor) tf.identity(learning_rate_mentee, name='learning_rate_mentee' ) tf.summary.scalar('learning_rate_mentee', learning_rate_mentee) tf.identity(learning_rate_finetune, name='learning_rate_finetune' ) tf.summary.scalar('learning_rate_finetune', learning_rate_finetune) with tf.variable_scope('mentor_cumulative_loss'): # Add weight decay and distillation to the loss. 
loss_mentor = cross_entropy_mentor + l2_mentor tf.summary.scalar('objective', loss_mentor) with tf.variable_scope('mentee_cumulative_loss'): distillation_coeff_decayed = distillation_coeff_fn(distillation_coeff, global_step) probe_scale = probes_coeff * distillation_coeff_decayed tf.identity(probe_cost, name='probe_cost') tf.summary.scalar('probe_loss', probe_cost) tf.summary.scalar('scaled_probe_loss', probe_scale * probe_cost) tf.identity(distillation_coeff, name='distillation_coeff_decayed') tf.summary.scalar('coeff',distillation_coeff_decayed) loss_mentee = cross_entropy_mentee + l2_mentee + \ distillation_coeff_decayed * distillation_loss + \ probe_scale * probe_cost tf.summary.scalar('objective', loss_mentee) with tf.variable_scope('mentee_finetune'): loss_finetune = cross_entropy_mentee + l2_mentee tf.summary.scalar('objective', loss_finetune) if optimizer[0] == 'momentum': with tf.variable_scope('mentor_momentum_optimizer'): optimizer_mentor = tf.train.MomentumOptimizer( learning_rate=learning_rate_mentor, momentum=momentum) elif optimizer[0] == 'adam': with tf.variable_scope('mentor_adam_optimizer'): optimizer_mentor = tf.train.AdamOptimizer( learning_rate=learning_rate_mentor) elif optimizer[0] == 'adadelta': with tf.variable_scope('mentor_adadelta_optimizer'): optimizer_mentor = tf.train.AdadeltaOptimizer( learning_rate=learning_rate_mentor) if optimizer[1] == 'momentum': with tf.variable_scope('mentee_momentum_optimizer'): optimizer_mentee = tf.train.MomentumOptimizer( learning_rate=learning_rate_mentee, momentum=momentum) elif optimizer[1] == 'adam': with tf.variable_scope('mentee_adam_optimizer'): optimizer_mentee = tf.train.AdamOptimizer( learning_rate=learning_rate_mentee) elif optimizer[1] == 'adadelta': with tf.variable_scope('mentee_adadelta_optimizer'): optimizer_mentee = tf.train.AdadeltaOptimizer( learning_rate=learning_rate_mentee) if optimizer[2] == 'momentum': with tf.variable_scope('finetune_momentum_optimizer'): optimizer_finetune = 
tf.train.MomentumOptimizer( learning_rate=learning_rate_finetune, momentum=momentum) elif optimizer[2] == 'adam': with tf.variable_scope('finetune_adam_optimizer'): optimizer_finetune = tf.train.AdamOptimizer( learning_rate=learning_rate_finetune) elif optimizer[2] == 'adadelta': with tf.variable_scope('finetune_adadelta_optimizer'): optimizer_finetune = tf.train.AdadeltaOptimizer( learning_rate=learning_rate_finetune) # Batch norm requires update ops to be added as a dependency to train_op update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): with tf.variable_scope('optimizers'): train_op_mentor = optimizer_mentor.minimize(loss_mentor, global_step, var_list = mentor_variables) train_op_mentee = optimizer_mentee.minimize(loss_mentee, global_step, var_list = mentee_variables) train_op_finetune = optimizer_finetune.minimize(loss_finetune, global_step, var_list = mentee_variables) else: with tf.variable_scope('mentor_cumulative_loss'): # Add weight decay and distillation to the loss. 
loss_mentor = cross_entropy_mentor + weight_decay_coeff * l2_mentor with tf.variable_scope('mentee_cumulative_loss'): loss_mentee = cross_entropy_mentee + weight_decay_coeff * l2_mentee with tf.variable_scope('mentee_finetune'): loss_finetune = cross_entropy_mentee + weight_decay_coeff * l2_mentee train_op_mentor = None train_op_mentee = None train_op_finetune = None with tf.variable_scope('metrics'): accuracy_mentor = tf.metrics.accuracy( tf.argmax(labels, axis=1), predictions_mentor['classes']) accuracy_mentee = tf.metrics.accuracy( tf.argmax(labels, axis=1), predictions_mentee['classes']) metrics = {'accuracy_mentor': accuracy_mentor, 'accuracy_mentee': accuracy_mentee} # Create a tensor named train_accuracy for logging purposes tf.identity(accuracy_mentor[1], name='train_accuracy_mentor') tf.summary.scalar('train_accuracy_mentor', accuracy_mentor[1]) tf.identity(accuracy_mentee[1], name='train_accuracy_mentee') tf.summary.scalar('train_accuracy_mentee', accuracy_mentee[1]) saver=tf.train.Saver(var_list = tf.global_variables()) if trainee == 'mentor': return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions_mentor, loss=loss_mentor, train_op=train_op_mentor, scaffold=tf.train.Scaffold(saver=saver), eval_metric_ops=metrics) elif trainee == 'mentee': return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions_mentee, loss=loss_mentee, train_op=train_op_mentee, scaffold=tf.train.Scaffold(saver=saver), eval_metric_ops=metrics) elif trainee == 'finetune': return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions_mentee, loss=loss_finetune, train_op=train_op_finetune, scaffold=tf.train.Scaffold(saver=saver), eval_metric_ops=metrics) def resnet_main(flags, model_function, input_function): # Using the Winograd non-fused algorithms provides a small performance boost. os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' # Set up a RunConfig to only save checkpoints once per training cycle. 
run_config = tf.estimator.RunConfig().replace(save_checkpoints_secs=1e9) mentor = tf.estimator.Estimator( model_fn=model_function, model_dir=flags.model_dir, config=run_config, params={ 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee], 'data_format': flags.data_format, 'batch_size': flags.batch_size, 'distillation_coeff': flags.distillation_coeff, 'probes_coeff': flags.probes_coeff, 'weight_decay_coeff': flags.weight_decay_coeff, 'optimizer': [flags.mentor_optimizer, flags.mentee_optimizer, flags.finetune_optimizer], 'temperature': flags.temperature, 'num_probes': flags.num_probes, 'pool_probes': flags.pool_probes, 'train_epochs_mentor': flags.train_epochs_mentor, 'train_epochs_mentee': flags.train_epochs_mentee, 'train_epochs_finetune': flags.train_epochs_finetune, 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor, 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee, 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune, 'pool_type': flags.pool_type, 'trainee': 'mentor' }) for i in range(flags.train_epochs_mentor // flags.epochs_per_eval): tensors_to_log = { 'learning_rate': 'learning_rates/learning_rate_mentor', 'cross_entropy': 'cross_entropy/cross_entropy_mentor' , 'train_accuracy': 'metrics/train_accuracy_mentor' } logging_hook = tf.train.LoggingTensorHook( tensors=tensors_to_log, every_n_iter=100) def input_fn_train(): return input_function(True, flags.data_dir, flags.batch_size, flags.epochs_per_eval, flags.num_parallel_calls) print(' *********************** ' ) print(' Starting a mentor training cycle. 
[' + str(i) + '/' + str(flags.train_epochs_mentor // flags.epochs_per_eval) + ']') print(' *********************** ' ) mentor.train(input_fn=input_fn_train, hooks=[logging_hook]) print('Starting to evaluate.') # Evaluate the model and print results def input_fn_eval(): return input_function(False, flags.data_dir, flags.batch_size, 1, flags.num_parallel_calls) eval_results = mentor.evaluate(input_fn=input_fn_eval) print(eval_results) mentee = tf.estimator.Estimator( model_fn=model_function, model_dir=flags.model_dir, config=run_config, params={ 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee], 'data_format': flags.data_format, 'batch_size': flags.batch_size, 'distillation_coeff': flags.distillation_coeff, 'probes_coeff': flags.probes_coeff, 'optimizer': [flags.mentor_optimizer, flags.mentee_optimizer, flags.finetune_optimizer], 'weight_decay_coeff': flags.weight_decay_coeff, 'temperature': flags.temperature, 'num_probes': flags.num_probes, 'pool_probes': flags.pool_probes, 'train_epochs_mentor': flags.train_epochs_mentor, 'train_epochs_mentee': flags.train_epochs_mentee, 'train_epochs_finetune': flags.train_epochs_finetune, 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor, 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee, 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune, 'pool_type': flags.pool_type, 'trainee': 'mentee' }) for i in range(flags.train_epochs_mentee // flags.epochs_per_eval): tensors_to_log = { 'learning_rate': 'learning_rates/learning_rate_mentee', 'cross_entropy': 'cross_entropy/cross_entropy_mentee', 'train_accuracy': 'metrics/train_accuracy_mentee', 'distillation_loss': 'distillery/distillation_loss', 'distillation_coeff':'mentee_cumulative_loss/distillation_coeff_decayed' } logging_hook = tf.train.LoggingTensorHook( tensors=tensors_to_log, every_n_iter=100) def input_fn_train(): return input_function(True, flags.data_dir, flags.batch_size, flags.epochs_per_eval, 
flags.num_parallel_calls) print(' *********************** ' ) print(' Starting a mentee training cycle. [' + str(i) + '/' + str(flags.train_epochs_mentee // flags.epochs_per_eval) + ']') print(' *********************** ' ) mentee.train(input_fn=input_fn_train, hooks=[logging_hook]) print('Starting to evaluate.') # Evaluate the model and print results def input_fn_eval(): return input_function(False, flags.data_dir, flags.batch_size, 1, flags.num_parallel_calls) eval_results = mentee.evaluate(input_fn=input_fn_eval) print(eval_results) finetune = tf.estimator.Estimator( model_fn=model_function, model_dir=flags.model_dir, config=run_config, params={ 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee], 'data_format': flags.data_format, 'batch_size': flags.batch_size, 'distillation_coeff': flags.distillation_coeff, 'probes_coeff': flags.probes_coeff, 'optimizer': [flags.mentor_optimizer, flags.mentee_optimizer, flags.finetune_optimizer], 'weight_decay_coeff': flags.weight_decay_coeff, 'temperature': flags.temperature, 'num_probes': flags.num_probes, 'pool_probes': flags.pool_probes, 'train_epochs_mentor': flags.train_epochs_mentor, 'train_epochs_mentee': flags.train_epochs_mentee, 'train_epochs_finetune': flags.train_epochs_finetune, 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor, 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee, 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune, 'pool_type': flags.pool_type, 'trainee': 'finetune' }) for i in range(flags.train_epochs_finetune // flags.epochs_per_eval): tensors_to_log = { 'learning_rate': 'learning_rates/learning_rate_mentee', 'cross_entropy': 'cross_entropy/cross_entropy_mentee', 'train_accuracy': 'metrics/train_accuracy_mentee', } logging_hook = tf.train.LoggingTensorHook( tensors=tensors_to_log, every_n_iter=100) def input_fn_train(): return input_function(True, flags.data_dir, flags.batch_size, flags.epochs_per_eval, flags.num_parallel_calls) 
print(' *********************** ' ) print(' Starting a mentee finetune cycle. [' + str(i) + '/' + str(flags.train_epochs_finetune // flags.epochs_per_eval) + ']') print(' *********************** ' ) finetune.train(input_fn=input_fn_train, hooks=[logging_hook]) print('Starting to evaluate.') # Evaluate the model and print results def input_fn_eval(): return input_function(False, flags.data_dir, flags.batch_size, 1, flags.num_parallel_calls) eval_results = finetune.evaluate(input_fn=input_fn_eval) print(eval_results) class ResnetArgParser(argparse.ArgumentParser): """Arguments for configuring and running a Resnet Model. """ def __init__(self, resnet_size_choices=None): super(ResnetArgParser, self).__init__() self.add_argument( '--data_dir', type=str, default='./resnet_data', help='The directory where the input data is stored.') self.add_argument( '--num_parallel_calls', type=int, default=5, help='The number of records that are processed in parallel ' 'during input processing. This can be optimized per data set but ' 'for generally homogeneous data sets, should be approximately the ' 'number of available CPU cores.') self.add_argument( '--model_dir', type=str, default='./resnet_model', help='The directory where the model will be stored.') self.add_argument( '--resnet_size_mentor', type=int, default=50, choices=resnet_size_choices, help='The size of the ResNet Mentor model to use.') self.add_argument( '--resnet_size_mentee', type=int, default=10, choices=resnet_size_choices, help='The size of the ResNet Mentee model to use.') self.add_argument( '--train_epochs_mentor', type=int, default=100, help='The number of epochs to use for training.') self.add_argument( '--train_epochs_mentee', type=int, default=100, help='The number of epochs to use for training.') self.add_argument( '--train_epochs_finetune', type=int, default=100, help='The number of epochs to use for training.') self.add_argument( '--epochs_per_eval', type=int, default=1, help='The number of training epochs 
to run between evaluations.') self.add_argument( '--batch_size', type=int, default=32, help='Batch size for training and evaluation.') self.add_argument( '--mentor_optimizer', type=str, default='momentum', help='Optimizer for training and evaluation.') self.add_argument( '--mentee_optimizer', type=str, default='momentum', help='Optimizer for training and evaluation.') self.add_argument( '--finetune_optimizer', type=str, default='momentum', help='Optimizer for training and evaluation.') self.add_argument( '--data_format', type=str, default=None, choices=['channels_first', 'channels_last'], help='A flag to override the data format used in the model. ' 'channels_first provides a performance boost on GPU but ' 'is not always compatible with CPU. If left unspecified, ' 'the data format will be chosen automatically based on ' 'whether TensorFlow was built for CPU or GPU.') self.add_argument( '--distillation_coeff', type=float, default=0.01, help='Coefficient of distillation to be applied from parent to' 'child. This is only useful when performing distillaiton.') self.add_argument( '--probes_coeff', type=float, default=0.0001, help='Coefficient of weight to be applied from parent to' 'child. 
This is only useful when performing mentoring.') self.add_argument( '--weight_decay_coeff', type=float, default=0.0002, help='Coefficient of weight to be applied from to the' 'weight decay regularizer.') self.add_argument( '--temperature', type=float, default=3, help='Temperature to be used for the softmax layer') self.add_argument( '--num_probes', type=int, default=0, help='Number of probes to be used') self.add_argument( '--pool_probes', type=int, default=2, help='Maxpool probes by') self.add_argument( '--initial_learning_rate_mentor', type=float, default=0.001, help='Set initial learning rate for mentor') self.add_argument( '--initial_learning_rate_mentee', type=float, default=0.001, help='Set initial learning rate for mentee') self.add_argument( '--initial_learning_rate_finetune', type=float, default=0.001, help='Set initial learning rate finetune') self.add_argument( '--pool_type', type=str, default='max', help='Pool type for probes.')
44.399645
139
0.67008
from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import tensorflow as tf _BATCH_NORM_DECAY = 0.997 _BATCH_NORM_EPSILON = 1e-5 er_mentee = tf.train.MomentumOptimizer( learning_rate=learning_rate_mentee, momentum=momentum) elif optimizer[1] == 'adam': with tf.variable_scope('mentee_adam_optimizer'): optimizer_mentee = tf.train.AdamOptimizer( learning_rate=learning_rate_mentee) elif optimizer[1] == 'adadelta': with tf.variable_scope('mentee_adadelta_optimizer'): optimizer_mentee = tf.train.AdadeltaOptimizer( learning_rate=learning_rate_mentee) if optimizer[2] == 'momentum': with tf.variable_scope('finetune_momentum_optimizer'): optimizer_finetune = tf.train.MomentumOptimizer( learning_rate=learning_rate_finetune, momentum=momentum) elif optimizer[2] == 'adam': with tf.variable_scope('finetune_adam_optimizer'): optimizer_finetune = tf.train.AdamOptimizer( learning_rate=learning_rate_finetune) elif optimizer[2] == 'adadelta': with tf.variable_scope('finetune_adadelta_optimizer'): optimizer_finetune = tf.train.AdadeltaOptimizer( learning_rate=learning_rate_finetune) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): with tf.variable_scope('optimizers'): train_op_mentor = optimizer_mentor.minimize(loss_mentor, global_step, var_list = mentor_variables) train_op_mentee = optimizer_mentee.minimize(loss_mentee, global_step, var_list = mentee_variables) train_op_finetune = optimizer_finetune.minimize(loss_finetune, global_step, var_list = mentee_variables) else: with tf.variable_scope('mentor_cumulative_loss'): loss_mentor = cross_entropy_mentor + weight_decay_coeff * l2_mentor with tf.variable_scope('mentee_cumulative_loss'): loss_mentee = cross_entropy_mentee + weight_decay_coeff * l2_mentee with tf.variable_scope('mentee_finetune'): loss_finetune = cross_entropy_mentee + weight_decay_coeff * l2_mentee train_op_mentor = None train_op_mentee = 
None train_op_finetune = None with tf.variable_scope('metrics'): accuracy_mentor = tf.metrics.accuracy( tf.argmax(labels, axis=1), predictions_mentor['classes']) accuracy_mentee = tf.metrics.accuracy( tf.argmax(labels, axis=1), predictions_mentee['classes']) metrics = {'accuracy_mentor': accuracy_mentor, 'accuracy_mentee': accuracy_mentee} tf.identity(accuracy_mentor[1], name='train_accuracy_mentor') tf.summary.scalar('train_accuracy_mentor', accuracy_mentor[1]) tf.identity(accuracy_mentee[1], name='train_accuracy_mentee') tf.summary.scalar('train_accuracy_mentee', accuracy_mentee[1]) saver=tf.train.Saver(var_list = tf.global_variables()) if trainee == 'mentor': return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions_mentor, loss=loss_mentor, train_op=train_op_mentor, scaffold=tf.train.Scaffold(saver=saver), eval_metric_ops=metrics) elif trainee == 'mentee': return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions_mentee, loss=loss_mentee, train_op=train_op_mentee, scaffold=tf.train.Scaffold(saver=saver), eval_metric_ops=metrics) elif trainee == 'finetune': return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions_mentee, loss=loss_finetune, train_op=train_op_finetune, scaffold=tf.train.Scaffold(saver=saver), eval_metric_ops=metrics) def resnet_main(flags, model_function, input_function): os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' run_config = tf.estimator.RunConfig().replace(save_checkpoints_secs=1e9) mentor = tf.estimator.Estimator( model_fn=model_function, model_dir=flags.model_dir, config=run_config, params={ 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee], 'data_format': flags.data_format, 'batch_size': flags.batch_size, 'distillation_coeff': flags.distillation_coeff, 'probes_coeff': flags.probes_coeff, 'weight_decay_coeff': flags.weight_decay_coeff, 'optimizer': [flags.mentor_optimizer, flags.mentee_optimizer, flags.finetune_optimizer], 'temperature': flags.temperature, 'num_probes': 
flags.num_probes, 'pool_probes': flags.pool_probes, 'train_epochs_mentor': flags.train_epochs_mentor, 'train_epochs_mentee': flags.train_epochs_mentee, 'train_epochs_finetune': flags.train_epochs_finetune, 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor, 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee, 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune, 'pool_type': flags.pool_type, 'trainee': 'mentor' }) for i in range(flags.train_epochs_mentor // flags.epochs_per_eval): tensors_to_log = { 'learning_rate': 'learning_rates/learning_rate_mentor', 'cross_entropy': 'cross_entropy/cross_entropy_mentor' , 'train_accuracy': 'metrics/train_accuracy_mentor' } logging_hook = tf.train.LoggingTensorHook( tensors=tensors_to_log, every_n_iter=100) def input_fn_train(): return input_function(True, flags.data_dir, flags.batch_size, flags.epochs_per_eval, flags.num_parallel_calls) print(' *********************** ' ) print(' Starting a mentor training cycle. 
[' + str(i) + '/' + str(flags.train_epochs_mentor // flags.epochs_per_eval) + ']') print(' *********************** ' ) mentor.train(input_fn=input_fn_train, hooks=[logging_hook]) print('Starting to evaluate.') def input_fn_eval(): return input_function(False, flags.data_dir, flags.batch_size, 1, flags.num_parallel_calls) eval_results = mentor.evaluate(input_fn=input_fn_eval) print(eval_results) mentee = tf.estimator.Estimator( model_fn=model_function, model_dir=flags.model_dir, config=run_config, params={ 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee], 'data_format': flags.data_format, 'batch_size': flags.batch_size, 'distillation_coeff': flags.distillation_coeff, 'probes_coeff': flags.probes_coeff, 'optimizer': [flags.mentor_optimizer, flags.mentee_optimizer, flags.finetune_optimizer], 'weight_decay_coeff': flags.weight_decay_coeff, 'temperature': flags.temperature, 'num_probes': flags.num_probes, 'pool_probes': flags.pool_probes, 'train_epochs_mentor': flags.train_epochs_mentor, 'train_epochs_mentee': flags.train_epochs_mentee, 'train_epochs_finetune': flags.train_epochs_finetune, 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor, 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee, 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune, 'pool_type': flags.pool_type, 'trainee': 'mentee' }) for i in range(flags.train_epochs_mentee // flags.epochs_per_eval): tensors_to_log = { 'learning_rate': 'learning_rates/learning_rate_mentee', 'cross_entropy': 'cross_entropy/cross_entropy_mentee', 'train_accuracy': 'metrics/train_accuracy_mentee', 'distillation_loss': 'distillery/distillation_loss', 'distillation_coeff':'mentee_cumulative_loss/distillation_coeff_decayed' } logging_hook = tf.train.LoggingTensorHook( tensors=tensors_to_log, every_n_iter=100) def input_fn_train(): return input_function(True, flags.data_dir, flags.batch_size, flags.epochs_per_eval, flags.num_parallel_calls) print(' 
*********************** ' ) print(' Starting a mentee training cycle. [' + str(i) + '/' + str(flags.train_epochs_mentee // flags.epochs_per_eval) + ']') print(' *********************** ' ) mentee.train(input_fn=input_fn_train, hooks=[logging_hook]) print('Starting to evaluate.') def input_fn_eval(): return input_function(False, flags.data_dir, flags.batch_size, 1, flags.num_parallel_calls) eval_results = mentee.evaluate(input_fn=input_fn_eval) print(eval_results) finetune = tf.estimator.Estimator( model_fn=model_function, model_dir=flags.model_dir, config=run_config, params={ 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee], 'data_format': flags.data_format, 'batch_size': flags.batch_size, 'distillation_coeff': flags.distillation_coeff, 'probes_coeff': flags.probes_coeff, 'optimizer': [flags.mentor_optimizer, flags.mentee_optimizer, flags.finetune_optimizer], 'weight_decay_coeff': flags.weight_decay_coeff, 'temperature': flags.temperature, 'num_probes': flags.num_probes, 'pool_probes': flags.pool_probes, 'train_epochs_mentor': flags.train_epochs_mentor, 'train_epochs_mentee': flags.train_epochs_mentee, 'train_epochs_finetune': flags.train_epochs_finetune, 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor, 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee, 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune, 'pool_type': flags.pool_type, 'trainee': 'finetune' }) for i in range(flags.train_epochs_finetune // flags.epochs_per_eval): tensors_to_log = { 'learning_rate': 'learning_rates/learning_rate_mentee', 'cross_entropy': 'cross_entropy/cross_entropy_mentee', 'train_accuracy': 'metrics/train_accuracy_mentee', } logging_hook = tf.train.LoggingTensorHook( tensors=tensors_to_log, every_n_iter=100) def input_fn_train(): return input_function(True, flags.data_dir, flags.batch_size, flags.epochs_per_eval, flags.num_parallel_calls) print(' *********************** ' ) print(' Starting a mentee finetune 
cycle. [' + str(i) + '/' + str(flags.train_epochs_finetune // flags.epochs_per_eval) + ']') print(' *********************** ' ) finetune.train(input_fn=input_fn_train, hooks=[logging_hook]) print('Starting to evaluate.') def input_fn_eval(): return input_function(False, flags.data_dir, flags.batch_size, 1, flags.num_parallel_calls) eval_results = finetune.evaluate(input_fn=input_fn_eval) print(eval_results) class ResnetArgParser(argparse.ArgumentParser): def __init__(self, resnet_size_choices=None): super(ResnetArgParser, self).__init__() self.add_argument( '--data_dir', type=str, default='./resnet_data', help='The directory where the input data is stored.') self.add_argument( '--num_parallel_calls', type=int, default=5, help='The number of records that are processed in parallel ' 'during input processing. This can be optimized per data set but ' 'for generally homogeneous data sets, should be approximately the ' 'number of available CPU cores.') self.add_argument( '--model_dir', type=str, default='./resnet_model', help='The directory where the model will be stored.') self.add_argument( '--resnet_size_mentor', type=int, default=50, choices=resnet_size_choices, help='The size of the ResNet Mentor model to use.') self.add_argument( '--resnet_size_mentee', type=int, default=10, choices=resnet_size_choices, help='The size of the ResNet Mentee model to use.') self.add_argument( '--train_epochs_mentor', type=int, default=100, help='The number of epochs to use for training.') self.add_argument( '--train_epochs_mentee', type=int, default=100, help='The number of epochs to use for training.') self.add_argument( '--train_epochs_finetune', type=int, default=100, help='The number of epochs to use for training.') self.add_argument( '--epochs_per_eval', type=int, default=1, help='The number of training epochs to run between evaluations.') self.add_argument( '--batch_size', type=int, default=32, help='Batch size for training and evaluation.') self.add_argument( 
'--mentor_optimizer', type=str, default='momentum', help='Optimizer for training and evaluation.') self.add_argument( '--mentee_optimizer', type=str, default='momentum', help='Optimizer for training and evaluation.') self.add_argument( '--finetune_optimizer', type=str, default='momentum', help='Optimizer for training and evaluation.') self.add_argument( '--data_format', type=str, default=None, choices=['channels_first', 'channels_last'], help='A flag to override the data format used in the model. ' 'channels_first provides a performance boost on GPU but ' 'is not always compatible with CPU. If left unspecified, ' 'the data format will be chosen automatically based on ' 'whether TensorFlow was built for CPU or GPU.') self.add_argument( '--distillation_coeff', type=float, default=0.01, help='Coefficient of distillation to be applied from parent to' 'child. This is only useful when performing distillaiton.') self.add_argument( '--probes_coeff', type=float, default=0.0001, help='Coefficient of weight to be applied from parent to' 'child. 
This is only useful when performing mentoring.') self.add_argument( '--weight_decay_coeff', type=float, default=0.0002, help='Coefficient of weight to be applied from to the' 'weight decay regularizer.') self.add_argument( '--temperature', type=float, default=3, help='Temperature to be used for the softmax layer') self.add_argument( '--num_probes', type=int, default=0, help='Number of probes to be used') self.add_argument( '--pool_probes', type=int, default=2, help='Maxpool probes by') self.add_argument( '--initial_learning_rate_mentor', type=float, default=0.001, help='Set initial learning rate for mentor') self.add_argument( '--initial_learning_rate_mentee', type=float, default=0.001, help='Set initial learning rate for mentee') self.add_argument( '--initial_learning_rate_finetune', type=float, default=0.001, help='Set initial learning rate finetune') self.add_argument( '--pool_type', type=str, default='max', help='Pool type for probes.')
true
true
f716e0e1798b4361d576daa1b6e3bf179cfdaf7c
5,362
py
Python
tests/scripts/thread-cert/Cert_9_2_12_Announce.py
TanJay/openthread
ffd28ebd4d874fbc71f556ced86efc306e6a2d4b
[ "BSD-3-Clause" ]
1
2018-12-31T08:12:49.000Z
2018-12-31T08:12:49.000Z
tests/scripts/thread-cert/Cert_9_2_12_Announce.py
syin2/openthread
a9f42768ec221380f42bfd311bc68e784b2163a6
[ "BSD-3-Clause" ]
null
null
null
tests/scripts/thread-cert/Cert_9_2_12_Announce.py
syin2/openthread
a9f42768ec221380f42bfd311bc68e784b2163a6
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# import time import unittest import node LEADER1 = 1 ROUTER1 = 2 LEADER2 = 3 ROUTER2 = 4 MED = 5 DATASET1_TIMESTAMP = 20 DATASET1_CHANNEL = 11 DATASET1_PANID = 0xface DATASET2_TIMESTAMP = 10 DATASET2_CHANNEL = 12 DATASET2_PANID = 0xafce class Cert_9_2_12_Announce(unittest.TestCase): def setUp(self): self.nodes = {} for i in range(1,6): self.nodes[i] = node.Node(i) self.nodes[LEADER1].set_active_dataset(DATASET1_TIMESTAMP, channel=DATASET1_CHANNEL, panid=DATASET1_PANID) self.nodes[LEADER1].set_mode('rsdn') self.nodes[LEADER1].add_whitelist(self.nodes[ROUTER1].get_addr64()) self.nodes[LEADER1].enable_whitelist() self.nodes[ROUTER1].set_active_dataset(DATASET1_TIMESTAMP, channel=DATASET1_CHANNEL, panid=DATASET1_PANID) self.nodes[ROUTER1].set_mode('rsdn') self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER1].get_addr64()) self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER2].get_addr64()) self.nodes[ROUTER1].enable_whitelist() self.nodes[ROUTER1].set_router_selection_jitter(1) self.nodes[LEADER2].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID) self.nodes[LEADER2].set_mode('rsdn') self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER1].get_addr64()) self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER2].get_addr64()) self.nodes[LEADER2].enable_whitelist() self.nodes[LEADER2].set_router_selection_jitter(1) self.nodes[ROUTER2].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID) self.nodes[ROUTER2].set_mode('rsdn') self.nodes[ROUTER2].add_whitelist(self.nodes[LEADER2].get_addr64()) self.nodes[ROUTER2].add_whitelist(self.nodes[MED].get_addr64()) self.nodes[ROUTER2].enable_whitelist() self.nodes[ROUTER2].set_router_selection_jitter(1) self.nodes[MED].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID) self.nodes[MED].set_mode('rsn') self.nodes[MED].add_whitelist(self.nodes[ROUTER2].get_addr64()) self.nodes[MED].enable_whitelist() def tearDown(self): for node in 
list(self.nodes.values()): node.stop() del self.nodes def test(self): self.nodes[LEADER1].start() self.nodes[LEADER1].set_state('leader') self.assertEqual(self.nodes[LEADER1].get_state(), 'leader') self.nodes[LEADER1].commissioner_start() time.sleep(3) self.nodes[ROUTER1].start() time.sleep(5) self.assertEqual(self.nodes[ROUTER1].get_state(), 'router') self.nodes[LEADER2].start() self.nodes[LEADER2].set_state('leader') self.assertEqual(self.nodes[LEADER2].get_state(), 'leader') self.nodes[ROUTER2].start() time.sleep(5) self.assertEqual(self.nodes[ROUTER2].get_state(), 'router') self.nodes[MED].start() time.sleep(5) self.assertEqual(self.nodes[MED].get_state(), 'child') ipaddrs = self.nodes[ROUTER1].get_addrs() for ipaddr in ipaddrs: if ipaddr[0:4] != 'fe80': break self.nodes[LEADER1].announce_begin(0x1000, 1, 1000, ipaddr) time.sleep(30) self.assertEqual(self.nodes[LEADER2].get_state(), 'router') self.assertEqual(self.nodes[ROUTER2].get_state(), 'router') self.assertEqual(self.nodes[MED].get_state(), 'child') ipaddrs = self.nodes[MED].get_addrs() for ipaddr in ipaddrs: if ipaddr[0:4] != 'fe80': self.assertTrue(self.nodes[LEADER1].ping(ipaddr)) if __name__ == '__main__': unittest.main()
40.621212
114
0.70179
import time import unittest import node LEADER1 = 1 ROUTER1 = 2 LEADER2 = 3 ROUTER2 = 4 MED = 5 DATASET1_TIMESTAMP = 20 DATASET1_CHANNEL = 11 DATASET1_PANID = 0xface DATASET2_TIMESTAMP = 10 DATASET2_CHANNEL = 12 DATASET2_PANID = 0xafce class Cert_9_2_12_Announce(unittest.TestCase): def setUp(self): self.nodes = {} for i in range(1,6): self.nodes[i] = node.Node(i) self.nodes[LEADER1].set_active_dataset(DATASET1_TIMESTAMP, channel=DATASET1_CHANNEL, panid=DATASET1_PANID) self.nodes[LEADER1].set_mode('rsdn') self.nodes[LEADER1].add_whitelist(self.nodes[ROUTER1].get_addr64()) self.nodes[LEADER1].enable_whitelist() self.nodes[ROUTER1].set_active_dataset(DATASET1_TIMESTAMP, channel=DATASET1_CHANNEL, panid=DATASET1_PANID) self.nodes[ROUTER1].set_mode('rsdn') self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER1].get_addr64()) self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER2].get_addr64()) self.nodes[ROUTER1].enable_whitelist() self.nodes[ROUTER1].set_router_selection_jitter(1) self.nodes[LEADER2].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID) self.nodes[LEADER2].set_mode('rsdn') self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER1].get_addr64()) self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER2].get_addr64()) self.nodes[LEADER2].enable_whitelist() self.nodes[LEADER2].set_router_selection_jitter(1) self.nodes[ROUTER2].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID) self.nodes[ROUTER2].set_mode('rsdn') self.nodes[ROUTER2].add_whitelist(self.nodes[LEADER2].get_addr64()) self.nodes[ROUTER2].add_whitelist(self.nodes[MED].get_addr64()) self.nodes[ROUTER2].enable_whitelist() self.nodes[ROUTER2].set_router_selection_jitter(1) self.nodes[MED].set_active_dataset(DATASET2_TIMESTAMP, channel=DATASET2_CHANNEL, panid=DATASET1_PANID) self.nodes[MED].set_mode('rsn') self.nodes[MED].add_whitelist(self.nodes[ROUTER2].get_addr64()) self.nodes[MED].enable_whitelist() def tearDown(self): for node in 
list(self.nodes.values()): node.stop() del self.nodes def test(self): self.nodes[LEADER1].start() self.nodes[LEADER1].set_state('leader') self.assertEqual(self.nodes[LEADER1].get_state(), 'leader') self.nodes[LEADER1].commissioner_start() time.sleep(3) self.nodes[ROUTER1].start() time.sleep(5) self.assertEqual(self.nodes[ROUTER1].get_state(), 'router') self.nodes[LEADER2].start() self.nodes[LEADER2].set_state('leader') self.assertEqual(self.nodes[LEADER2].get_state(), 'leader') self.nodes[ROUTER2].start() time.sleep(5) self.assertEqual(self.nodes[ROUTER2].get_state(), 'router') self.nodes[MED].start() time.sleep(5) self.assertEqual(self.nodes[MED].get_state(), 'child') ipaddrs = self.nodes[ROUTER1].get_addrs() for ipaddr in ipaddrs: if ipaddr[0:4] != 'fe80': break self.nodes[LEADER1].announce_begin(0x1000, 1, 1000, ipaddr) time.sleep(30) self.assertEqual(self.nodes[LEADER2].get_state(), 'router') self.assertEqual(self.nodes[ROUTER2].get_state(), 'router') self.assertEqual(self.nodes[MED].get_state(), 'child') ipaddrs = self.nodes[MED].get_addrs() for ipaddr in ipaddrs: if ipaddr[0:4] != 'fe80': self.assertTrue(self.nodes[LEADER1].ping(ipaddr)) if __name__ == '__main__': unittest.main()
true
true
f716e10b6e71953481b66f47f8b43c293e3fae0b
321
py
Python
wrench/labelmodel/__init__.py
rpryzant/wrench
3668c359aeff18724e927a207a85da17f2ead823
[ "Apache-2.0" ]
1
2021-11-24T04:01:08.000Z
2021-11-24T04:01:08.000Z
wrench/labelmodel/__init__.py
yinkaiw/wrench
f20135eb9b1d51b5bad92b3a910efd92235df356
[ "Apache-2.0" ]
null
null
null
wrench/labelmodel/__init__.py
yinkaiw/wrench
f20135eb9b1d51b5bad92b3a910efd92235df356
[ "Apache-2.0" ]
null
null
null
from .dawid_skene import DawidSkene from .flyingsquid import FlyingSquid from .generative_model import GenerativeModel from .gold import GoldCondProb from .majority_voting import MajorityVoting, MajorityWeightedVoting from .metal import MeTaL from .naive_bayes import NaiveBayesModel from .snorkel import Snorkel
35.666667
68
0.844237
from .dawid_skene import DawidSkene from .flyingsquid import FlyingSquid from .generative_model import GenerativeModel from .gold import GoldCondProb from .majority_voting import MajorityVoting, MajorityWeightedVoting from .metal import MeTaL from .naive_bayes import NaiveBayesModel from .snorkel import Snorkel
true
true
f716e2ca2dbea9c8c4a6ac6e99b6f76798d9cf6c
3,733
py
Python
python/bot_discord.py
angelopassaro/Hacktoberfest-1
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
[ "Apache-2.0" ]
1
2020-10-06T01:20:07.000Z
2020-10-06T01:20:07.000Z
python/bot_discord.py
angelopassaro/Hacktoberfest-1
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
[ "Apache-2.0" ]
null
null
null
python/bot_discord.py
angelopassaro/Hacktoberfest-1
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
[ "Apache-2.0" ]
null
null
null
"create by poomipat01" import asyncio import discord import youtube_dl import os from discord.ext import commands def read_token(): with open("token.ini",'r') as f: lines = f.readline() return lines.strip() # Suppress noise about console usage from errors youtube_dl.utils.bug_reports_message = lambda: '' TOKEN = os.environ['TOKEN'] ytdl_format_options = { 'format': 'bestaudio/best', 'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s', 'restrictfilenames': True, 'noplaylist': True, 'nocheckcertificate': True, 'ignoreerrors': False, 'logtostderr': False, 'quiet': True, 'no_warnings': True, 'default_search': 'auto', 'source_address': '0.0.0.0' # bind to ipv4 since ipv6 addresses cause issues sometimes } ffmpeg_options = { 'options': '-vn' } ytdl = youtube_dl.YoutubeDL(ytdl_format_options) class YTDLSource(discord.PCMVolumeTransformer): def __init__(self, source, *, data, volume=1): super().__init__(source, volume) self.data = data self.title = data.get('title') self.url = data.get('url') @classmethod async def from_url(self, url, *, loop=None, stream=False): loop = loop or asyncio.get_event_loop() data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream)) if 'entries' in data: # take first item from a playlist data = data['entries'][0] filename = data['url'] if stream else ytdl.prepare_filename(data) self.filename = filename return self(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data) class Music(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async def join(self, ctx, *, channel: discord.VoiceChannel): """Joins a voice channel""" if ctx.voice_client is not None: return await ctx.voice_client.move_to(channel) await channel.connect() @commands.command() async def ping(self,ctx): await ctx.send(f'ping : {round(bot.latency * 1000)}ms') @commands.command() async def play(self, ctx, *, url): async with ctx.typing(): player = await YTDLSource.from_url(url=url, loop=self.bot.loop) 
ctx.voice_client.play(player) await ctx.send('Now playing: {}'.format(player.title)) @commands.command() async def volume(self, ctx, volume: int): """Changes the player's volume""" if ctx.voice_client is None: return await ctx.send("Not connected to a voice channel.") ctx.voice_client.source.volume = volume / 100 await ctx.send("Changed volume to {}%".format(volume)) @commands.command() async def stop(self, ctx): """Stops and disconnects the bot from voice""" await ctx.voice_client.disconnect() @play.before_invoke async def ensure_voice(self, ctx): if ctx.voice_client is None: if ctx.author.voice: await ctx.author.voice.channel.connect() else: await ctx.send("You are not connected to a voice channel.") raise commands.CommandError("Author not connected to a voice channel.") elif ctx.voice_client.is_playing(): ctx.voice_client.stop() filename = YTDLSource.filename if os.path.exists(filename): os.remove(filename) bot = commands.Bot(command_prefix=commands.when_mentioned_or("$")) @bot.event async def on_ready(): await bot.change_presence(status=discord.Status.online, activity=discord.Game('$help for information')) print("Bot online!!!") bot.add_cog(Music(bot)) bot.run(TOKEN)
30.104839
107
0.639432
import asyncio import discord import youtube_dl import os from discord.ext import commands def read_token(): with open("token.ini",'r') as f: lines = f.readline() return lines.strip() youtube_dl.utils.bug_reports_message = lambda: '' TOKEN = os.environ['TOKEN'] ytdl_format_options = { 'format': 'bestaudio/best', 'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s', 'restrictfilenames': True, 'noplaylist': True, 'nocheckcertificate': True, 'ignoreerrors': False, 'logtostderr': False, 'quiet': True, 'no_warnings': True, 'default_search': 'auto', 'source_address': '0.0.0.0' } ffmpeg_options = { 'options': '-vn' } ytdl = youtube_dl.YoutubeDL(ytdl_format_options) class YTDLSource(discord.PCMVolumeTransformer): def __init__(self, source, *, data, volume=1): super().__init__(source, volume) self.data = data self.title = data.get('title') self.url = data.get('url') @classmethod async def from_url(self, url, *, loop=None, stream=False): loop = loop or asyncio.get_event_loop() data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream)) if 'entries' in data: data = data['entries'][0] filename = data['url'] if stream else ytdl.prepare_filename(data) self.filename = filename return self(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data) class Music(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async def join(self, ctx, *, channel: discord.VoiceChannel): if ctx.voice_client is not None: return await ctx.voice_client.move_to(channel) await channel.connect() @commands.command() async def ping(self,ctx): await ctx.send(f'ping : {round(bot.latency * 1000)}ms') @commands.command() async def play(self, ctx, *, url): async with ctx.typing(): player = await YTDLSource.from_url(url=url, loop=self.bot.loop) ctx.voice_client.play(player) await ctx.send('Now playing: {}'.format(player.title)) @commands.command() async def volume(self, ctx, volume: int): if ctx.voice_client is None: return await ctx.send("Not connected 
to a voice channel.") ctx.voice_client.source.volume = volume / 100 await ctx.send("Changed volume to {}%".format(volume)) @commands.command() async def stop(self, ctx): await ctx.voice_client.disconnect() @play.before_invoke async def ensure_voice(self, ctx): if ctx.voice_client is None: if ctx.author.voice: await ctx.author.voice.channel.connect() else: await ctx.send("You are not connected to a voice channel.") raise commands.CommandError("Author not connected to a voice channel.") elif ctx.voice_client.is_playing(): ctx.voice_client.stop() filename = YTDLSource.filename if os.path.exists(filename): os.remove(filename) bot = commands.Bot(command_prefix=commands.when_mentioned_or("$")) @bot.event async def on_ready(): await bot.change_presence(status=discord.Status.online, activity=discord.Game('$help for information')) print("Bot online!!!") bot.add_cog(Music(bot)) bot.run(TOKEN)
true
true
f716e5b3df3c3c98ce55161f28ad6090b87813a0
602
py
Python
quickstart/python/sms/example-2/send_notifications.6.x.py
Tshisuaka/api-snippets
52b50037d4af0f3b96adf76197964725a1501e96
[ "MIT" ]
234
2016-01-27T03:04:38.000Z
2022-02-25T20:13:43.000Z
quickstart/python/sms/example-2/send_notifications.6.x.py
Tshisuaka/api-snippets
52b50037d4af0f3b96adf76197964725a1501e96
[ "MIT" ]
351
2016-04-06T16:55:33.000Z
2022-03-10T18:42:36.000Z
quickstart/python/sms/example-2/send_notifications.6.x.py
Tshisuaka/api-snippets
52b50037d4af0f3b96adf76197964725a1501e96
[ "MIT" ]
494
2016-03-30T15:28:20.000Z
2022-03-28T19:39:36.000Z
# /usr/bin/env python # Download the twilio-python library from twilio.com/docs/libraries/python import os from twilio.rest import Client # Find these values at https://twilio.com/user/account # To set up environmental variables, see http://twil.io/secure account_sid = os.environ['TWILIO_ACCOUNT_SID'] auth_token = "YYYYYYYYYYYYYYYYYY" client = Client(account_sid, auth_token) message = client.api.account.messages.create( to="+12316851234", from_="+15555555555", body="Hello there!", media_url=['https://demo.twilio.com/owl.png', 'https://demo.twilio.com/logo.png'])
33.444444
74
0.727575
import os from twilio.rest import Client account_sid = os.environ['TWILIO_ACCOUNT_SID'] auth_token = "YYYYYYYYYYYYYYYYYY" client = Client(account_sid, auth_token) message = client.api.account.messages.create( to="+12316851234", from_="+15555555555", body="Hello there!", media_url=['https://demo.twilio.com/owl.png', 'https://demo.twilio.com/logo.png'])
true
true
f716e7003ce379bfc28bec594685939ffeb73fea
1,069
py
Python
award/migrations/0003_rating.py
EmmanuelMuchiri/Awards
d786689a6f5f32532d005ef6a50eed4600ba5ecc
[ "MIT" ]
null
null
null
award/migrations/0003_rating.py
EmmanuelMuchiri/Awards
d786689a6f5f32532d005ef6a50eed4600ba5ecc
[ "MIT" ]
5
2020-06-05T22:45:28.000Z
2021-09-08T01:16:58.000Z
award/migrations/0003_rating.py
EmmanuelMuchiri/Awards
d786689a6f5f32532d005ef6a50eed4600ba5ecc
[ "MIT" ]
3
2019-09-09T08:16:01.000Z
2019-11-25T11:37:58.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.11.24 on 2019-09-07 12:47 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('award', '0002_project'), ] operations = [ migrations.CreateModel( name='Rating', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('design', models.IntegerField(blank=True, default=0)), ('usability', models.IntegerField(blank=True, default=0)), ('creativity', models.IntegerField(blank=True, default=0)), ('overall_rating', models.IntegerField(blank=True, default=0)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='award.Profile')), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='award.Project')), ], ), ]
36.862069
114
0.615529
from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('award', '0002_project'), ] operations = [ migrations.CreateModel( name='Rating', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('design', models.IntegerField(blank=True, default=0)), ('usability', models.IntegerField(blank=True, default=0)), ('creativity', models.IntegerField(blank=True, default=0)), ('overall_rating', models.IntegerField(blank=True, default=0)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='award.Profile')), ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='award.Project')), ], ), ]
true
true
f716e8fadfc4a02237cc5c211b47dc9372ec31ac
4,293
py
Python
salamanca/cli.py
coroa/salamanca
29da72cc40dd511c81bfdcb71ac956a24de1148b
[ "Apache-2.0" ]
null
null
null
salamanca/cli.py
coroa/salamanca
29da72cc40dd511c81bfdcb71ac956a24de1148b
[ "Apache-2.0" ]
null
null
null
salamanca/cli.py
coroa/salamanca
29da72cc40dd511c81bfdcb71ac956a24de1148b
[ "Apache-2.0" ]
null
null
null
import argparse import logging from salamanca import data from salamanca import currency COMMANDS = {} # # Download wb data # def download_wb_cli(parser): log = 'Print log output during download.' parser.add_argument('--log', help=log, action="store_true") overwrite = 'Overwrite local files if they exist.' parser.add_argument('--overwrite', help=overwrite, action="store_true") def download_wb(log=False, overwrite=False, **kwargs): if log: logger = logging.getLogger() logger.setLevel(logging.DEBUG) wb = data.WorldBank() wb.iso_metadata(overwrite=overwrite) for ind in data.INDICATORS_WB: wb.query(ind, overwrite=overwrite) COMMANDS['download_wb'] = ( """Download national World Bank data to your machine""", download_wb_cli, download_wb, ) # # Currency Exchange # def exchange_cli(parser): amt = 'quantity of currency (default: 1.0)' parser.add_argument('-x', '--amt', help=amt, default=1.0) units = "units in which to do conversion [MER or PPP] (default: MER)" parser.add_argument('-u', '--units', help=units, default='MER') meth = "method to use to do conversion [deflator or cpi] (default: deflator)" parser.add_argument('-m', '--meth', help=meth, default='deflator') required = parser.add_argument_group('required arguments') _from = """ ISO: 3-letter ISO code for the origin country, YEAR: origin year """ required.add_argument('-f', '--from', help=_from, nargs=2, metavar=('ISO', 'YEAR'), required=True) _to = """ ISO: 3-letter ISO code for the destination country, YEAR: destination year """ required.add_argument('-t', '--to', help=_to, nargs=2, metavar=('ISO', 'YEAR'), required=True) def exchange(**kwargs): amt = kwargs['amt'] fromiso, fromyr = kwargs['from'] toiso, toyr = kwargs['to'] units = kwargs['units'] inflation_method = kwargs['meth'] xlator = currency.Translator() ret = xlator.exchange(amt, fromiso=fromiso, fromyr=fromyr, toiso=toiso, toyr=toyr, units=units, inflation_method=inflation_method) print(ret) return ret COMMANDS['exchange'] = ( """Exchange currency from one 
country/year to another.""", exchange_cli, exchange, ) def to_ppp_cli(parser): amt = 'quantity of currency in MER (default: 1.0)' parser.add_argument('-x', '--amt', help=amt, type=float, default=1.0) iso = '3-letter ISO code for the country' parser.add_argument('--iso', help=iso) year = 'year of conversion' parser.add_argument('--year', type=int, help=year) def to_ppp(**kwargs): amt = kwargs['amt'] iso = kwargs['iso'] year = kwargs['year'] xlator = currency.Translator() ret = amt * xlator.mer_to_ppp(iso, year) print(ret) return ret COMMANDS['to_ppp'] = ( """Exchange currency in MER to PPP.""", to_ppp_cli, to_ppp, ) def to_mer_cli(parser): amt = 'quantity of currency in PPP (default: 1.0)' parser.add_argument('-x', '--amt', help=amt, type=float, default=1.0) iso = '3-letter ISO code for the country' parser.add_argument('--iso', help=iso) year = 'year of conversion' parser.add_argument('--year', type=int, help=year) def to_mer(**kwargs): amt = kwargs['amt'] iso = kwargs['iso'] year = int(kwargs['year']) xlator = currency.Translator() ret = amt / xlator.mer_to_ppp(iso, year) print(ret) return ret COMMANDS['to_mer'] = ( """Exchange currency in PPP to MER.""", to_mer_cli, to_mer, ) def main(): descr = """ Main CLI for salamanca. """ parser = argparse.ArgumentParser( description=descr, formatter_class=argparse.RawDescriptionHelpFormatter ) subparsers = parser.add_subparsers(dest='command') for cmd in COMMANDS: cli_help = COMMANDS[cmd][0] cli_func = COMMANDS[cmd][1] subparser = subparsers.add_parser( cmd, help=cli_help, ) cli_func(subparser) args = parser.parse_args() cmd = args.command cmd_func = COMMANDS[cmd][2] cmd_func(**vars(args)) if __name__ == '__main__': main()
25.553571
81
0.623107
import argparse import logging from salamanca import data from salamanca import currency COMMANDS = {} def download_wb_cli(parser): log = 'Print log output during download.' parser.add_argument('--log', help=log, action="store_true") overwrite = 'Overwrite local files if they exist.' parser.add_argument('--overwrite', help=overwrite, action="store_true") def download_wb(log=False, overwrite=False, **kwargs): if log: logger = logging.getLogger() logger.setLevel(logging.DEBUG) wb = data.WorldBank() wb.iso_metadata(overwrite=overwrite) for ind in data.INDICATORS_WB: wb.query(ind, overwrite=overwrite) COMMANDS['download_wb'] = ( """Download national World Bank data to your machine""", download_wb_cli, download_wb, ) def exchange_cli(parser): amt = 'quantity of currency (default: 1.0)' parser.add_argument('-x', '--amt', help=amt, default=1.0) units = "units in which to do conversion [MER or PPP] (default: MER)" parser.add_argument('-u', '--units', help=units, default='MER') meth = "method to use to do conversion [deflator or cpi] (default: deflator)" parser.add_argument('-m', '--meth', help=meth, default='deflator') required = parser.add_argument_group('required arguments') _from = """ ISO: 3-letter ISO code for the origin country, YEAR: origin year """ required.add_argument('-f', '--from', help=_from, nargs=2, metavar=('ISO', 'YEAR'), required=True) _to = """ ISO: 3-letter ISO code for the destination country, YEAR: destination year """ required.add_argument('-t', '--to', help=_to, nargs=2, metavar=('ISO', 'YEAR'), required=True) def exchange(**kwargs): amt = kwargs['amt'] fromiso, fromyr = kwargs['from'] toiso, toyr = kwargs['to'] units = kwargs['units'] inflation_method = kwargs['meth'] xlator = currency.Translator() ret = xlator.exchange(amt, fromiso=fromiso, fromyr=fromyr, toiso=toiso, toyr=toyr, units=units, inflation_method=inflation_method) print(ret) return ret COMMANDS['exchange'] = ( """Exchange currency from one country/year to another.""", exchange_cli, 
exchange, ) def to_ppp_cli(parser): amt = 'quantity of currency in MER (default: 1.0)' parser.add_argument('-x', '--amt', help=amt, type=float, default=1.0) iso = '3-letter ISO code for the country' parser.add_argument('--iso', help=iso) year = 'year of conversion' parser.add_argument('--year', type=int, help=year) def to_ppp(**kwargs): amt = kwargs['amt'] iso = kwargs['iso'] year = kwargs['year'] xlator = currency.Translator() ret = amt * xlator.mer_to_ppp(iso, year) print(ret) return ret COMMANDS['to_ppp'] = ( """Exchange currency in MER to PPP.""", to_ppp_cli, to_ppp, ) def to_mer_cli(parser): amt = 'quantity of currency in PPP (default: 1.0)' parser.add_argument('-x', '--amt', help=amt, type=float, default=1.0) iso = '3-letter ISO code for the country' parser.add_argument('--iso', help=iso) year = 'year of conversion' parser.add_argument('--year', type=int, help=year) def to_mer(**kwargs): amt = kwargs['amt'] iso = kwargs['iso'] year = int(kwargs['year']) xlator = currency.Translator() ret = amt / xlator.mer_to_ppp(iso, year) print(ret) return ret COMMANDS['to_mer'] = ( """Exchange currency in PPP to MER.""", to_mer_cli, to_mer, ) def main(): descr = """ Main CLI for salamanca. """ parser = argparse.ArgumentParser( description=descr, formatter_class=argparse.RawDescriptionHelpFormatter ) subparsers = parser.add_subparsers(dest='command') for cmd in COMMANDS: cli_help = COMMANDS[cmd][0] cli_func = COMMANDS[cmd][1] subparser = subparsers.add_parser( cmd, help=cli_help, ) cli_func(subparser) args = parser.parse_args() cmd = args.command cmd_func = COMMANDS[cmd][2] cmd_func(**vars(args)) if __name__ == '__main__': main()
true
true
f716ea6ba7a5521a99b2eb2280b6736549b4ed5d
1,648
py
Python
src/tests/unit/common/test_css.py
td00/pretalx
aff450de9420fca167e04345fa24ee7140fae819
[ "Apache-2.0" ]
null
null
null
src/tests/unit/common/test_css.py
td00/pretalx
aff450de9420fca167e04345fa24ee7140fae819
[ "Apache-2.0" ]
null
null
null
src/tests/unit/common/test_css.py
td00/pretalx
aff450de9420fca167e04345fa24ee7140fae819
[ "Apache-2.0" ]
null
null
null
import pytest from django.core.exceptions import ValidationError from pretalx.common.css import validate_css from pretalx.event.models import Event @pytest.fixture def valid_css(): return ''' body { background-color: #000; display: none; } .some-descriptor { border-style: dotted dashed solid double; BORDER-color: red green blue yellow; } #best-descriptor { border: 5px solid red; } ''' @pytest.fixture def invalid_css(valid_css): return valid_css + ''' a.other-descriptor { content: url("https://malicious.site.com"); } ''' @pytest.fixture def some_object(): class Foo: pass return Foo() def test_valid_css(valid_css): assert validate_css(valid_css) == valid_css def test_invalid_css(invalid_css): with pytest.raises(ValidationError): validate_css(invalid_css) @pytest.mark.django_db def test_regenerate_css(event): from pretalx.common.tasks import regenerate_css event.primary_color = '#00ff00' event.save() regenerate_css(event.pk) event = Event.objects.get(pk=event.pk) for local_app in ['agenda', 'cfp', 'orga']: assert event.settings.get(f'{local_app}_css_file') assert event.settings.get(f'{local_app}_css_checksum') @pytest.mark.django_db def test_regenerate_css_no_color(event): from pretalx.common.tasks import regenerate_css event.primary_color = None event.save() regenerate_css(event.pk) event = Event.objects.get(pk=event.pk) for local_app in ['agenda', 'cfp', 'orga']: assert not event.settings.get(f'{local_app}_css_file') assert not event.settings.get(f'{local_app}_css_checksum')
22.888889
66
0.711772
import pytest from django.core.exceptions import ValidationError from pretalx.common.css import validate_css from pretalx.event.models import Event @pytest.fixture def valid_css(): return ''' body { background-color: #000; display: none; } .some-descriptor { border-style: dotted dashed solid double; BORDER-color: red green blue yellow; } #best-descriptor { border: 5px solid red; } ''' @pytest.fixture def invalid_css(valid_css): return valid_css + ''' a.other-descriptor { content: url("https://malicious.site.com"); } ''' @pytest.fixture def some_object(): class Foo: pass return Foo() def test_valid_css(valid_css): assert validate_css(valid_css) == valid_css def test_invalid_css(invalid_css): with pytest.raises(ValidationError): validate_css(invalid_css) @pytest.mark.django_db def test_regenerate_css(event): from pretalx.common.tasks import regenerate_css event.primary_color = '#00ff00' event.save() regenerate_css(event.pk) event = Event.objects.get(pk=event.pk) for local_app in ['agenda', 'cfp', 'orga']: assert event.settings.get(f'{local_app}_css_file') assert event.settings.get(f'{local_app}_css_checksum') @pytest.mark.django_db def test_regenerate_css_no_color(event): from pretalx.common.tasks import regenerate_css event.primary_color = None event.save() regenerate_css(event.pk) event = Event.objects.get(pk=event.pk) for local_app in ['agenda', 'cfp', 'orga']: assert not event.settings.get(f'{local_app}_css_file') assert not event.settings.get(f'{local_app}_css_checksum')
true
true
f716ec2ee533f960f1e83f092c89ca170e08c6c2
679
py
Python
bin/django-admin.py
hkolstee/bachelor-project
5d26632c2d920327248efdabf2acc53781264dc2
[ "MIT" ]
null
null
null
bin/django-admin.py
hkolstee/bachelor-project
5d26632c2d920327248efdabf2acc53781264dc2
[ "MIT" ]
null
null
null
bin/django-admin.py
hkolstee/bachelor-project
5d26632c2d920327248efdabf2acc53781264dc2
[ "MIT" ]
null
null
null
#!/home/hkolstee/bproject/virtenv/bin/python # When the django-admin.py deprecation ends, remove this script. import warnings from django.core import management try: from django.utils.deprecation import RemovedInDjango40Warning except ImportError: raise ImportError( 'django-admin.py was deprecated in Django 3.1 and removed in Django ' '4.0. Please manually remove this script from your virtual environment ' 'and use django-admin instead.' ) if __name__ == "__main__": warnings.warn( 'django-admin.py is deprecated in favor of django-admin.', RemovedInDjango40Warning, ) management.execute_from_command_line()
30.863636
80
0.726068
import warnings from django.core import management try: from django.utils.deprecation import RemovedInDjango40Warning except ImportError: raise ImportError( 'django-admin.py was deprecated in Django 3.1 and removed in Django ' '4.0. Please manually remove this script from your virtual environment ' 'and use django-admin instead.' ) if __name__ == "__main__": warnings.warn( 'django-admin.py is deprecated in favor of django-admin.', RemovedInDjango40Warning, ) management.execute_from_command_line()
true
true
f716ece959d936bd66bf82233eb4b71aa5c73834
2,091
py
Python
quant/utils/dingding.py
tianhm/TheNextQuant
a0d062fe8160088118b128d757d01b396c129680
[ "MIT" ]
1
2020-03-24T02:19:20.000Z
2020-03-24T02:19:20.000Z
quant/utils/dingding.py
tianhm/TheNextQuant
a0d062fe8160088118b128d757d01b396c129680
[ "MIT" ]
null
null
null
quant/utils/dingding.py
tianhm/TheNextQuant
a0d062fe8160088118b128d757d01b396c129680
[ "MIT" ]
5
2019-08-12T09:40:27.000Z
2022-01-26T07:36:24.000Z
# -*- coding:utf-8 -*- """ 钉钉机器人接口 Author: HuangTao Date: 2018/08/04 Update: 2018/12/24 1. 增加markdown格式消息推送; """ from quant.utils import logger from quant.utils.http_client import AsyncHttpRequests class DingTalk: """ 钉钉机器人接口 """ BASE_URL = 'https://oapi.dingtalk.com/robot/send?access_token=' @classmethod async def send_text_msg(cls, access_token, content, phones=None, is_at_all=False): """ 发送文本消息 @param access_token 钉钉消息access_token @param content 消息内容 @param phones 需要@提醒的群成员手机号列表 @param is_at_all 是否需要@所有人,默认为False """ body = { 'msgtype': 'text', 'text': { 'content': content } } if is_at_all: body['at'] = {'isAtAll': True} if phones: assert isinstance(phones, list) body['at'] = {'atMobiles': phones} url = cls.BASE_URL + access_token headers = {'Content-Type': 'application/json'} result = await AsyncHttpRequests.post(url, data=body, headers=headers) logger.info('url:', url, 'body:', body, 'result:', result, caller=cls) @classmethod async def send_markdown_msg(cls, access_token, title, text, phones=None, is_at_all=False): """ 发送文本消息 @param access_token 钉钉消息access_token @param title 首屏会话透出的展示内容 @param text markdown格式的消息 @param phones 需要@提醒的群成员手机号列表 @param is_at_all 是否需要@所有人,默认为False """ body = { 'msgtype': 'markdown', 'markdown': { 'title': title, 'text': text } } if is_at_all: body['at'] = {'isAtAll': True} if phones: assert isinstance(phones, list) body['at'] = {'atMobiles': phones} url = cls.BASE_URL + access_token headers = {'Content-Type': 'application/json'} result = await AsyncHttpRequests.post(url, data=body, headers=headers) logger.info('url:', url, 'body:', body, 'result:', result, caller=cls)
30.304348
94
0.568149
from quant.utils import logger from quant.utils.http_client import AsyncHttpRequests class DingTalk: BASE_URL = 'https://oapi.dingtalk.com/robot/send?access_token=' @classmethod async def send_text_msg(cls, access_token, content, phones=None, is_at_all=False): body = { 'msgtype': 'text', 'text': { 'content': content } } if is_at_all: body['at'] = {'isAtAll': True} if phones: assert isinstance(phones, list) body['at'] = {'atMobiles': phones} url = cls.BASE_URL + access_token headers = {'Content-Type': 'application/json'} result = await AsyncHttpRequests.post(url, data=body, headers=headers) logger.info('url:', url, 'body:', body, 'result:', result, caller=cls) @classmethod async def send_markdown_msg(cls, access_token, title, text, phones=None, is_at_all=False): body = { 'msgtype': 'markdown', 'markdown': { 'title': title, 'text': text } } if is_at_all: body['at'] = {'isAtAll': True} if phones: assert isinstance(phones, list) body['at'] = {'atMobiles': phones} url = cls.BASE_URL + access_token headers = {'Content-Type': 'application/json'} result = await AsyncHttpRequests.post(url, data=body, headers=headers) logger.info('url:', url, 'body:', body, 'result:', result, caller=cls)
true
true
f716ed7302b9f41211156c88b591db55e8d6fd9c
5,726
py
Python
threedi_api_client/openapi/models/inline_response20068.py
nens/threedi-api-client
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
[ "BSD-3-Clause" ]
null
null
null
threedi_api_client/openapi/models/inline_response20068.py
nens/threedi-api-client
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
[ "BSD-3-Clause" ]
16
2021-05-31T09:52:04.000Z
2022-03-14T16:07:19.000Z
threedi_api_client/openapi/models/inline_response20068.py
nens/threedi-api-client
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
[ "BSD-3-Clause" ]
null
null
null
# coding: utf-8 """ 3Di API 3Di simulation API (latest stable version: v3) Framework release: 2.9.0 3Di core release: 2.2.2 deployed on: 11:01AM (UTC) on January 11, 2022 # noqa: E501 The version of the OpenAPI document: v3 Contact: info@nelen-schuurmans.nl Generated by: https://openapi-generator.tech """ import logging import pprint import re # noqa: F401 import six from threedi_api_client.openapi.configuration import Configuration logger = logging.getLogger(__name__) class InlineResponse20068(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'count': 'int', 'next': 'str', 'previous': 'str', 'results': 'list[ThreediModelTask]' } attribute_map = { 'count': 'count', 'next': 'next', 'previous': 'previous', 'results': 'results' } def __init__(self, count=None, next=None, previous=None, results=None, local_vars_configuration=None): # noqa: E501 """InlineResponse20068 - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._count = None self._next = None self._previous = None self._results = None self.discriminator = None self.count = count self.next = next self.previous = previous self.results = results @property def count(self): """Gets the count of this InlineResponse20068. # noqa: E501 :return: The count of this InlineResponse20068. # noqa: E501 :rtype: int """ return self._count @count.setter def count(self, count): """Sets the count of this InlineResponse20068. :param count: The count of this InlineResponse20068. 
# noqa: E501 :type: int """ if self.local_vars_configuration.client_side_validation and count is None: # noqa: E501 raise ValueError("Invalid value for `count`, must not be `None`") # noqa: E501 self._count = count @property def next(self): """Gets the next of this InlineResponse20068. # noqa: E501 :return: The next of this InlineResponse20068. # noqa: E501 :rtype: str """ return self._next @next.setter def next(self, next): """Sets the next of this InlineResponse20068. :param next: The next of this InlineResponse20068. # noqa: E501 :type: str """ self._next = next @property def previous(self): """Gets the previous of this InlineResponse20068. # noqa: E501 :return: The previous of this InlineResponse20068. # noqa: E501 :rtype: str """ return self._previous @previous.setter def previous(self, previous): """Sets the previous of this InlineResponse20068. :param previous: The previous of this InlineResponse20068. # noqa: E501 :type: str """ self._previous = previous @property def results(self): """Gets the results of this InlineResponse20068. # noqa: E501 :return: The results of this InlineResponse20068. # noqa: E501 :rtype: list[ThreediModelTask] """ return self._results @results.setter def results(self, results): """Sets the results of this InlineResponse20068. :param results: The results of this InlineResponse20068. 
# noqa: E501 :type: list[ThreediModelTask] """ if self.local_vars_configuration.client_side_validation and results is None: # noqa: E501 raise ValueError("Invalid value for `results`, must not be `None`") # noqa: E501 self._results = results def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, InlineResponse20068): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, InlineResponse20068): return True return self.to_dict() != other.to_dict()
28.346535
166
0.59029
import logging import pprint import re import six from threedi_api_client.openapi.configuration import Configuration logger = logging.getLogger(__name__) class InlineResponse20068(object): openapi_types = { 'count': 'int', 'next': 'str', 'previous': 'str', 'results': 'list[ThreediModelTask]' } attribute_map = { 'count': 'count', 'next': 'next', 'previous': 'previous', 'results': 'results' } def __init__(self, count=None, next=None, previous=None, results=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._count = None self._next = None self._previous = None self._results = None self.discriminator = None self.count = count self.next = next self.previous = previous self.results = results @property def count(self): return self._count @count.setter def count(self, count): if self.local_vars_configuration.client_side_validation and count is None: raise ValueError("Invalid value for `count`, must not be `None`") self._count = count @property def next(self): return self._next @next.setter def next(self, next): self._next = next @property def previous(self): return self._previous @previous.setter def previous(self, previous): self._previous = previous @property def results(self): return self._results @results.setter def results(self, results): if self.local_vars_configuration.client_side_validation and results is None: raise ValueError("Invalid value for `results`, must not be `None`") self._results = results def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) 
else: result[attr] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, InlineResponse20068): return False return self.to_dict() == other.to_dict() def __ne__(self, other): if not isinstance(other, InlineResponse20068): return True return self.to_dict() != other.to_dict()
true
true
f716eee3eb58697b715967ba75f76f3d236c3384
3,353
py
Python
minigrid_basics/examples/rw_four_directions.py
dumpmemory/google-research
bc87d010ab9086b6e92c3f075410fa6e1f27251b
[ "Apache-2.0" ]
null
null
null
minigrid_basics/examples/rw_four_directions.py
dumpmemory/google-research
bc87d010ab9086b6e92c3f075410fa6e1f27251b
[ "Apache-2.0" ]
null
null
null
minigrid_basics/examples/rw_four_directions.py
dumpmemory/google-research
bc87d010ab9086b6e92c3f075410fa6e1f27251b
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Example that uses Gym-Minigrid, a custom environment, and custom actions. Gym-Minigrid has a larger action space that is not standard in reinforcement learning. By default, the actions are {rotate left, rotate right, forward, pick up object, drop object, toggle/activate object, done}. This example uses a class overridden to have the standard 4 directional actions: {left, right, up, down}. Here we have a random agent interacting with the environment. In this case, we also use a custom environment, which is likely what one will do in their research. We are writing the agent observations to the disk just as a simple way to get some feedback of what is going on. 
Sample run: ``` python -m minigrid_basics.examples.rw_four_directions \ --gin_bindings="MonMiniGridEnv.stochasticity=0.1" ``` """ import os from absl import app from absl import flags import gin import gym import gym_minigrid # pylint: disable=unused-import from gym_minigrid.wrappers import RGBImgObsWrapper import matplotlib.pylab as plt import tensorflow as tf from minigrid_basics.custom_wrappers import tabular_wrapper # pylint: disable=unused-import from minigrid_basics.envs import mon_minigrid FLAGS = flags.FLAGS flags.DEFINE_string('file_path', '/tmp/rw_four_directions', 'Path in which we will save the observations.') flags.DEFINE_multi_string( 'gin_bindings', [], 'Gin bindings to override default parameter values ' '(e.g. "MonMiniGridEnv.stochasticity=0.1").') def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') gin.parse_config_files_and_bindings( [os.path.join(mon_minigrid.GIN_FILES_PREFIX, 'classic_fourrooms.gin')], bindings=FLAGS.gin_bindings, skip_unknown=False) env_id = mon_minigrid.register_environment() env = gym.make(env_id) env = RGBImgObsWrapper(env) # Get pixel observations # Get tabular observation and drop the 'mission' field: env = tabular_wrapper.TabularWrapper(env, get_rgb=True) env.reset() num_frames = 0 max_num_frames = 500 if not tf.io.gfile.exists(FLAGS.file_path): tf.io.gfile.makedirs(FLAGS.file_path) undisc_return = 0 while num_frames < max_num_frames: # Act randomly obs, reward, done, _ = env.step(env.action_space.sample()) undisc_return += reward num_frames += 1 print('t:', num_frames, ' s:', obs['state']) # Draw environment frame just for simple visualization plt.imshow(obs['image']) path = os.path.join(FLAGS.file_path, 'obs_{}.png'.format(num_frames)) plt.savefig(path) plt.clf() if done: break print('Undiscounted return: %.2f' % undisc_return) env.close() if __name__ == '__main__': app.run(main)
31.632075
92
0.737548
import os from absl import app from absl import flags import gin import gym import gym_minigrid from gym_minigrid.wrappers import RGBImgObsWrapper import matplotlib.pylab as plt import tensorflow as tf from minigrid_basics.custom_wrappers import tabular_wrapper from minigrid_basics.envs import mon_minigrid FLAGS = flags.FLAGS flags.DEFINE_string('file_path', '/tmp/rw_four_directions', 'Path in which we will save the observations.') flags.DEFINE_multi_string( 'gin_bindings', [], 'Gin bindings to override default parameter values ' '(e.g. "MonMiniGridEnv.stochasticity=0.1").') def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') gin.parse_config_files_and_bindings( [os.path.join(mon_minigrid.GIN_FILES_PREFIX, 'classic_fourrooms.gin')], bindings=FLAGS.gin_bindings, skip_unknown=False) env_id = mon_minigrid.register_environment() env = gym.make(env_id) env = RGBImgObsWrapper(env) env = tabular_wrapper.TabularWrapper(env, get_rgb=True) env.reset() num_frames = 0 max_num_frames = 500 if not tf.io.gfile.exists(FLAGS.file_path): tf.io.gfile.makedirs(FLAGS.file_path) undisc_return = 0 while num_frames < max_num_frames: obs, reward, done, _ = env.step(env.action_space.sample()) undisc_return += reward num_frames += 1 print('t:', num_frames, ' s:', obs['state']) plt.imshow(obs['image']) path = os.path.join(FLAGS.file_path, 'obs_{}.png'.format(num_frames)) plt.savefig(path) plt.clf() if done: break print('Undiscounted return: %.2f' % undisc_return) env.close() if __name__ == '__main__': app.run(main)
true
true
f716ef059d911c1ddacddb527c6766ef71f00589
3,623
py
Python
scripts/run_mots_depth_inference.py
VladimirYugay/diw
d1a760f1911e9d09fbe038abffc3aa76d384f86a
[ "MIT" ]
1
2021-09-14T21:24:56.000Z
2021-09-14T21:24:56.000Z
scripts/run_mots_depth_inference.py
VladimirYugay/diw
d1a760f1911e9d09fbe038abffc3aa76d384f86a
[ "MIT" ]
null
null
null
scripts/run_mots_depth_inference.py
VladimirYugay/diw
d1a760f1911e9d09fbe038abffc3aa76d384f86a
[ "MIT" ]
null
null
null
""" Script for running depth inference assuming MOTS dataset structure """ import logging import os import sys from pathlib import Path, PurePath import click import matplotlib.pyplot as plt import numpy as np import tensorflow.compat.v1 as tf from IPython.core import ultratb from PIL import Image import diw from diw.model import Model, get_vars_to_save_and_restore sys.excepthook = ultratb.FormattedTB(mode="Verbose", color_scheme="Linux", call_pdb=1) _logger = logging.getLogger(__name__) def load_image(img_file): """Load image from disk. Output value range: [0,255].""" return Image.open(img_file).convert("RGB") def resize_img(img, img_shape): """ resizes an image """ return img.resize(img_shape, Image.LANCZOS).convert("RGB") def plot_image(image, image_type="RGB"): """ plots image with matplotlib """ plt.figure() color_map = None if image_type != "RGB": color_map = plt.cm.get_cmap("plasma").reversed() plt.imshow(image, cmap=color_map) plt.show() # display it return plt @click.command() @click.option( "--checkpoint_dir", "checkpoint_dir", default="./data/checkpoints/test", type=click.Path(exists=True), help="Path to the model checkpoint", ) @click.option( "--data_dir", "data_dir", default="./data/test/mots_data", type=click.Path(exists=True), help="Path to MOTS data", ) @click.option( "--save_img", "save_img", flag_value=True, help="Flag to whether save the image of the depth (besides numpy array)", ) @click.version_option(diw.__version__) def main(data_dir, checkpoint_dir, save_img): if save_img: plt.figure() height, width = 128, 416 os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" # to fix CUDA bug inference_model = Model( is_training=False, batch_size=1, img_height=height, img_width=width ) checkpoint = tf.train.latest_checkpoint(checkpoint_dir) vars_to_restore = get_vars_to_save_and_restore(checkpoint) saver = tf.train.Saver(vars_to_restore) with tf.Session() as sess: saver.restore(sess, checkpoint) sequence_paths = [p for p in Path(data_dir).glob("*") if 
p.is_dir()] for seq_path in sequence_paths: model_name = PurePath(checkpoint_dir).parts[-1] (seq_path / model_name).mkdir(parents=True, exist_ok=True) if save_img: (seq_path / (model_name + "_depth_images")).mkdir( parents=True, exist_ok=True ) img_paths = sorted( [p for p in (seq_path / "img1").glob("*") if p.is_file()], key=lambda path: str(path), ) for img_path in img_paths: img_name = img_path.parts[-1].split(".")[0] print("Processing sequence: {}, image: {}".format(seq_path, img_name)) image = load_image(str(img_path)) image = resize_img(image, (width, height)) image = np.array(image) image = image[None, ...] depth = inference_model.inference_depth(image, sess) depth = depth[0, :, :, 0] np.save(str(seq_path / model_name / img_name), depth) if save_img: plt.imshow(depth, plt.cm.get_cmap("plasma").reversed()) plt.savefig( str(seq_path / (model_name + "_depth_images")) + "/" + (img_name + ".png") ) plt.clf() if __name__ == "__main__": main()
32.348214
86
0.606956
import logging import os import sys from pathlib import Path, PurePath import click import matplotlib.pyplot as plt import numpy as np import tensorflow.compat.v1 as tf from IPython.core import ultratb from PIL import Image import diw from diw.model import Model, get_vars_to_save_and_restore sys.excepthook = ultratb.FormattedTB(mode="Verbose", color_scheme="Linux", call_pdb=1) _logger = logging.getLogger(__name__) def load_image(img_file): return Image.open(img_file).convert("RGB") def resize_img(img, img_shape): return img.resize(img_shape, Image.LANCZOS).convert("RGB") def plot_image(image, image_type="RGB"): plt.figure() color_map = None if image_type != "RGB": color_map = plt.cm.get_cmap("plasma").reversed() plt.imshow(image, cmap=color_map) plt.show() return plt @click.command() @click.option( "--checkpoint_dir", "checkpoint_dir", default="./data/checkpoints/test", type=click.Path(exists=True), help="Path to the model checkpoint", ) @click.option( "--data_dir", "data_dir", default="./data/test/mots_data", type=click.Path(exists=True), help="Path to MOTS data", ) @click.option( "--save_img", "save_img", flag_value=True, help="Flag to whether save the image of the depth (besides numpy array)", ) @click.version_option(diw.__version__) def main(data_dir, checkpoint_dir, save_img): if save_img: plt.figure() height, width = 128, 416 os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" inference_model = Model( is_training=False, batch_size=1, img_height=height, img_width=width ) checkpoint = tf.train.latest_checkpoint(checkpoint_dir) vars_to_restore = get_vars_to_save_and_restore(checkpoint) saver = tf.train.Saver(vars_to_restore) with tf.Session() as sess: saver.restore(sess, checkpoint) sequence_paths = [p for p in Path(data_dir).glob("*") if p.is_dir()] for seq_path in sequence_paths: model_name = PurePath(checkpoint_dir).parts[-1] (seq_path / model_name).mkdir(parents=True, exist_ok=True) if save_img: (seq_path / (model_name + "_depth_images")).mkdir( parents=True, 
exist_ok=True ) img_paths = sorted( [p for p in (seq_path / "img1").glob("*") if p.is_file()], key=lambda path: str(path), ) for img_path in img_paths: img_name = img_path.parts[-1].split(".")[0] print("Processing sequence: {}, image: {}".format(seq_path, img_name)) image = load_image(str(img_path)) image = resize_img(image, (width, height)) image = np.array(image) image = image[None, ...] depth = inference_model.inference_depth(image, sess) depth = depth[0, :, :, 0] np.save(str(seq_path / model_name / img_name), depth) if save_img: plt.imshow(depth, plt.cm.get_cmap("plasma").reversed()) plt.savefig( str(seq_path / (model_name + "_depth_images")) + "/" + (img_name + ".png") ) plt.clf() if __name__ == "__main__": main()
true
true
f716f00b794214b6366e86f868a33212f28fca85
2,250
py
Python
custom_components/nintendo_wishlist/__init__.py
custom-components/sensor.nintendo_wishlis
6709a5c1b6e323494e7449fa1ac24e61100fc302
[ "Apache-2.0" ]
13
2020-05-07T21:31:51.000Z
2022-02-09T01:53:53.000Z
custom_components/nintendo_wishlist/__init__.py
custom-components/sensor.nintendo_wishlis
6709a5c1b6e323494e7449fa1ac24e61100fc302
[ "Apache-2.0" ]
19
2019-07-24T08:10:06.000Z
2022-02-05T04:09:34.000Z
custom_components/nintendo_wishlist/__init__.py
custom-components/sensor.nintendo_wishlis
6709a5c1b6e323494e7449fa1ac24e61100fc302
[ "Apache-2.0" ]
5
2019-12-13T17:48:52.000Z
2020-07-06T07:45:31.000Z
"""Nintendo Wishlist integration.""" import logging import voluptuous as vol from homeassistant import core from homeassistant.const import CONF_SCAN_INTERVAL from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.discovery import async_load_platform from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from .const import CONF_COUNTRY, CONF_WISHLIST, DEFAULT_SCAN_INTERVAL, DOMAIN from .eshop import Country, EShop _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_WISHLIST): cv.ensure_list, vol.Required(CONF_COUNTRY): cv.enum(Country), vol.Optional( CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL ): vol.All(cv.time_period, cv.positive_timedelta), } ) }, # The full HA configurations gets passed to `async_setup` so we need to allow # extra keys. extra=vol.ALLOW_EXTRA, ) async def async_setup(hass: core.HomeAssistant, config: dict) -> bool: """Set up the platform. @NOTE: `config` is the full dict from `configuration.yaml`. :returns: A boolean to indicate that initialization was successful. """ conf = config[DOMAIN] country = conf[CONF_COUNTRY].name wishlist = conf[CONF_WISHLIST] scan_interval = conf[CONF_SCAN_INTERVAL] eshop = EShop(country, async_get_clientsession(hass), wishlist) coordinator = DataUpdateCoordinator( hass, _LOGGER, # Name of the data. For logging purposes. name=DOMAIN, update_method=eshop.fetch_on_sale, # Polling interval. Will only be polled if there are subscribers. update_interval=scan_interval, ) # Fetch initial data so we have data when entities subscribe await coordinator.async_refresh() hass.data[DOMAIN] = { "conf": conf, "coordinator": coordinator, } hass.async_create_task(async_load_platform(hass, "sensor", DOMAIN, {}, conf)) hass.async_create_task(async_load_platform(hass, "binary_sensor", DOMAIN, {}, conf)) return True
33.58209
88
0.699556
import logging import voluptuous as vol from homeassistant import core from homeassistant.const import CONF_SCAN_INTERVAL from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.discovery import async_load_platform from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from .const import CONF_COUNTRY, CONF_WISHLIST, DEFAULT_SCAN_INTERVAL, DOMAIN from .eshop import Country, EShop _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_WISHLIST): cv.ensure_list, vol.Required(CONF_COUNTRY): cv.enum(Country), vol.Optional( CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL ): vol.All(cv.time_period, cv.positive_timedelta), } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass: core.HomeAssistant, config: dict) -> bool: conf = config[DOMAIN] country = conf[CONF_COUNTRY].name wishlist = conf[CONF_WISHLIST] scan_interval = conf[CONF_SCAN_INTERVAL] eshop = EShop(country, async_get_clientsession(hass), wishlist) coordinator = DataUpdateCoordinator( hass, _LOGGER, name=DOMAIN, update_method=eshop.fetch_on_sale, update_interval=scan_interval, ) await coordinator.async_refresh() hass.data[DOMAIN] = { "conf": conf, "coordinator": coordinator, } hass.async_create_task(async_load_platform(hass, "sensor", DOMAIN, {}, conf)) hass.async_create_task(async_load_platform(hass, "binary_sensor", DOMAIN, {}, conf)) return True
true
true
f716f19491be0a8291d501ec3e6e2ae018304842
5,052
py
Python
code/lib/warn/search/malicious_behaviours/telephony_identifiers.py
JaspervanRooijen/covid-apps-observer
59f6049a493c80797d83fd24e4a4789a14f3110e
[ "MIT" ]
null
null
null
code/lib/warn/search/malicious_behaviours/telephony_identifiers.py
JaspervanRooijen/covid-apps-observer
59f6049a493c80797d83fd24e4a4789a14f3110e
[ "MIT" ]
null
null
null
code/lib/warn/search/malicious_behaviours/telephony_identifiers.py
JaspervanRooijen/covid-apps-observer
59f6049a493c80797d83fd24e4a4789a14f3110e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # This file is part of Androwarn. # # Copyright (C) 2012, 2019, Thomas Debize <tdebize at mail.com> # All rights reserved. # # Androwarn is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Androwarn is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Androwarn. If not, see <http://www.gnu.org/licenses/>. # Global imports import logging # Androwarn modules import from lib.warn.util.util import * # Logguer log = logging.getLogger('log') def detect_telephony_gsm_GsmCellLocation(x): """ @param x : a Analysis instance @rtype : a list strings for exemple [ 'This application makes phone calls', "This application sends an SMS message 'Premium SMS' to the '12345' phone number" ] """ method_listing = [ ("getLac()", "This application reads the Location Area Code value"), ("getCid()", "This application reads the Cell ID value") ] class_name = 'Landroid/telephony/gsm/GsmCellLocation' return structural_analysis_search_method_bulk(class_name, method_listing, x) def detect_Telephony_Manager_Leakages(x) : """ @param x : a Analysis instance @rtype : a list strings for exemple [ 'This application makes phone calls', "This application sends an SMS message 'Premium SMS' to the '12345' phone number" ] """ method_listing = [ ("getCallState()", "This application reads the phone's current state"), ("getCellLocation()", "This application reads the current location of the device"), ("getDataActivity()", "This application reads the type of activity on a data connection"), ("getDataState()", 
"This application reads the current data connection state"), ("getDeviceId()", "This application reads the unique device ID, i.e the IMEI for GSM and the MEID or ESN for CDMA phones"), ("getDeviceSoftwareVersion()", "This application reads the software version number for the device, for example, the IMEI/SV for GSM phones"), ("getLine1Number()", "This application reads the phone number string for line 1, for example, the MSISDN for a GSM phone"), ("getNeighboringCellInfo()", "This application reads the neighboring cell information of the device"), ("getNetworkCountryIso()", "This application reads the ISO country code equivalent of the current registered operator's MCC (Mobile Country Code)"), ("getNetworkOperator()", "This application reads the numeric name (MCC+MNC) of current registered operator"), ("getNetworkOperatorName()", "This application reads the operator name"), ("getNetworkType()", "This application reads the radio technology (network type) currently in use on the device for data transmission"), ("getPhoneType()", "This application reads the device phone type value"), ("getSimCountryIso()", "This application reads the ISO country code equivalent for the SIM provider's country code"), ("getSimOperator()", "This application reads the MCC+MNC of the provider of the SIM"), ("getSimOperatorName()", "This application reads the Service Provider Name (SPN)"), ("getSimSerialNumber()", "This application reads the SIM's serial number"), ("getSimState()", "This application reads the constant indicating the state of the device SIM card"), ("getSubscriberId()", "This application reads the unique subscriber ID, for example, the IMSI for a GSM phone"), ("getVoiceMailAlphaTag()", "This application reads the alphabetic identifier associated with the voice mail number"), ("getVoiceMailNumber()", "This application reads the voice mail number") ] class_name = 'Landroid/telephony/TelephonyManager' return structural_analysis_search_method_bulk(class_name, method_listing, x) 
def gather_telephony_identifiers_leakage(x) : """ @param x : a Analysis instance @rtype : a list strings for the concerned category, for exemple [ 'This application makes phone calls', "This application sends an SMS message 'Premium SMS' to the '12345' phone number" ] """ result = [] result.extend( detect_Telephony_Manager_Leakages(x) ) result.extend( detect_telephony_gsm_GsmCellLocation(x) ) return result
54.322581
195
0.66825
import logging from lib.warn.util.util import * log = logging.getLogger('log') def detect_telephony_gsm_GsmCellLocation(x): method_listing = [ ("getLac()", "This application reads the Location Area Code value"), ("getCid()", "This application reads the Cell ID value") ] class_name = 'Landroid/telephony/gsm/GsmCellLocation' return structural_analysis_search_method_bulk(class_name, method_listing, x) def detect_Telephony_Manager_Leakages(x) : method_listing = [ ("getCallState()", "This application reads the phone's current state"), ("getCellLocation()", "This application reads the current location of the device"), ("getDataActivity()", "This application reads the type of activity on a data connection"), ("getDataState()", "This application reads the current data connection state"), ("getDeviceId()", "This application reads the unique device ID, i.e the IMEI for GSM and the MEID or ESN for CDMA phones"), ("getDeviceSoftwareVersion()", "This application reads the software version number for the device, for example, the IMEI/SV for GSM phones"), ("getLine1Number()", "This application reads the phone number string for line 1, for example, the MSISDN for a GSM phone"), ("getNeighboringCellInfo()", "This application reads the neighboring cell information of the device"), ("getNetworkCountryIso()", "This application reads the ISO country code equivalent of the current registered operator's MCC (Mobile Country Code)"), ("getNetworkOperator()", "This application reads the numeric name (MCC+MNC) of current registered operator"), ("getNetworkOperatorName()", "This application reads the operator name"), ("getNetworkType()", "This application reads the radio technology (network type) currently in use on the device for data transmission"), ("getPhoneType()", "This application reads the device phone type value"), ("getSimCountryIso()", "This application reads the ISO country code equivalent for the SIM provider's country code"), ("getSimOperator()", "This application reads the 
MCC+MNC of the provider of the SIM"), ("getSimOperatorName()", "This application reads the Service Provider Name (SPN)"), ("getSimSerialNumber()", "This application reads the SIM's serial number"), ("getSimState()", "This application reads the constant indicating the state of the device SIM card"), ("getSubscriberId()", "This application reads the unique subscriber ID, for example, the IMSI for a GSM phone"), ("getVoiceMailAlphaTag()", "This application reads the alphabetic identifier associated with the voice mail number"), ("getVoiceMailNumber()", "This application reads the voice mail number") ] class_name = 'Landroid/telephony/TelephonyManager' return structural_analysis_search_method_bulk(class_name, method_listing, x) def gather_telephony_identifiers_leakage(x) : result = [] result.extend( detect_Telephony_Manager_Leakages(x) ) result.extend( detect_telephony_gsm_GsmCellLocation(x) ) return result
true
true
f716f2899ec4b9277a6cd89d3948a14978b46dc3
4,763
py
Python
izi_shipping/packers.py
izi-ecommerce/izi-shipping
863dc84ad73a2e1413b3ef8043af3ac87fa5cdb9
[ "BSD-3-Clause" ]
null
null
null
izi_shipping/packers.py
izi-ecommerce/izi-shipping
863dc84ad73a2e1413b3ef8043af3ac87fa5cdb9
[ "BSD-3-Clause" ]
null
null
null
izi_shipping/packers.py
izi-ecommerce/izi-shipping
863dc84ad73a2e1413b3ef8043af3ac87fa5cdb9
[ "BSD-3-Clause" ]
null
null
null
from decimal import Decimal as D from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.utils.translation import ugettext_lazy as _ from izi.core import loading Scale = loading.get_class('shipping.scales', 'Scale') weight_precision = getattr( settings, 'IZI_SHIPPING_WEIGHT_PRECISION', D('0.000')) volume_precision = getattr( settings, 'IZI_SHIPPING_VOLUME_PRECISION', D('0.000')) # per product defaults # 0.1m x 0.1m x 0.1m DEFAULT_BOX = getattr(settings, 'IZI_SHIPPING_DEFAULT_BOX', {'width': float('0.1'), 'height': float('0.1'), 'length': float('0.1')}) # 1 Kg DEFAULT_WEIGHT = getattr(settings, 'IZI_SHIPPING_DEFAULT_WEIGHT', 1) # basket volue * VOLUME_RATIO = estimated container(s) volume # very simple method VOLUME_RATIO = getattr(settings, 'IZI_SHIPPING_VOLUME_RATIO', D('1.3')) class Box(object): height = 0 width = 0 length = 0 def __init__(self, h, w, l): self.height, self.width, self.length = h, w, l @property def volume(self): return D(self.height*self.width*self.length).quantize(volume_precision) class Container(Box): name = '' def __init__(self, h, w, l, name): self.name = name super(Container, self).__init__(h, w, l) class ProductBox(Box): """ 'Packs' given product to the virtual box and scale it. Takes size and weight from product attributes (if present) """ weight = 0 def __init__(self, product, size_codes=('width', 'height', 'length'), weight_code='weight', default_weight=DEFAULT_WEIGHT): self.attributes = size_codes attr_vals = {} scale = Scale(attribute_code=weight_code, default_weight=default_weight) try: for attr in self.attributes: attr_vals[attr] = product.attribute_values.get( attribute__code=attr).value except ObjectDoesNotExist: attr_vals = DEFAULT_BOX self.weight = scale.weigh_product(product) for attr in attr_vals.keys(): setattr(self, attr, attr_vals[attr]) class Packer(object): """ To calculate shipping charge the set of containers required. 
That set should be enough for all items of basket which shoud have appropriate attributes (height,width,lenght) And this is the problem known as Bin Packing Problem """ def __init__(self, containers, **kwargs): self.containers = containers self.attributes = kwargs.get( 'attribute_codes', ('width', 'height', 'length')) self.weight_code = kwargs.get('weight_code', 'weight') self.default_weight = kwargs.get('default_weight', DEFAULT_WEIGHT) def get_default_container(self, volume): """Generates _virtual_ cube container which does not exists in the db but enough to calculate estimated shipping charge for the basket's volume given """ side = float(volume) ** (1 / 3.0) return Container(side, side, side, _('virtual volume (%s)') % volume) def box_product(self, product): return ProductBox(product, self.attributes, self.weight_code, self.default_weight) def pack_basket(self, basket): # First attempt but very weird volume = 0 weight = 0 box = container = matched = None for line in basket.lines.all(): box = self.box_product(line.product) volume += box.volume * line.quantity weight += box.weight * line.quantity del box volume = volume * VOLUME_RATIO # Calc container volume during DB query excution # source: http://stackoverflow.com/questions/1652577/django-ordering-queryset-by-a-calculated-field # as we can't use computed values in the WHERE clause # we will filter containers as python list # container = self.containers.extra(select={'volume': 'height*width*lenght'})\ # .extra(order_by=['volume'])\ # .extra(where=['"volume">%s'], params=[volume])[0] # select containers which volumes greater than summarized basket volume matched = [c for c in self.containers.all() if c.volume >= volume] if len(matched) > 0: container = matched[0] # TODO: count container's weight - add it to model else: container = self.get_default_container(volume) return [{'weight': D(weight).quantize(weight_precision), 'container': container}]
35.81203
107
0.617888
from decimal import Decimal as D from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.utils.translation import ugettext_lazy as _ from izi.core import loading Scale = loading.get_class('shipping.scales', 'Scale') weight_precision = getattr( settings, 'IZI_SHIPPING_WEIGHT_PRECISION', D('0.000')) volume_precision = getattr( settings, 'IZI_SHIPPING_VOLUME_PRECISION', D('0.000')) DEFAULT_BOX = getattr(settings, 'IZI_SHIPPING_DEFAULT_BOX', {'width': float('0.1'), 'height': float('0.1'), 'length': float('0.1')}) DEFAULT_WEIGHT = getattr(settings, 'IZI_SHIPPING_DEFAULT_WEIGHT', 1) VOLUME_RATIO = getattr(settings, 'IZI_SHIPPING_VOLUME_RATIO', D('1.3')) class Box(object): height = 0 width = 0 length = 0 def __init__(self, h, w, l): self.height, self.width, self.length = h, w, l @property def volume(self): return D(self.height*self.width*self.length).quantize(volume_precision) class Container(Box): name = '' def __init__(self, h, w, l, name): self.name = name super(Container, self).__init__(h, w, l) class ProductBox(Box): weight = 0 def __init__(self, product, size_codes=('width', 'height', 'length'), weight_code='weight', default_weight=DEFAULT_WEIGHT): self.attributes = size_codes attr_vals = {} scale = Scale(attribute_code=weight_code, default_weight=default_weight) try: for attr in self.attributes: attr_vals[attr] = product.attribute_values.get( attribute__code=attr).value except ObjectDoesNotExist: attr_vals = DEFAULT_BOX self.weight = scale.weigh_product(product) for attr in attr_vals.keys(): setattr(self, attr, attr_vals[attr]) class Packer(object): def __init__(self, containers, **kwargs): self.containers = containers self.attributes = kwargs.get( 'attribute_codes', ('width', 'height', 'length')) self.weight_code = kwargs.get('weight_code', 'weight') self.default_weight = kwargs.get('default_weight', DEFAULT_WEIGHT) def get_default_container(self, volume): side = float(volume) ** (1 / 3.0) return Container(side, side, 
side, _('virtual volume (%s)') % volume) def box_product(self, product): return ProductBox(product, self.attributes, self.weight_code, self.default_weight) def pack_basket(self, basket): volume = 0 weight = 0 box = container = matched = None for line in basket.lines.all(): box = self.box_product(line.product) volume += box.volume * line.quantity weight += box.weight * line.quantity del box volume = volume * VOLUME_RATIO # we will filter containers as python list # container = self.containers.extra(select={'volume': 'height*width*lenght'})\ # .extra(order_by=['volume'])\ # .extra(where=['"volume">%s'], params=[volume])[0] # select containers which volumes greater than summarized basket volume matched = [c for c in self.containers.all() if c.volume >= volume] if len(matched) > 0: container = matched[0] # TODO: count container's weight - add it to model else: container = self.get_default_container(volume) return [{'weight': D(weight).quantize(weight_precision), 'container': container}]
true
true
f716f32b4db4b79169801db929ce86099f51f34b
564
py
Python
hw4/part3/table_printer.py
jonescarissa/csc221
1052b4cf9f3aab86c063c1b3845895a590bc2083
[ "CC0-1.0" ]
null
null
null
hw4/part3/table_printer.py
jonescarissa/csc221
1052b4cf9f3aab86c063c1b3845895a590bc2083
[ "CC0-1.0" ]
null
null
null
hw4/part3/table_printer.py
jonescarissa/csc221
1052b4cf9f3aab86c063c1b3845895a590bc2083
[ "CC0-1.0" ]
1
2021-09-02T03:55:17.000Z
2021-09-02T03:55:17.000Z
''' Table Printer practice project Author: Carissa Jones ''' tableData = [['I', 'out', 'chair.'], ['just', 'of', 'Im'], ['fell', 'my', 'fine.']] def printTable(tableData): '''Given list of strings, tableData, displays in a well-organized table with each column right-justified''' colWidths = [0] * len(tableData) for x in range(len(tableData[0])): for y in range(len(colWidths)): print(tableData[y][x].rjust(colWidths[y]), end= ' ') print(end='\n') printTable(tableData)
23.5
69
0.56383
tableData = [['I', 'out', 'chair.'], ['just', 'of', 'Im'], ['fell', 'my', 'fine.']] def printTable(tableData): colWidths = [0] * len(tableData) for x in range(len(tableData[0])): for y in range(len(colWidths)): print(tableData[y][x].rjust(colWidths[y]), end= ' ') print(end='\n') printTable(tableData)
true
true
f716f342dfdde7c09452213431f486564cd316b0
20,293
py
Python
blender/arm/lightmapper/utility/encoding.py
onelsonic/armory
55cfead0844923d419d75bf4bd677ebed714b4b5
[ "Zlib" ]
2,583
2016-07-27T08:25:47.000Z
2022-03-31T10:42:17.000Z
blender/arm/lightmapper/utility/encoding.py
onelsonic/armory
55cfead0844923d419d75bf4bd677ebed714b4b5
[ "Zlib" ]
2,122
2016-07-31T14:20:04.000Z
2022-03-31T20:44:14.000Z
blender/arm/lightmapper/utility/encoding.py
onelsonic/armory
55cfead0844923d419d75bf4bd677ebed714b4b5
[ "Zlib" ]
451
2016-08-12T05:52:58.000Z
2022-03-31T01:33:07.000Z
import bpy, math, os, gpu, bgl import numpy as np from . import utility from fractions import Fraction from gpu_extras.batch import batch_for_shader def encodeLogLuvGPU(image, outDir, quality): input_image = bpy.data.images[image.name] image_name = input_image.name offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1]) image = input_image vertex_shader = ''' uniform mat4 ModelViewProjectionMatrix; in vec2 texCoord; in vec2 pos; out vec2 texCoord_interp; void main() { //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f); //gl_Position.z = 1.0; gl_Position = vec4(pos.xy, 100, 100); texCoord_interp = texCoord; } ''' fragment_shader = ''' in vec2 texCoord_interp; out vec4 fragColor; uniform sampler2D image; const mat3 cLogLuvM = mat3( 0.2209, 0.3390, 0.4184, 0.1138, 0.6780, 0.7319, 0.0102, 0.1130, 0.2969 ); vec4 LinearToLogLuv( in vec4 value ) { vec3 Xp_Y_XYZp = cLogLuvM * value.rgb; Xp_Y_XYZp = max( Xp_Y_XYZp, vec3( 1e-6, 1e-6, 1e-6 ) ); vec4 vResult; vResult.xy = Xp_Y_XYZp.xy / Xp_Y_XYZp.z; float Le = 2.0 * log2(Xp_Y_XYZp.y) + 127.0; vResult.w = fract( Le ); vResult.z = ( Le - ( floor( vResult.w * 255.0 ) ) / 255.0 ) / 255.0; return vResult; //return vec4(Xp_Y_XYZp,1); } const mat3 cLogLuvInverseM = mat3( 6.0014, -2.7008, -1.7996, -1.3320, 3.1029, -5.7721, 0.3008, -1.0882, 5.6268 ); vec4 LogLuvToLinear( in vec4 value ) { float Le = value.z * 255.0 + value.w; vec3 Xp_Y_XYZp; Xp_Y_XYZp.y = exp2( ( Le - 127.0 ) / 2.0 ); Xp_Y_XYZp.z = Xp_Y_XYZp.y / value.y; Xp_Y_XYZp.x = value.x * Xp_Y_XYZp.z; vec3 vRGB = cLogLuvInverseM * Xp_Y_XYZp.rgb; //return vec4( max( vRGB, 0.0 ), 1.0 ); return vec4( max( Xp_Y_XYZp, 0.0 ), 1.0 ); } void main() { //fragColor = LinearToLogLuv(pow(texture(image, texCoord_interp), vec4(0.454))); fragColor = LinearToLogLuv(texture(image, texCoord_interp)); //fragColor = LogLuvToLinear(LinearToLogLuv(texture(image, texCoord_interp))); } ''' x_screen = 0 off_x = -100 off_y = -100 y_screen_flip = 0 sx = 200 sy = 200 
vertices = ( (x_screen + off_x, y_screen_flip - off_y), (x_screen + off_x, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - off_x)) if input_image.colorspace_settings.name != 'Linear': input_image.colorspace_settings.name = 'Linear' # Removing .exr or .hdr prefix if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr': image_name = image_name[:-4] target_image = bpy.data.images.get(image_name + '_encoded') if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(image_name + '_encoded') if not target_image: target_image = bpy.data.images.new( name = image_name + '_encoded', width = input_image.size[0], height = input_image.size[1], alpha = True, float_buffer = False ) shader = gpu.types.GPUShader(vertex_shader, fragment_shader) batch = batch_for_shader( shader, 'TRI_FAN', { "pos": vertices, "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)), }, ) if image.gl_load(): raise Exception() with offscreen.bind(): bgl.glActiveTexture(bgl.GL_TEXTURE0) bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode) shader.bind() shader.uniform_int("image", 0) batch.draw(shader) buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4) bgl.glReadBuffer(bgl.GL_BACK) bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer) offscreen.free() target_image.pixels = [v / 255 for v in buffer] input_image = target_image #Save LogLuv if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(input_image.name) input_image.filepath_raw = outDir + "/" + input_image.name + ".png" #input_image.filepath_raw = outDir + "_encoded.png" input_image.file_format = "PNG" bpy.context.scene.render.image_settings.quality = quality #input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene) input_image.save() #Todo - Find a way to save #bpy.ops.image.save_all_modified() def encodeImageRGBDGPU(image, maxRange, outDir, quality): input_image = 
bpy.data.images[image.name] image_name = input_image.name offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1]) image = input_image vertex_shader = ''' uniform mat4 ModelViewProjectionMatrix; in vec2 texCoord; in vec2 pos; out vec2 texCoord_interp; void main() { //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f); //gl_Position.z = 1.0; gl_Position = vec4(pos.xy, 100, 100); texCoord_interp = texCoord; } ''' fragment_shader = ''' in vec2 texCoord_interp; out vec4 fragColor; uniform sampler2D image; //Code from here: https://github.com/BabylonJS/Babylon.js/blob/master/src/Shaders/ShadersInclude/helperFunctions.fx const float PI = 3.1415926535897932384626433832795; const float HALF_MIN = 5.96046448e-08; // Smallest positive half. const float LinearEncodePowerApprox = 2.2; const float GammaEncodePowerApprox = 1.0 / LinearEncodePowerApprox; const vec3 LuminanceEncodeApprox = vec3(0.2126, 0.7152, 0.0722); const float Epsilon = 0.0000001; #define saturate(x) clamp(x, 0.0, 1.0) float maxEps(float x) { return max(x, Epsilon); } float toLinearSpace(float color) { return pow(color, LinearEncodePowerApprox); } vec3 toLinearSpace(vec3 color) { return pow(color, vec3(LinearEncodePowerApprox)); } vec4 toLinearSpace(vec4 color) { return vec4(pow(color.rgb, vec3(LinearEncodePowerApprox)), color.a); } vec3 toGammaSpace(vec3 color) { return pow(color, vec3(GammaEncodePowerApprox)); } vec4 toGammaSpace(vec4 color) { return vec4(pow(color.rgb, vec3(GammaEncodePowerApprox)), color.a); } float toGammaSpace(float color) { return pow(color, GammaEncodePowerApprox); } float square(float value) { return value * value; } // Check if configurable value is needed. const float rgbdMaxRange = 255.0; vec4 toRGBD(vec3 color) { float maxRGB = maxEps(max(color.r, max(color.g, color.b))); float D = max(rgbdMaxRange / maxRGB, 1.); D = clamp(floor(D) / 255.0, 0., 1.); vec3 rgb = color.rgb * D; // Helps with png quantization. 
rgb = toGammaSpace(rgb); return vec4(rgb, D); } vec3 fromRGBD(vec4 rgbd) { // Helps with png quantization. rgbd.rgb = toLinearSpace(rgbd.rgb); // return rgbd.rgb * ((rgbdMaxRange / 255.0) / rgbd.a); return rgbd.rgb / rgbd.a; } void main() { fragColor = toRGBD(texture(image, texCoord_interp).rgb); } ''' x_screen = 0 off_x = -100 off_y = -100 y_screen_flip = 0 sx = 200 sy = 200 vertices = ( (x_screen + off_x, y_screen_flip - off_y), (x_screen + off_x, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - off_x)) if input_image.colorspace_settings.name != 'Linear': input_image.colorspace_settings.name = 'Linear' # Removing .exr or .hdr prefix if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr': image_name = image_name[:-4] target_image = bpy.data.images.get(image_name + '_encoded') if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(image_name + '_encoded') if not target_image: target_image = bpy.data.images.new( name = image_name + '_encoded', width = input_image.size[0], height = input_image.size[1], alpha = True, float_buffer = False ) shader = gpu.types.GPUShader(vertex_shader, fragment_shader) batch = batch_for_shader( shader, 'TRI_FAN', { "pos": vertices, "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)), }, ) if image.gl_load(): raise Exception() with offscreen.bind(): bgl.glActiveTexture(bgl.GL_TEXTURE0) bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode) shader.bind() shader.uniform_int("image", 0) batch.draw(shader) buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4) bgl.glReadBuffer(bgl.GL_BACK) bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer) offscreen.free() target_image.pixels = [v / 255 for v in buffer] input_image = target_image #Save LogLuv if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(input_image.name) input_image.filepath_raw = outDir + "/" + input_image.name + ".png" 
#input_image.filepath_raw = outDir + "_encoded.png" input_image.file_format = "PNG" bpy.context.scene.render.image_settings.quality = quality #input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene) input_image.save() #Todo - Find a way to save #bpy.ops.image.save_all_modified() #TODO - FINISH THIS def encodeImageRGBMGPU(image, maxRange, outDir, quality): input_image = bpy.data.images[image.name] image_name = input_image.name offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1]) image = input_image vertex_shader = ''' uniform mat4 ModelViewProjectionMatrix; in vec2 texCoord; in vec2 pos; out vec2 texCoord_interp; void main() { //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f); //gl_Position.z = 1.0; gl_Position = vec4(pos.xy, 100, 100); texCoord_interp = texCoord; } ''' fragment_shader = ''' in vec2 texCoord_interp; out vec4 fragColor; uniform sampler2D image; //Code from here: https://github.com/BabylonJS/Babylon.js/blob/master/src/Shaders/ShadersInclude/helperFunctions.fx const float PI = 3.1415926535897932384626433832795; const float HALF_MIN = 5.96046448e-08; // Smallest positive half. 
const float LinearEncodePowerApprox = 2.2; const float GammaEncodePowerApprox = 1.0 / LinearEncodePowerApprox; const vec3 LuminanceEncodeApprox = vec3(0.2126, 0.7152, 0.0722); const float Epsilon = 0.0000001; #define saturate(x) clamp(x, 0.0, 1.0) float maxEps(float x) { return max(x, Epsilon); } float toLinearSpace(float color) { return pow(color, LinearEncodePowerApprox); } vec3 toLinearSpace(vec3 color) { return pow(color, vec3(LinearEncodePowerApprox)); } vec4 toLinearSpace(vec4 color) { return vec4(pow(color.rgb, vec3(LinearEncodePowerApprox)), color.a); } vec3 toGammaSpace(vec3 color) { return pow(color, vec3(GammaEncodePowerApprox)); } vec4 toGammaSpace(vec4 color) { return vec4(pow(color.rgb, vec3(GammaEncodePowerApprox)), color.a); } float toGammaSpace(float color) { return pow(color, GammaEncodePowerApprox); } float square(float value) { return value * value; } // Check if configurable value is needed. const float rgbdMaxRange = 255.0; vec4 toRGBM(vec3 color) { vec4 rgbm; color *= 1.0/6.0; rgbm.a = saturate( max( max( color.r, color.g ), max( color.b, 1e-6 ) ) ); rgbm.a = clamp(floor(D) / 255.0, 0., 1.); rgbm.rgb = color / rgbm.a; return float maxRGB = maxEps(max(color.r, max(color.g, color.b))); float D = max(rgbdMaxRange / maxRGB, 1.); D = clamp(floor(D) / 255.0, 0., 1.); vec3 rgb = color.rgb * D; // Helps with png quantization. rgb = toGammaSpace(rgb); return vec4(rgb, D); } vec3 fromRGBD(vec4 rgbd) { // Helps with png quantization. 
rgbd.rgb = toLinearSpace(rgbd.rgb); // return rgbd.rgb * ((rgbdMaxRange / 255.0) / rgbd.a); return rgbd.rgb / rgbd.a; } void main() { fragColor = toRGBM(texture(image, texCoord_interp).rgb); } ''' x_screen = 0 off_x = -100 off_y = -100 y_screen_flip = 0 sx = 200 sy = 200 vertices = ( (x_screen + off_x, y_screen_flip - off_y), (x_screen + off_x, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - off_x)) if input_image.colorspace_settings.name != 'Linear': input_image.colorspace_settings.name = 'Linear' # Removing .exr or .hdr prefix if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr': image_name = image_name[:-4] target_image = bpy.data.images.get(image_name + '_encoded') if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(image_name + '_encoded') if not target_image: target_image = bpy.data.images.new( name = image_name + '_encoded', width = input_image.size[0], height = input_image.size[1], alpha = True, float_buffer = False ) shader = gpu.types.GPUShader(vertex_shader, fragment_shader) batch = batch_for_shader( shader, 'TRI_FAN', { "pos": vertices, "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)), }, ) if image.gl_load(): raise Exception() with offscreen.bind(): bgl.glActiveTexture(bgl.GL_TEXTURE0) bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode) shader.bind() shader.uniform_int("image", 0) batch.draw(shader) buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4) bgl.glReadBuffer(bgl.GL_BACK) bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer) offscreen.free() target_image.pixels = [v / 255 for v in buffer] input_image = target_image #Save LogLuv if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(input_image.name) input_image.filepath_raw = outDir + "/" + input_image.name + ".png" #input_image.filepath_raw = outDir + "_encoded.png" input_image.file_format = "PNG" 
bpy.context.scene.render.image_settings.quality = quality #input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene) input_image.save() #Todo - Find a way to save #bpy.ops.image.save_all_modified() def encodeImageRGBMCPU(image, maxRange, outDir, quality): input_image = bpy.data.images[image.name] image_name = input_image.name if input_image.colorspace_settings.name != 'Linear': input_image.colorspace_settings.name = 'Linear' # Removing .exr or .hdr prefix if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr': image_name = image_name[:-4] target_image = bpy.data.images.get(image_name + '_encoded') if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(image_name + '_encoded') if not target_image: target_image = bpy.data.images.new( name = image_name + '_encoded', width = input_image.size[0], height = input_image.size[1], alpha = True, float_buffer = False ) num_pixels = len(input_image.pixels) result_pixel = list(input_image.pixels) for i in range(0,num_pixels,4): for j in range(3): result_pixel[i+j] *= 1.0 / maxRange; result_pixel[i+3] = saturate(max(result_pixel[i], result_pixel[i+1], result_pixel[i+2], 1e-6)) result_pixel[i+3] = math.ceil(result_pixel[i+3] * 255.0) / 255.0 for j in range(3): result_pixel[i+j] /= result_pixel[i+3] target_image.pixels = result_pixel input_image = target_image #Save RGBM if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(input_image.name) input_image.filepath_raw = outDir + "/" + input_image.name + ".png" input_image.file_format = "PNG" bpy.context.scene.render.image_settings.quality = quality input_image.save() #input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene) # input_image.filepath_raw = outDir + "_encoded.png" # input_image.file_format = "PNG" # bpy.context.scene.render.image_settings.quality = quality # input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene) #input_image. 
#input_image.save() def saturate(num, floats=True): if num <= 0: num = 0 elif num > (1 if floats else 255): num = (1 if floats else 255) return num def maxEps(x): return max(x, 1e-6) def encodeImageRGBDCPU(image, maxRange, outDir, quality): input_image = bpy.data.images[image.name] image_name = input_image.name if input_image.colorspace_settings.name != 'Linear': input_image.colorspace_settings.name = 'Linear' # Removing .exr or .hdr prefix if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr': image_name = image_name[:-4] target_image = bpy.data.images.get(image_name + '_encoded') if not target_image: target_image = bpy.data.images.new( name = image_name + '_encoded', width = input_image.size[0], height = input_image.size[1], alpha = True, float_buffer = False ) num_pixels = len(input_image.pixels) result_pixel = list(input_image.pixels) rgbdMaxRange = 255.0 for i in range(0,num_pixels,4): maxRGB = maxEps(max(result_pixel[i], result_pixel[i+1], result_pixel[i+2])) D = max(rgbdMaxRange/maxRGB, 1.0) D = np.clip((math.floor(D) / 255.0), 0.0, 1.0) result_pixel[i] = math.pow(result_pixel[i] * D, 1/2.2) result_pixel[i+1] = math.pow(result_pixel[i+1] * D, 1/2.2) result_pixel[i+2] = math.pow(result_pixel[i+2] * D, 1/2.2) result_pixel[i+3] = D target_image.pixels = result_pixel input_image = target_image #Save RGBD if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(input_image.name) input_image.filepath_raw = outDir + "/" + input_image.name + ".png" input_image.file_format = "PNG" bpy.context.scene.render.image_settings.quality = quality input_image.save()
31.51087
123
0.588824
import bpy, math, os, gpu, bgl import numpy as np from . import utility from fractions import Fraction from gpu_extras.batch import batch_for_shader def encodeLogLuvGPU(image, outDir, quality): input_image = bpy.data.images[image.name] image_name = input_image.name offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1]) image = input_image vertex_shader = ''' uniform mat4 ModelViewProjectionMatrix; in vec2 texCoord; in vec2 pos; out vec2 texCoord_interp; void main() { //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f); //gl_Position.z = 1.0; gl_Position = vec4(pos.xy, 100, 100); texCoord_interp = texCoord; } ''' fragment_shader = ''' in vec2 texCoord_interp; out vec4 fragColor; uniform sampler2D image; const mat3 cLogLuvM = mat3( 0.2209, 0.3390, 0.4184, 0.1138, 0.6780, 0.7319, 0.0102, 0.1130, 0.2969 ); vec4 LinearToLogLuv( in vec4 value ) { vec3 Xp_Y_XYZp = cLogLuvM * value.rgb; Xp_Y_XYZp = max( Xp_Y_XYZp, vec3( 1e-6, 1e-6, 1e-6 ) ); vec4 vResult; vResult.xy = Xp_Y_XYZp.xy / Xp_Y_XYZp.z; float Le = 2.0 * log2(Xp_Y_XYZp.y) + 127.0; vResult.w = fract( Le ); vResult.z = ( Le - ( floor( vResult.w * 255.0 ) ) / 255.0 ) / 255.0; return vResult; //return vec4(Xp_Y_XYZp,1); } const mat3 cLogLuvInverseM = mat3( 6.0014, -2.7008, -1.7996, -1.3320, 3.1029, -5.7721, 0.3008, -1.0882, 5.6268 ); vec4 LogLuvToLinear( in vec4 value ) { float Le = value.z * 255.0 + value.w; vec3 Xp_Y_XYZp; Xp_Y_XYZp.y = exp2( ( Le - 127.0 ) / 2.0 ); Xp_Y_XYZp.z = Xp_Y_XYZp.y / value.y; Xp_Y_XYZp.x = value.x * Xp_Y_XYZp.z; vec3 vRGB = cLogLuvInverseM * Xp_Y_XYZp.rgb; //return vec4( max( vRGB, 0.0 ), 1.0 ); return vec4( max( Xp_Y_XYZp, 0.0 ), 1.0 ); } void main() { //fragColor = LinearToLogLuv(pow(texture(image, texCoord_interp), vec4(0.454))); fragColor = LinearToLogLuv(texture(image, texCoord_interp)); //fragColor = LogLuvToLinear(LinearToLogLuv(texture(image, texCoord_interp))); } ''' x_screen = 0 off_x = -100 off_y = -100 y_screen_flip = 0 sx = 200 sy = 200 
vertices = ( (x_screen + off_x, y_screen_flip - off_y), (x_screen + off_x, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - off_x)) if input_image.colorspace_settings.name != 'Linear': input_image.colorspace_settings.name = 'Linear' if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr': image_name = image_name[:-4] target_image = bpy.data.images.get(image_name + '_encoded') if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(image_name + '_encoded') if not target_image: target_image = bpy.data.images.new( name = image_name + '_encoded', width = input_image.size[0], height = input_image.size[1], alpha = True, float_buffer = False ) shader = gpu.types.GPUShader(vertex_shader, fragment_shader) batch = batch_for_shader( shader, 'TRI_FAN', { "pos": vertices, "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)), }, ) if image.gl_load(): raise Exception() with offscreen.bind(): bgl.glActiveTexture(bgl.GL_TEXTURE0) bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode) shader.bind() shader.uniform_int("image", 0) batch.draw(shader) buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4) bgl.glReadBuffer(bgl.GL_BACK) bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer) offscreen.free() target_image.pixels = [v / 255 for v in buffer] input_image = target_image if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(input_image.name) input_image.filepath_raw = outDir + "/" + input_image.name + ".png" input_image.file_format = "PNG" bpy.context.scene.render.image_settings.quality = quality input_image.save() def encodeImageRGBDGPU(image, maxRange, outDir, quality): input_image = bpy.data.images[image.name] image_name = input_image.name offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1]) image = input_image vertex_shader = ''' uniform mat4 ModelViewProjectionMatrix; in vec2 texCoord; in vec2 pos; 
out vec2 texCoord_interp; void main() { //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f); //gl_Position.z = 1.0; gl_Position = vec4(pos.xy, 100, 100); texCoord_interp = texCoord; } ''' fragment_shader = ''' in vec2 texCoord_interp; out vec4 fragColor; uniform sampler2D image; //Code from here: https://github.com/BabylonJS/Babylon.js/blob/master/src/Shaders/ShadersInclude/helperFunctions.fx const float PI = 3.1415926535897932384626433832795; const float HALF_MIN = 5.96046448e-08; // Smallest positive half. const float LinearEncodePowerApprox = 2.2; const float GammaEncodePowerApprox = 1.0 / LinearEncodePowerApprox; const vec3 LuminanceEncodeApprox = vec3(0.2126, 0.7152, 0.0722); const float Epsilon = 0.0000001; #define saturate(x) clamp(x, 0.0, 1.0) float maxEps(float x) { return max(x, Epsilon); } float toLinearSpace(float color) { return pow(color, LinearEncodePowerApprox); } vec3 toLinearSpace(vec3 color) { return pow(color, vec3(LinearEncodePowerApprox)); } vec4 toLinearSpace(vec4 color) { return vec4(pow(color.rgb, vec3(LinearEncodePowerApprox)), color.a); } vec3 toGammaSpace(vec3 color) { return pow(color, vec3(GammaEncodePowerApprox)); } vec4 toGammaSpace(vec4 color) { return vec4(pow(color.rgb, vec3(GammaEncodePowerApprox)), color.a); } float toGammaSpace(float color) { return pow(color, GammaEncodePowerApprox); } float square(float value) { return value * value; } // Check if configurable value is needed. const float rgbdMaxRange = 255.0; vec4 toRGBD(vec3 color) { float maxRGB = maxEps(max(color.r, max(color.g, color.b))); float D = max(rgbdMaxRange / maxRGB, 1.); D = clamp(floor(D) / 255.0, 0., 1.); vec3 rgb = color.rgb * D; // Helps with png quantization. rgb = toGammaSpace(rgb); return vec4(rgb, D); } vec3 fromRGBD(vec4 rgbd) { // Helps with png quantization. 
rgbd.rgb = toLinearSpace(rgbd.rgb); // return rgbd.rgb * ((rgbdMaxRange / 255.0) / rgbd.a); return rgbd.rgb / rgbd.a; } void main() { fragColor = toRGBD(texture(image, texCoord_interp).rgb); } ''' x_screen = 0 off_x = -100 off_y = -100 y_screen_flip = 0 sx = 200 sy = 200 vertices = ( (x_screen + off_x, y_screen_flip - off_y), (x_screen + off_x, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - off_x)) if input_image.colorspace_settings.name != 'Linear': input_image.colorspace_settings.name = 'Linear' if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr': image_name = image_name[:-4] target_image = bpy.data.images.get(image_name + '_encoded') if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(image_name + '_encoded') if not target_image: target_image = bpy.data.images.new( name = image_name + '_encoded', width = input_image.size[0], height = input_image.size[1], alpha = True, float_buffer = False ) shader = gpu.types.GPUShader(vertex_shader, fragment_shader) batch = batch_for_shader( shader, 'TRI_FAN', { "pos": vertices, "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)), }, ) if image.gl_load(): raise Exception() with offscreen.bind(): bgl.glActiveTexture(bgl.GL_TEXTURE0) bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode) shader.bind() shader.uniform_int("image", 0) batch.draw(shader) buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4) bgl.glReadBuffer(bgl.GL_BACK) bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer) offscreen.free() target_image.pixels = [v / 255 for v in buffer] input_image = target_image if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(input_image.name) input_image.filepath_raw = outDir + "/" + input_image.name + ".png" input_image.file_format = "PNG" bpy.context.scene.render.image_settings.quality = quality input_image.save() def encodeImageRGBMGPU(image, maxRange, outDir, 
quality): input_image = bpy.data.images[image.name] image_name = input_image.name offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1]) image = input_image vertex_shader = ''' uniform mat4 ModelViewProjectionMatrix; in vec2 texCoord; in vec2 pos; out vec2 texCoord_interp; void main() { //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f); //gl_Position.z = 1.0; gl_Position = vec4(pos.xy, 100, 100); texCoord_interp = texCoord; } ''' fragment_shader = ''' in vec2 texCoord_interp; out vec4 fragColor; uniform sampler2D image; //Code from here: https://github.com/BabylonJS/Babylon.js/blob/master/src/Shaders/ShadersInclude/helperFunctions.fx const float PI = 3.1415926535897932384626433832795; const float HALF_MIN = 5.96046448e-08; // Smallest positive half. const float LinearEncodePowerApprox = 2.2; const float GammaEncodePowerApprox = 1.0 / LinearEncodePowerApprox; const vec3 LuminanceEncodeApprox = vec3(0.2126, 0.7152, 0.0722); const float Epsilon = 0.0000001; #define saturate(x) clamp(x, 0.0, 1.0) float maxEps(float x) { return max(x, Epsilon); } float toLinearSpace(float color) { return pow(color, LinearEncodePowerApprox); } vec3 toLinearSpace(vec3 color) { return pow(color, vec3(LinearEncodePowerApprox)); } vec4 toLinearSpace(vec4 color) { return vec4(pow(color.rgb, vec3(LinearEncodePowerApprox)), color.a); } vec3 toGammaSpace(vec3 color) { return pow(color, vec3(GammaEncodePowerApprox)); } vec4 toGammaSpace(vec4 color) { return vec4(pow(color.rgb, vec3(GammaEncodePowerApprox)), color.a); } float toGammaSpace(float color) { return pow(color, GammaEncodePowerApprox); } float square(float value) { return value * value; } // Check if configurable value is needed. 
const float rgbdMaxRange = 255.0; vec4 toRGBM(vec3 color) { vec4 rgbm; color *= 1.0/6.0; rgbm.a = saturate( max( max( color.r, color.g ), max( color.b, 1e-6 ) ) ); rgbm.a = clamp(floor(D) / 255.0, 0., 1.); rgbm.rgb = color / rgbm.a; return float maxRGB = maxEps(max(color.r, max(color.g, color.b))); float D = max(rgbdMaxRange / maxRGB, 1.); D = clamp(floor(D) / 255.0, 0., 1.); vec3 rgb = color.rgb * D; // Helps with png quantization. rgb = toGammaSpace(rgb); return vec4(rgb, D); } vec3 fromRGBD(vec4 rgbd) { // Helps with png quantization. rgbd.rgb = toLinearSpace(rgbd.rgb); // return rgbd.rgb * ((rgbdMaxRange / 255.0) / rgbd.a); return rgbd.rgb / rgbd.a; } void main() { fragColor = toRGBM(texture(image, texCoord_interp).rgb); } ''' x_screen = 0 off_x = -100 off_y = -100 y_screen_flip = 0 sx = 200 sy = 200 vertices = ( (x_screen + off_x, y_screen_flip - off_y), (x_screen + off_x, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - sy - off_y), (x_screen + off_x + sx, y_screen_flip - off_x)) if input_image.colorspace_settings.name != 'Linear': input_image.colorspace_settings.name = 'Linear' if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr': image_name = image_name[:-4] target_image = bpy.data.images.get(image_name + '_encoded') if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(image_name + '_encoded') if not target_image: target_image = bpy.data.images.new( name = image_name + '_encoded', width = input_image.size[0], height = input_image.size[1], alpha = True, float_buffer = False ) shader = gpu.types.GPUShader(vertex_shader, fragment_shader) batch = batch_for_shader( shader, 'TRI_FAN', { "pos": vertices, "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)), }, ) if image.gl_load(): raise Exception() with offscreen.bind(): bgl.glActiveTexture(bgl.GL_TEXTURE0) bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode) shader.bind() shader.uniform_int("image", 0) batch.draw(shader) buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * 
input_image.size[1] * 4) bgl.glReadBuffer(bgl.GL_BACK) bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer) offscreen.free() target_image.pixels = [v / 255 for v in buffer] input_image = target_image if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(input_image.name) input_image.filepath_raw = outDir + "/" + input_image.name + ".png" input_image.file_format = "PNG" bpy.context.scene.render.image_settings.quality = quality input_image.save() def encodeImageRGBMCPU(image, maxRange, outDir, quality): input_image = bpy.data.images[image.name] image_name = input_image.name if input_image.colorspace_settings.name != 'Linear': input_image.colorspace_settings.name = 'Linear' if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr': image_name = image_name[:-4] target_image = bpy.data.images.get(image_name + '_encoded') if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(image_name + '_encoded') if not target_image: target_image = bpy.data.images.new( name = image_name + '_encoded', width = input_image.size[0], height = input_image.size[1], alpha = True, float_buffer = False ) num_pixels = len(input_image.pixels) result_pixel = list(input_image.pixels) for i in range(0,num_pixels,4): for j in range(3): result_pixel[i+j] *= 1.0 / maxRange; result_pixel[i+3] = saturate(max(result_pixel[i], result_pixel[i+1], result_pixel[i+2], 1e-6)) result_pixel[i+3] = math.ceil(result_pixel[i+3] * 255.0) / 255.0 for j in range(3): result_pixel[i+j] /= result_pixel[i+3] target_image.pixels = result_pixel input_image = target_image if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(input_image.name) input_image.filepath_raw = outDir + "/" + input_image.name + ".png" input_image.file_format = "PNG" bpy.context.scene.render.image_settings.quality = quality input_image.save() def saturate(num, floats=True): if num <= 0: num = 0 elif num > (1 if floats else 255): num = (1 if floats else 255) return num def 
maxEps(x): return max(x, 1e-6) def encodeImageRGBDCPU(image, maxRange, outDir, quality): input_image = bpy.data.images[image.name] image_name = input_image.name if input_image.colorspace_settings.name != 'Linear': input_image.colorspace_settings.name = 'Linear' if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr': image_name = image_name[:-4] target_image = bpy.data.images.get(image_name + '_encoded') if not target_image: target_image = bpy.data.images.new( name = image_name + '_encoded', width = input_image.size[0], height = input_image.size[1], alpha = True, float_buffer = False ) num_pixels = len(input_image.pixels) result_pixel = list(input_image.pixels) rgbdMaxRange = 255.0 for i in range(0,num_pixels,4): maxRGB = maxEps(max(result_pixel[i], result_pixel[i+1], result_pixel[i+2])) D = max(rgbdMaxRange/maxRGB, 1.0) D = np.clip((math.floor(D) / 255.0), 0.0, 1.0) result_pixel[i] = math.pow(result_pixel[i] * D, 1/2.2) result_pixel[i+1] = math.pow(result_pixel[i+1] * D, 1/2.2) result_pixel[i+2] = math.pow(result_pixel[i+2] * D, 1/2.2) result_pixel[i+3] = D target_image.pixels = result_pixel input_image = target_image if bpy.context.scene.TLM_SceneProperties.tlm_verbose: print(input_image.name) input_image.filepath_raw = outDir + "/" + input_image.name + ".png" input_image.file_format = "PNG" bpy.context.scene.render.image_settings.quality = quality input_image.save()
true
true
f716f3a29096b6b3d1684fa66a9f8119736e670b
468
py
Python
contentcuration/contentcuration/migrations/0015_auto_20160914_1640.py
Tlazypanda/studio
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
[ "MIT" ]
1
2019-03-30T18:14:25.000Z
2019-03-30T18:14:25.000Z
contentcuration/contentcuration/migrations/0015_auto_20160914_1640.py
Tlazypanda/studio
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
[ "MIT" ]
4
2016-05-06T17:19:30.000Z
2019-03-15T01:51:24.000Z
contentcuration/contentcuration/migrations/0015_auto_20160914_1640.py
Tlazypanda/studio
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
[ "MIT" ]
4
2016-10-18T22:49:08.000Z
2019-09-17T11:20:51.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-09-14 23:40 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('contentcuration', '0014_channel_language'), ] operations = [ migrations.AlterField( model_name='channel', name='thumbnail', field=models.TextField(blank=True, null=True), ), ]
22.285714
58
0.623932
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('contentcuration', '0014_channel_language'), ] operations = [ migrations.AlterField( model_name='channel', name='thumbnail', field=models.TextField(blank=True, null=True), ), ]
true
true
f716f3b18a7254d8f31a8cfae36f39274adaf28e
815
py
Python
src/code/sort_clique_data_by_size.py
Buddyboy201/clique_analysis
9e7fd9acb2aba8cf3ced0b0ddb3d11ebc74734fb
[ "MIT" ]
null
null
null
src/code/sort_clique_data_by_size.py
Buddyboy201/clique_analysis
9e7fd9acb2aba8cf3ced0b0ddb3d11ebc74734fb
[ "MIT" ]
null
null
null
src/code/sort_clique_data_by_size.py
Buddyboy201/clique_analysis
9e7fd9acb2aba8cf3ced0b0ddb3d11ebc74734fb
[ "MIT" ]
null
null
null
import json #top 50 all cliques in category #top 50 size n till n=2 in category def update_ref(data, ref): for clique_str, count in data: clique = eval(clique_str) if ref.get(len(clique)) is None: ref[len(clique)] = [] ref[len(clique)].append((clique, count)) return ref with open("clique_data.json", "r") as data_file: data = json.load(data_file) size_ref = {"total":{}, "water":{}, "interface":{}, "hydrophobic":{}} size_ref["total"] = update_ref(data["total"], {}) size_ref["water"] = update_ref(data["water"], {}) size_ref["interface"] = update_ref(data["interface"], {}) size_ref["hydrophobic"] = update_ref(data["hydrophobic"], {}) with open("size_sorted_clique_data.json", "w") as dump_file: json.dump(size_ref, dump_file)
32.6
73
0.628221
import json def update_ref(data, ref): for clique_str, count in data: clique = eval(clique_str) if ref.get(len(clique)) is None: ref[len(clique)] = [] ref[len(clique)].append((clique, count)) return ref with open("clique_data.json", "r") as data_file: data = json.load(data_file) size_ref = {"total":{}, "water":{}, "interface":{}, "hydrophobic":{}} size_ref["total"] = update_ref(data["total"], {}) size_ref["water"] = update_ref(data["water"], {}) size_ref["interface"] = update_ref(data["interface"], {}) size_ref["hydrophobic"] = update_ref(data["hydrophobic"], {}) with open("size_sorted_clique_data.json", "w") as dump_file: json.dump(size_ref, dump_file)
true
true
f716f3fadbdd20212933b188540902a9450b91e8
6,260
py
Python
pimsviewer/dimension.py
soft-matter/pimsviewer
9263ece121a58a0504c6e4d319ec6e18d1bb460a
[ "BSD-3-Clause" ]
9
2018-06-26T06:49:34.000Z
2022-03-01T19:54:56.000Z
pimsviewer/dimension.py
soft-matter/pimsviewer
9263ece121a58a0504c6e4d319ec6e18d1bb460a
[ "BSD-3-Clause" ]
14
2017-03-02T17:34:08.000Z
2020-06-23T15:09:23.000Z
pimsviewer/dimension.py
soft-matter/pimsviewer
9263ece121a58a0504c6e4d319ec6e18d1bb460a
[ "BSD-3-Clause" ]
6
2017-03-02T18:36:20.000Z
2020-11-22T23:27:14.000Z
import os import numpy as np from PyQt5 import uic from PyQt5.QtCore import QDir, Qt, QTimer, pyqtSignal from PyQt5.QtGui import QImage, QPainter, QPalette, QPixmap from PyQt5.QtWidgets import (QHBoxLayout, QSlider, QWidget, QAction, QApplication, QFileDialog, QLabel, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QStatusBar, QVBoxLayout, QDockWidget, QPushButton, QStyle, QLineEdit, QCheckBox, QInputDialog) class Dimension(QWidget): _playing = False _size = 0 _position = 0 _mergeable = False _merge = False _playable = False _fps = 5.0 _max_playback_fps = 5.0 play_event = pyqtSignal(QWidget) def __init__(self, name, size=0): super(Dimension, self).__init__() self.name = name self._size = size dirname = os.path.dirname(os.path.realpath(__file__)) uic.loadUi(os.path.join(dirname, 'dimension.ui'), self) self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay)) self.playButton.clicked.connect(self.click_event) self.playTimer = QTimer() self.playTimer.timeout.connect(self.play_tick) self.posButton.pressed.connect(self.update_position_from_btn) self.slider.setMaximum(self.size-1) self.slider.valueChanged.connect(self.update_position_from_slider) self.mergeButton.clicked.connect(self.update_merge) if not self.mergeable: self.mergeButton.hide() self._merge = self.mergeButton.isChecked() self.fps = self._fps self.fpsButton.pressed.connect(self.fps_changed) self.hide() def merge_image_over_dimension(self, image): # problem here: could be two axes with same size # TODO: think of a clever fix for this try: ix = image.shape.index(self._size) except ValueError: return image if self.name != 'c': # I don't know what to do, sum over axis image = np.sum(image, axis=ix) return image def enable(self): if not self.playable: return self.setEnabled(True) self.playButton.setEnabled(True) self.posButton.setEnabled(True) self.slider.setEnabled(True) self.fpsButton.setEnabled(True) if self.mergeable: self.mergeButton.setEnabled(True) self.mergeButton.show() 
self.show() def disable(self): self.setEnabled(False) self.playButton.setEnabled(False) self.posButton.setEnabled(False) self.slider.setEnabled(False) self.fpsButton.setEnabled(False) self.mergeButton.setEnabled(False) def fps_changed(self): fps, ok = QInputDialog.getDouble(self, "Playback framerate", "New playback framerate", self.fps) if ok: self.fps = fps def click_event(self): if not self.playable: return if not self.playing: self.playing = True else: self.playing = False def play_tick(self): if not self.playing: return if self._fps > self._max_playback_fps: self.position += int(round(self._fps / self._max_playback_fps)) else: self.position += 1 @property def size(self): return self._size @size.setter def size(self, size): self._size = size self.position = 0 self.playing = False self.slider.setMinimum(0) self.slider.setMaximum(self.size-1) @property def fps(self): return self._fps @fps.setter def fps(self, fps): fps = float(fps) self._fps = fps play_fps = fps if fps < self._max_playback_fps else self._max_playback_fps self.playTimer.setInterval(int(round(1000.0 / play_fps))) self.fpsButton.setText('%d fps' % self.fps) @property def playable(self): return self._playable @playable.setter def playable(self, playable): self._playable = bool(playable) @property def playing(self): return self._playing @playing.setter def playing(self, playing): self._playing = bool(playing) if self._playing: self.playTimer.start() else: self.playTimer.stop() @property def position(self): return self._position def update_position_from_slider(self): position = self.slider.value() if position >= 0: self.position = position def update_position_from_btn(self): position, ok = QInputDialog.getInt(self, "'%s' position" % self.name, "New '%s' position (0-%d)" % (self.name, self.size-1), self.position, 0, self.size-1) if ok: self.position = position @position.setter def position(self, position): old_position = self.position while position < 0: position += self.size if position < self.size: 
self._position = position else: self._position = position - self.size self.slider.setValue(self.position) self.posButton.setText('%s=%d' % (self.name, self.position)) if old_position != self.position: self.play_event.emit(self) def update_merge(self): self.merge = self.mergeButton.isChecked() @property def merge(self): return self._merge @merge.setter def merge(self, merge): if not self.mergeable: merge = False if merge != self._merge: self._merge = bool(merge) self.mergeButton.setChecked(self._merge) self.play_event.emit(self) @property def mergeable(self): return self._mergeable @mergeable.setter def mergeable(self, mergeable): self._mergeable = bool(mergeable) if not mergeable: self.merge = False def __len__(self): return self.size def __str__(self): classname = self.__class__.__name__ playing = "playing" if self.playing else "not playing" return "<%s %s of length %d (%s)>" % (classname, self.name, self.size, playing) def __repr__(self): return self.__str__()
27.099567
257
0.616613
import os import numpy as np from PyQt5 import uic from PyQt5.QtCore import QDir, Qt, QTimer, pyqtSignal from PyQt5.QtGui import QImage, QPainter, QPalette, QPixmap from PyQt5.QtWidgets import (QHBoxLayout, QSlider, QWidget, QAction, QApplication, QFileDialog, QLabel, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QStatusBar, QVBoxLayout, QDockWidget, QPushButton, QStyle, QLineEdit, QCheckBox, QInputDialog) class Dimension(QWidget): _playing = False _size = 0 _position = 0 _mergeable = False _merge = False _playable = False _fps = 5.0 _max_playback_fps = 5.0 play_event = pyqtSignal(QWidget) def __init__(self, name, size=0): super(Dimension, self).__init__() self.name = name self._size = size dirname = os.path.dirname(os.path.realpath(__file__)) uic.loadUi(os.path.join(dirname, 'dimension.ui'), self) self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay)) self.playButton.clicked.connect(self.click_event) self.playTimer = QTimer() self.playTimer.timeout.connect(self.play_tick) self.posButton.pressed.connect(self.update_position_from_btn) self.slider.setMaximum(self.size-1) self.slider.valueChanged.connect(self.update_position_from_slider) self.mergeButton.clicked.connect(self.update_merge) if not self.mergeable: self.mergeButton.hide() self._merge = self.mergeButton.isChecked() self.fps = self._fps self.fpsButton.pressed.connect(self.fps_changed) self.hide() def merge_image_over_dimension(self, image): try: ix = image.shape.index(self._size) except ValueError: return image if self.name != 'c': image = np.sum(image, axis=ix) return image def enable(self): if not self.playable: return self.setEnabled(True) self.playButton.setEnabled(True) self.posButton.setEnabled(True) self.slider.setEnabled(True) self.fpsButton.setEnabled(True) if self.mergeable: self.mergeButton.setEnabled(True) self.mergeButton.show() self.show() def disable(self): self.setEnabled(False) self.playButton.setEnabled(False) self.posButton.setEnabled(False) 
self.slider.setEnabled(False) self.fpsButton.setEnabled(False) self.mergeButton.setEnabled(False) def fps_changed(self): fps, ok = QInputDialog.getDouble(self, "Playback framerate", "New playback framerate", self.fps) if ok: self.fps = fps def click_event(self): if not self.playable: return if not self.playing: self.playing = True else: self.playing = False def play_tick(self): if not self.playing: return if self._fps > self._max_playback_fps: self.position += int(round(self._fps / self._max_playback_fps)) else: self.position += 1 @property def size(self): return self._size @size.setter def size(self, size): self._size = size self.position = 0 self.playing = False self.slider.setMinimum(0) self.slider.setMaximum(self.size-1) @property def fps(self): return self._fps @fps.setter def fps(self, fps): fps = float(fps) self._fps = fps play_fps = fps if fps < self._max_playback_fps else self._max_playback_fps self.playTimer.setInterval(int(round(1000.0 / play_fps))) self.fpsButton.setText('%d fps' % self.fps) @property def playable(self): return self._playable @playable.setter def playable(self, playable): self._playable = bool(playable) @property def playing(self): return self._playing @playing.setter def playing(self, playing): self._playing = bool(playing) if self._playing: self.playTimer.start() else: self.playTimer.stop() @property def position(self): return self._position def update_position_from_slider(self): position = self.slider.value() if position >= 0: self.position = position def update_position_from_btn(self): position, ok = QInputDialog.getInt(self, "'%s' position" % self.name, "New '%s' position (0-%d)" % (self.name, self.size-1), self.position, 0, self.size-1) if ok: self.position = position @position.setter def position(self, position): old_position = self.position while position < 0: position += self.size if position < self.size: self._position = position else: self._position = position - self.size self.slider.setValue(self.position) 
self.posButton.setText('%s=%d' % (self.name, self.position)) if old_position != self.position: self.play_event.emit(self) def update_merge(self): self.merge = self.mergeButton.isChecked() @property def merge(self): return self._merge @merge.setter def merge(self, merge): if not self.mergeable: merge = False if merge != self._merge: self._merge = bool(merge) self.mergeButton.setChecked(self._merge) self.play_event.emit(self) @property def mergeable(self): return self._mergeable @mergeable.setter def mergeable(self, mergeable): self._mergeable = bool(mergeable) if not mergeable: self.merge = False def __len__(self): return self.size def __str__(self): classname = self.__class__.__name__ playing = "playing" if self.playing else "not playing" return "<%s %s of length %d (%s)>" % (classname, self.name, self.size, playing) def __repr__(self): return self.__str__()
true
true
f716f40ff8616b758469bb436739279671f60ee1
327
py
Python
deform_conv/utils.py
lone17/deform-conv
3502cedbeae61c961d7e988382c55b9d45fd1873
[ "MIT" ]
221
2017-03-30T12:31:02.000Z
2022-03-24T08:39:26.000Z
deform_conv/utils.py
ml-lab/deform-conv
126ebcc283a4325c474332fa170f57d52a59e34d
[ "MIT" ]
1
2019-03-09T11:01:39.000Z
2019-03-09T11:01:39.000Z
deform_conv/utils.py
ml-lab/deform-conv
126ebcc283a4325c474332fa170f57d52a59e34d
[ "MIT" ]
78
2017-03-30T21:46:59.000Z
2022-03-19T19:52:19.000Z
from __future__ import absolute_import, division from tensorflow.python import debug as tf_debug import keras.backend as K def keras_set_tf_debug(): sess = K.get_session() sess = tf_debug.LocalCLIDebugWrapperSession(sess) sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan) K.set_session(sess)
27.25
69
0.788991
from __future__ import absolute_import, division from tensorflow.python import debug as tf_debug import keras.backend as K def keras_set_tf_debug(): sess = K.get_session() sess = tf_debug.LocalCLIDebugWrapperSession(sess) sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan) K.set_session(sess)
true
true
f716f5a922081969766e4529bf89d92758cc2879
11,057
py
Python
runway/aws_sso_botocore/credentials.py
avosper-intellaegis/runway
757d4e7db269ec16479b044ac82a69f25fa2a450
[ "Apache-2.0" ]
null
null
null
runway/aws_sso_botocore/credentials.py
avosper-intellaegis/runway
757d4e7db269ec16479b044ac82a69f25fa2a450
[ "Apache-2.0" ]
null
null
null
runway/aws_sso_botocore/credentials.py
avosper-intellaegis/runway
757d4e7db269ec16479b044ac82a69f25fa2a450
[ "Apache-2.0" ]
null
null
null
"""Botocore with support for AWS SSO credential assets.""" # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import datetime import json import logging import os from hashlib import sha1 from botocore import UNSIGNED from botocore.config import Config from botocore.credentials import ( AssumeRoleProvider, BotoProvider, CachedCredentialFetcher, CanonicalNameCredentialSourcer, ContainerProvider, CredentialProvider, CredentialResolver, DeferredRefreshableCredentials, EnvProvider, InstanceMetadataFetcher, InstanceMetadataProvider, JSONFileCache, OriginalEC2Provider, ) from botocore.credentials import ( ProfileProviderBuilder as BotocoreProfileProviderBuilder, ) from botocore.credentials import _get_client_creator, _serialize_if_needed from botocore.exceptions import InvalidConfigError from dateutil.tz import tzutc from .exceptions import UnauthorizedSSOTokenError from .util import SSOTokenLoader LOGGER = logging.getLogger(__name__) def create_credential_resolver(session, cache=None, region_name=None): """Create a default credential resolver. This creates a pre-configured credential resolver that includes the default lookup chain for credentials. 
""" profile_name = session.get_config_variable("profile") or "default" metadata_timeout = session.get_config_variable("metadata_service_timeout") num_attempts = session.get_config_variable("metadata_service_num_attempts") disable_env_vars = session.instance_variables().get("profile") is not None if cache is None: cache = {} env_provider = EnvProvider() container_provider = ContainerProvider() instance_metadata_provider = InstanceMetadataProvider( iam_role_fetcher=InstanceMetadataFetcher( timeout=metadata_timeout, num_attempts=num_attempts, user_agent=session.user_agent(), ) ) profile_provider_builder = ProfileProviderBuilder( session, cache=cache, region_name=region_name ) assume_role_provider = AssumeRoleProvider( load_config=lambda: session.full_config, client_creator=_get_client_creator(session, region_name), cache=cache, profile_name=profile_name, credential_sourcer=CanonicalNameCredentialSourcer( [env_provider, container_provider, instance_metadata_provider] ), profile_provider_builder=profile_provider_builder, ) pre_profile = [ env_provider, assume_role_provider, ] profile_providers = profile_provider_builder.providers( profile_name=profile_name, disable_env_vars=disable_env_vars, ) post_profile = [ OriginalEC2Provider(), BotoProvider(), container_provider, instance_metadata_provider, ] providers = pre_profile + profile_providers + post_profile if disable_env_vars: # An explicitly provided profile will negate an EnvProvider. # We will defer to providers that understand the "profile" # concept to retrieve credentials. # The one edge case if is all three values are provided via # env vars: # export AWS_ACCESS_KEY_ID=foo # export AWS_SECRET_ACCESS_KEY=bar # export AWS_PROFILE=baz # Then, just like our client() calls, the explicit credentials # will take precedence. # # This precedence is enforced by leaving the EnvProvider in the chain. 
# This means that the only way a "profile" would win is if the # EnvProvider does not return credentials, which is what we want # in this scenario. providers.remove(env_provider) LOGGER.debug( "Skipping environment variable credential check" " because profile name was explicitly set." ) return CredentialResolver(providers=providers) class ProfileProviderBuilder(BotocoreProfileProviderBuilder): """Extends the botocore profile provider builder to support AWS SSO.""" def __init__(self, session, cache=None, region_name=None, sso_token_cache=None): """Instantiate class.""" super().__init__(session, cache, region_name) self._sso_token_cache = sso_token_cache def providers(self, profile_name, disable_env_vars=False): """Return list of providers.""" return [ self._create_web_identity_provider(profile_name, disable_env_vars,), self._create_sso_provider(profile_name), self._create_shared_credential_provider(profile_name), self._create_process_provider(profile_name), self._create_config_provider(profile_name), ] def _create_sso_provider(self, profile_name): """AWS SSO credential provider.""" return SSOProvider( load_config=lambda: self._session.full_config, client_creator=self._session.create_client, profile_name=profile_name, cache=self._cache, token_cache=self._sso_token_cache, ) class SSOCredentialFetcher(CachedCredentialFetcher): """AWS SSO credential fetcher.""" def __init__( self, start_url, sso_region, role_name, account_id, client_creator, token_loader=None, cache=None, expiry_window_seconds=None, ): """Instantiate class.""" self._client_creator = client_creator self._sso_region = sso_region self._role_name = role_name self._account_id = account_id self._start_url = start_url self._token_loader = token_loader super().__init__(cache, expiry_window_seconds) def _create_cache_key(self): """Create a predictable cache key for the current configuration. The cache key is intended to be compatible with file names. 
""" args = { "startUrl": self._start_url, "roleName": self._role_name, "accountId": self._account_id, } # NOTE: It would be good to hoist this cache key construction logic # into the CachedCredentialFetcher class as we should be consistent. # Unfortunately, the current assume role fetchers that sub class don't # pass separators resulting in non-minified JSON. In the long term, # all fetchers should use the below caching scheme. args = json.dumps(args, sort_keys=True, separators=(",", ":")) argument_hash = sha1(args.encode("utf-8")).hexdigest() return self._make_file_safe(argument_hash) def _parse_timestamp(self, timestamp_ms): # pylint: disable=no-self-use """Parse timestamp.""" # fromtimestamp expects seconds so: milliseconds / 1000 = seconds timestamp_seconds = timestamp_ms / 1000.0 timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc()) return _serialize_if_needed(timestamp) def _get_credentials(self): """Get credentials by calling SSO get role credentials.""" config = Config(signature_version=UNSIGNED, region_name=self._sso_region,) client = self._client_creator("sso", config=config) kwargs = { "roleName": self._role_name, "accountId": self._account_id, "accessToken": self._token_loader(self._start_url), } try: response = client.get_role_credentials(**kwargs) except client.exceptions.UnauthorizedException as exc: raise UnauthorizedSSOTokenError() from exc credentials = response["roleCredentials"] credentials = { "ProviderType": "sso", "Credentials": { "AccessKeyId": credentials["accessKeyId"], "SecretAccessKey": credentials["secretAccessKey"], "SessionToken": credentials["sessionToken"], "Expiration": self._parse_timestamp(credentials["expiration"]), }, } return credentials class SSOProvider(CredentialProvider): """AWS SSO credential provider.""" METHOD = "sso" _SSO_TOKEN_CACHE_DIR = os.path.expanduser(os.path.join("~", ".aws", "sso", "cache")) _SSO_CONFIG_VARS = [ "sso_start_url", "sso_region", "sso_role_name", "sso_account_id", ] # pylint: 
disable=super-init-not-called def __init__( self, load_config, client_creator, profile_name, cache=None, token_cache=None ): """Instantiate class.""" if token_cache is None: token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR) self._token_cache = token_cache if cache is None: cache = {} self.cache = cache self._load_config = load_config self._client_creator = client_creator self._profile_name = profile_name def _load_sso_config(self): """Load sso config.""" loaded_config = self._load_config() profiles = loaded_config.get("profiles", {}) profile_name = self._profile_name profile_config = profiles.get(self._profile_name, {}) if all(c not in profile_config for c in self._SSO_CONFIG_VARS): return None config = {} missing_config_vars = [] for config_var in self._SSO_CONFIG_VARS: if config_var in profile_config: config[config_var] = profile_config[config_var] else: missing_config_vars.append(config_var) if missing_config_vars: missing = ", ".join(missing_config_vars) raise InvalidConfigError( error_msg=( 'The profile "%s" is configured to use SSO but is missing ' "required configuration: %s" % (profile_name, missing) ) ) return config def load(self): """Load AWS SSO credentials.""" sso_config = self._load_sso_config() if not sso_config: return None sso_fetcher = SSOCredentialFetcher( sso_config["sso_start_url"], sso_config["sso_region"], sso_config["sso_role_name"], sso_config["sso_account_id"], self._client_creator, token_loader=SSOTokenLoader(cache=self._token_cache), cache=self.cache, ) return DeferredRefreshableCredentials( method=self.METHOD, refresh_using=sso_fetcher.fetch_credentials, )
35.213376
88
0.665823
import datetime import json import logging import os from hashlib import sha1 from botocore import UNSIGNED from botocore.config import Config from botocore.credentials import ( AssumeRoleProvider, BotoProvider, CachedCredentialFetcher, CanonicalNameCredentialSourcer, ContainerProvider, CredentialProvider, CredentialResolver, DeferredRefreshableCredentials, EnvProvider, InstanceMetadataFetcher, InstanceMetadataProvider, JSONFileCache, OriginalEC2Provider, ) from botocore.credentials import ( ProfileProviderBuilder as BotocoreProfileProviderBuilder, ) from botocore.credentials import _get_client_creator, _serialize_if_needed from botocore.exceptions import InvalidConfigError from dateutil.tz import tzutc from .exceptions import UnauthorizedSSOTokenError from .util import SSOTokenLoader LOGGER = logging.getLogger(__name__) def create_credential_resolver(session, cache=None, region_name=None): profile_name = session.get_config_variable("profile") or "default" metadata_timeout = session.get_config_variable("metadata_service_timeout") num_attempts = session.get_config_variable("metadata_service_num_attempts") disable_env_vars = session.instance_variables().get("profile") is not None if cache is None: cache = {} env_provider = EnvProvider() container_provider = ContainerProvider() instance_metadata_provider = InstanceMetadataProvider( iam_role_fetcher=InstanceMetadataFetcher( timeout=metadata_timeout, num_attempts=num_attempts, user_agent=session.user_agent(), ) ) profile_provider_builder = ProfileProviderBuilder( session, cache=cache, region_name=region_name ) assume_role_provider = AssumeRoleProvider( load_config=lambda: session.full_config, client_creator=_get_client_creator(session, region_name), cache=cache, profile_name=profile_name, credential_sourcer=CanonicalNameCredentialSourcer( [env_provider, container_provider, instance_metadata_provider] ), profile_provider_builder=profile_provider_builder, ) pre_profile = [ env_provider, assume_role_provider, ] 
profile_providers = profile_provider_builder.providers( profile_name=profile_name, disable_env_vars=disable_env_vars, ) post_profile = [ OriginalEC2Provider(), BotoProvider(), container_provider, instance_metadata_provider, ] providers = pre_profile + profile_providers + post_profile if disable_env_vars: providers.remove(env_provider) LOGGER.debug( "Skipping environment variable credential check" " because profile name was explicitly set." ) return CredentialResolver(providers=providers) class ProfileProviderBuilder(BotocoreProfileProviderBuilder): def __init__(self, session, cache=None, region_name=None, sso_token_cache=None): super().__init__(session, cache, region_name) self._sso_token_cache = sso_token_cache def providers(self, profile_name, disable_env_vars=False): return [ self._create_web_identity_provider(profile_name, disable_env_vars,), self._create_sso_provider(profile_name), self._create_shared_credential_provider(profile_name), self._create_process_provider(profile_name), self._create_config_provider(profile_name), ] def _create_sso_provider(self, profile_name): return SSOProvider( load_config=lambda: self._session.full_config, client_creator=self._session.create_client, profile_name=profile_name, cache=self._cache, token_cache=self._sso_token_cache, ) class SSOCredentialFetcher(CachedCredentialFetcher): def __init__( self, start_url, sso_region, role_name, account_id, client_creator, token_loader=None, cache=None, expiry_window_seconds=None, ): self._client_creator = client_creator self._sso_region = sso_region self._role_name = role_name self._account_id = account_id self._start_url = start_url self._token_loader = token_loader super().__init__(cache, expiry_window_seconds) def _create_cache_key(self): args = { "startUrl": self._start_url, "roleName": self._role_name, "accountId": self._account_id, } # pass separators resulting in non-minified JSON. In the long term, # all fetchers should use the below caching scheme. 
args = json.dumps(args, sort_keys=True, separators=(",", ":")) argument_hash = sha1(args.encode("utf-8")).hexdigest() return self._make_file_safe(argument_hash) def _parse_timestamp(self, timestamp_ms): # pylint: disable=no-self-use # fromtimestamp expects seconds so: milliseconds / 1000 = seconds timestamp_seconds = timestamp_ms / 1000.0 timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc()) return _serialize_if_needed(timestamp) def _get_credentials(self): config = Config(signature_version=UNSIGNED, region_name=self._sso_region,) client = self._client_creator("sso", config=config) kwargs = { "roleName": self._role_name, "accountId": self._account_id, "accessToken": self._token_loader(self._start_url), } try: response = client.get_role_credentials(**kwargs) except client.exceptions.UnauthorizedException as exc: raise UnauthorizedSSOTokenError() from exc credentials = response["roleCredentials"] credentials = { "ProviderType": "sso", "Credentials": { "AccessKeyId": credentials["accessKeyId"], "SecretAccessKey": credentials["secretAccessKey"], "SessionToken": credentials["sessionToken"], "Expiration": self._parse_timestamp(credentials["expiration"]), }, } return credentials class SSOProvider(CredentialProvider): METHOD = "sso" _SSO_TOKEN_CACHE_DIR = os.path.expanduser(os.path.join("~", ".aws", "sso", "cache")) _SSO_CONFIG_VARS = [ "sso_start_url", "sso_region", "sso_role_name", "sso_account_id", ] # pylint: disable=super-init-not-called def __init__( self, load_config, client_creator, profile_name, cache=None, token_cache=None ): if token_cache is None: token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR) self._token_cache = token_cache if cache is None: cache = {} self.cache = cache self._load_config = load_config self._client_creator = client_creator self._profile_name = profile_name def _load_sso_config(self): loaded_config = self._load_config() profiles = loaded_config.get("profiles", {}) profile_name = self._profile_name profile_config = 
profiles.get(self._profile_name, {}) if all(c not in profile_config for c in self._SSO_CONFIG_VARS): return None config = {} missing_config_vars = [] for config_var in self._SSO_CONFIG_VARS: if config_var in profile_config: config[config_var] = profile_config[config_var] else: missing_config_vars.append(config_var) if missing_config_vars: missing = ", ".join(missing_config_vars) raise InvalidConfigError( error_msg=( 'The profile "%s" is configured to use SSO but is missing ' "required configuration: %s" % (profile_name, missing) ) ) return config def load(self): sso_config = self._load_sso_config() if not sso_config: return None sso_fetcher = SSOCredentialFetcher( sso_config["sso_start_url"], sso_config["sso_region"], sso_config["sso_role_name"], sso_config["sso_account_id"], self._client_creator, token_loader=SSOTokenLoader(cache=self._token_cache), cache=self.cache, ) return DeferredRefreshableCredentials( method=self.METHOD, refresh_using=sso_fetcher.fetch_credentials, )
true
true
f716f787333a832b615b0a6c98f4f3cf3223b40f
148
py
Python
_config.py
atourkow/at-InAppScoring
6603a0b7bce7d456620d760f5a06e5b0f0dc2a90
[ "Apache-2.0" ]
null
null
null
_config.py
atourkow/at-InAppScoring
6603a0b7bce7d456620d760f5a06e5b0f0dc2a90
[ "Apache-2.0" ]
null
null
null
_config.py
atourkow/at-InAppScoring
6603a0b7bce7d456620d760f5a06e5b0f0dc2a90
[ "Apache-2.0" ]
null
null
null
class Config(object): def __init__(self): self.servers = [ "127.0.0.1" ] self.keyspace = 'at_inappscoring'
18.5
41
0.513514
class Config(object): def __init__(self): self.servers = [ "127.0.0.1" ] self.keyspace = 'at_inappscoring'
true
true
f716f8c015aff41ef3b8ac361688f47dbb0587ed
860
py
Python
app/models.py
imireallan/Bucketlist
2dc496cf866d6b21594f9bd7efd12af43ee77cba
[ "MIT" ]
null
null
null
app/models.py
imireallan/Bucketlist
2dc496cf866d6b21594f9bd7efd12af43ee77cba
[ "MIT" ]
null
null
null
app/models.py
imireallan/Bucketlist
2dc496cf866d6b21594f9bd7efd12af43ee77cba
[ "MIT" ]
null
null
null
# app/models.py from app import db class Bucketlist(db.Model): """This class represents the bucketlist table.""" __tablename__ = 'bucketlists' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(255)) date_created = db.Column(db.DateTime, default=db.func.current_timestamp()) date_modified = db.Column( db.DateTime, default=db.func.current_timestamp(), onupdate=db.func.current_timestamp()) def __init__(self, name): """initialize with name.""" self.name = name def save(self): db.session.add(self) db.session.commit() @staticmethod def get_all(): return Bucketlist.query.all() def delete(self): db.session.delete(self) db.session.commit() def __repr__(self): return "<Bucketlist: {}>".format(self.name)
25.294118
78
0.636047
from app import db class Bucketlist(db.Model): __tablename__ = 'bucketlists' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(255)) date_created = db.Column(db.DateTime, default=db.func.current_timestamp()) date_modified = db.Column( db.DateTime, default=db.func.current_timestamp(), onupdate=db.func.current_timestamp()) def __init__(self, name): self.name = name def save(self): db.session.add(self) db.session.commit() @staticmethod def get_all(): return Bucketlist.query.all() def delete(self): db.session.delete(self) db.session.commit() def __repr__(self): return "<Bucketlist: {}>".format(self.name)
true
true
f716fa1463f1193d4ff8966502c6b97695669997
5,056
py
Python
tf2_ndg_benckmarks/metrics/embedding.py
katsugeneration/tf2-ndg-benchmarks
ba2d07ef997fac87b3991a54c0a234f7c5425b0f
[ "MIT" ]
1
2020-11-17T07:03:47.000Z
2020-11-17T07:03:47.000Z
tf2_ndg_benckmarks/metrics/embedding.py
katsugeneration/tf2-ndg-benchmarks
ba2d07ef997fac87b3991a54c0a234f7c5425b0f
[ "MIT" ]
null
null
null
tf2_ndg_benckmarks/metrics/embedding.py
katsugeneration/tf2-ndg-benchmarks
ba2d07ef997fac87b3991a54c0a234f7c5425b0f
[ "MIT" ]
null
null
null
""" Copyright: Copyright 2019 by Katsuya SHIMABUKURO. License: MIT, see LICENSE for details. """ import pathlib import gzip import requests import tqdm import numpy as np from gensim.models import KeyedVectors FILE_ID = '0B7XkCwpI5KDYNlNUTTlSS21pQmM' SOURCE_URL = 'https://drive.google.com/uc?export=download&id={file_id}' SOURCE_URL_WITH_CONFIRM = 'https://drive.google.com/uc?export=download&confirm={code}&id={file_id}' class EmbeddingBase(object): """Embedding based score calculator base.""" def __init__( self, emb_path: str = '/tmp/vector.bin'): """Embedding class initialization. Args: emb_path (str): Embedding binary file path. When emb_path is not found, start to download from internet. """ self.emb_path = emb_path _emb_path = pathlib.Path(self.emb_path) if _emb_path.exists(): self._load() return _emb_gz_path = pathlib.Path(self.emb_path + '.gz') # Downloas Google pre-trained vector bin from Google Drive # Get confirmation code res = requests.get(SOURCE_URL.format(**{'file_id': FILE_ID})) cookies = res.cookies res.close() code = cookies[next(filter(lambda k: '_warning_' in k, cookies.keys()))] # Download file. res = requests.get( SOURCE_URL_WITH_CONFIRM.format(**{'file_id': FILE_ID, 'code': code}), cookies=cookies, stream=True) pbar = tqdm.tqdm(unit="B", unit_scale=True, desc='Download Google news corpus pre-trained vectors.') chunck_size = 1024 with _emb_gz_path.open('wb') as w: for chunck in res.iter_content(chunck_size): w.write(chunck) pbar.update(len(chunck)) pbar.close() res.close() # Decompress gzip file. 
with _emb_gz_path.open('rb') as f: with _emb_path.open('wb') as w: w.write(gzip.decompress(f.read())) self._load() def _load(self): """Load word2vec model.""" self.model = KeyedVectors.load_word2vec_format(self.emb_path, binary=True) assert 'dog' in self.model def _get_vectors_from_sentene(self, sentence): """Return contains word vector list.""" return [self.model.get_vector(w) for w in sentence.split(' ') if w in self.model] def _calc_cosine_sim(self, vectors1, vectors2): """Calculate cosine similarity.""" vectors1 /= np.linalg.norm(vectors1, axis=-1, keepdims=True) vectors2 /= np.linalg.norm(vectors2, axis=-1, keepdims=True) return np.dot(vectors1, vectors2.T) class Average(EmbeddingBase): """Embedding based average score calculator.""" def sentence_score( self, reference: str, hypothesis: str) -> float: """Embedding Average metrics. Args: reference (str): reference sentence. hypothesis: (str): hypothesis sentence. Return: float: Embedding Average score """ emb_ref = np.sum(self._get_vectors_from_sentene(reference), axis=0) emb_hyp = np.sum(self._get_vectors_from_sentene(hypothesis), axis=0) return self._calc_cosine_sim(emb_ref, emb_hyp) class VectorExtrema(EmbeddingBase): """Embedding based vector extrema score calculator.""" def sentence_score( self, reference: str, hypothesis: str) -> float: """Embedding Vector Extrema metrics. Args: reference (str): reference sentence. hypothesis: (str): hypothesis sentence. 
Return: float: Embedding Vector Extrema score """ def extema(vectors): vec_max = np.max(vectors, axis=0) vec_min = np.min(vectors, axis=0) return list(map(lambda x, y: x if np.abs(x) > np.abs(y) else y, vec_max, vec_min)) extema_ref = extema(self._get_vectors_from_sentene(reference)) extema_hyp = extema(self._get_vectors_from_sentene(hypothesis)) return self._calc_cosine_sim(extema_ref, extema_hyp) class GreedyMatching(EmbeddingBase): """Embedding based greedy matching score calculator.""" def sentence_score( self, reference: str, hypothesis: str) -> float: """Embedding greedy matching metrics. Args: reference (str): reference sentence. hypothesis: (str): hypothesis sentence. Return: float: Embedding Greedy Matching score """ embs_ref = np.array(self._get_vectors_from_sentene(reference)) embs_hyp = np.array(self._get_vectors_from_sentene(hypothesis)) cs_matrix = self._calc_cosine_sim(embs_ref, embs_hyp) # len(embs_ref) x len(embs_hyp) matrix greedy_ref = np.max(cs_matrix, axis=0).mean() greedy_hyp = np.max(cs_matrix, axis=1).mean() return (greedy_ref + greedy_hyp) / 2.0
31.798742
116
0.617089
import pathlib import gzip import requests import tqdm import numpy as np from gensim.models import KeyedVectors FILE_ID = '0B7XkCwpI5KDYNlNUTTlSS21pQmM' SOURCE_URL = 'https://drive.google.com/uc?export=download&id={file_id}' SOURCE_URL_WITH_CONFIRM = 'https://drive.google.com/uc?export=download&confirm={code}&id={file_id}' class EmbeddingBase(object): def __init__( self, emb_path: str = '/tmp/vector.bin'): self.emb_path = emb_path _emb_path = pathlib.Path(self.emb_path) if _emb_path.exists(): self._load() return _emb_gz_path = pathlib.Path(self.emb_path + '.gz') res = requests.get(SOURCE_URL.format(**{'file_id': FILE_ID})) cookies = res.cookies res.close() code = cookies[next(filter(lambda k: '_warning_' in k, cookies.keys()))] res = requests.get( SOURCE_URL_WITH_CONFIRM.format(**{'file_id': FILE_ID, 'code': code}), cookies=cookies, stream=True) pbar = tqdm.tqdm(unit="B", unit_scale=True, desc='Download Google news corpus pre-trained vectors.') chunck_size = 1024 with _emb_gz_path.open('wb') as w: for chunck in res.iter_content(chunck_size): w.write(chunck) pbar.update(len(chunck)) pbar.close() res.close() with _emb_gz_path.open('rb') as f: with _emb_path.open('wb') as w: w.write(gzip.decompress(f.read())) self._load() def _load(self): self.model = KeyedVectors.load_word2vec_format(self.emb_path, binary=True) assert 'dog' in self.model def _get_vectors_from_sentene(self, sentence): return [self.model.get_vector(w) for w in sentence.split(' ') if w in self.model] def _calc_cosine_sim(self, vectors1, vectors2): vectors1 /= np.linalg.norm(vectors1, axis=-1, keepdims=True) vectors2 /= np.linalg.norm(vectors2, axis=-1, keepdims=True) return np.dot(vectors1, vectors2.T) class Average(EmbeddingBase): def sentence_score( self, reference: str, hypothesis: str) -> float: emb_ref = np.sum(self._get_vectors_from_sentene(reference), axis=0) emb_hyp = np.sum(self._get_vectors_from_sentene(hypothesis), axis=0) return self._calc_cosine_sim(emb_ref, emb_hyp) class 
VectorExtrema(EmbeddingBase): def sentence_score( self, reference: str, hypothesis: str) -> float: def extema(vectors): vec_max = np.max(vectors, axis=0) vec_min = np.min(vectors, axis=0) return list(map(lambda x, y: x if np.abs(x) > np.abs(y) else y, vec_max, vec_min)) extema_ref = extema(self._get_vectors_from_sentene(reference)) extema_hyp = extema(self._get_vectors_from_sentene(hypothesis)) return self._calc_cosine_sim(extema_ref, extema_hyp) class GreedyMatching(EmbeddingBase): def sentence_score( self, reference: str, hypothesis: str) -> float: embs_ref = np.array(self._get_vectors_from_sentene(reference)) embs_hyp = np.array(self._get_vectors_from_sentene(hypothesis)) cs_matrix = self._calc_cosine_sim(embs_ref, embs_hyp) greedy_ref = np.max(cs_matrix, axis=0).mean() greedy_hyp = np.max(cs_matrix, axis=1).mean() return (greedy_ref + greedy_hyp) / 2.0
true
true
f716fbfc8df971e7d631e0b0b90237bc351ca7f6
11,145
py
Python
dolo/algos/simulations.py
EconForge/dolo
9bb75b8f6ea87578393fe748003092ffb745e8d6
[ "BSD-2-Clause" ]
50
2015-03-16T01:07:00.000Z
2020-02-07T22:18:43.000Z
dolo/algos/simulations.py
EconForge/dolo
9bb75b8f6ea87578393fe748003092ffb745e8d6
[ "BSD-2-Clause" ]
130
2015-01-01T19:33:21.000Z
2020-04-27T15:57:22.000Z
dolo/algos/simulations.py
EconForge/dolo
9bb75b8f6ea87578393fe748003092ffb745e8d6
[ "BSD-2-Clause" ]
56
2015-10-14T12:27:26.000Z
2020-04-21T14:56:02.000Z
import numpy import pandas import xarray as xr import numpy as np from dolo.compiler.model import Model from dolo.numeric.optimize.ncpsolve import ncpsolve from dolo.numeric.optimize.newton import newton as newton_solver from dolo.numeric.optimize.newton import SerialDifferentiableFunction ## TODO: extend for mc process def response(model, dr, varname, T=40, impulse: float = None): i_exo = model.symbols["exogenous"].index(varname) if impulse is None: try: impulse = numpy.sqrt( model.exogenous.Σ[i_exo, i_exo] ) # works only for IID/AR1 except: impulse = numpy.sqrt(model.exogenous.σ) # works only for IID/AR1 e1 = numpy.zeros(len(model.symbols["exogenous"])) e1[i_exo] = impulse exogenous = model.exogenous print(exogenous) print(T, e1) m_simul = model.exogenous.response(T - 1, e1) # this is an xarray T x V m_simul = m_simul.expand_dims("N") m_simul = m_simul.transpose("T", "N", "V").data sim = simulate(model, dr, N=1, T=T, driving_process=m_simul, stochastic=False) irf = sim.sel(N=0) return irf def find_index(sim, values): sh = sim.shape N = sh[0] T = sh[1] indices = np.zeros((N, T), dtype=int) for n in range(N): for t in range(T): v = sim[n, t, :] ind = np.where((values == v[None, :]).all(axis=1))[0][0] indices[n, t] = ind return indices from dolo.numeric.grids import CartesianGrid, UnstructuredGrid from dolo.algos.results import AlgoResult from dolo.numeric.decision_rule import DecisionRule def simulate( model: Model, dr: DecisionRule, *, process=None, N=1, T=40, s0=None, i0=None, m0=None, driving_process=None, seed=42, stochastic=True, ): """Simulate a model using the specified decision rule. Parameters ---------- model: Model dr: decision rule process: s0: ndarray initial state where all simulations start driving_process: ndarray realization of exogenous driving process (drawn randomly if None) N: int number of simulations T: int horizon for the simulations seed: int used to initialize the random number generator. 
Use it to replicate exact same results among simulations discard: boolean (False) if True, then all simulations containing at least one non finite value are discarded Returns ------- xarray.DataArray: returns a ``T x N x n_v`` array where ``n_v`` is the number of variables. """ if isinstance(dr, AlgoResult): dr = dr.dr calib = model.calibration parms = numpy.array(calib["parameters"]) if s0 is None: s0 = calib["states"] n_x = len(model.symbols["controls"]) n_s = len(model.symbols["states"]) s_simul = numpy.zeros((T, N, n_s)) x_simul = numpy.zeros((T, N, n_x)) s_simul[0, :, :] = s0[None, :] # are we simulating a markov chain or a continuous process ? if driving_process is not None: if len(driving_process.shape) == 3: m_simul = driving_process sim_type = "continuous" if m0 is None: m0 = model.calibration["exogenous"] x_simul[0, :, :] = dr.eval_ms(m0[None, :], s0[None, :])[0, :] elif len(driving_process.shape) == 2: i_simul = driving_process nodes = dr.exo_grid.nodes m_simul = nodes[i_simul] # inds = i_simul.ravel() # m_simul = np.reshape( np.concatenate( [nodes[i,:][None,:] for i in inds.ravel()], axis=0 ), inds.shape + (-1,) ) sim_type = "discrete" x_simul[0, :, :] = dr.eval_is(i0, s0[None, :])[0, :] else: raise Exception("Incorrect specification of driving values.") m0 = m_simul[0, :, :] else: from dolo.numeric.processes import DiscreteProcess if process is None: if hasattr(dr, "dprocess") and hasattr(dr.dprocess, "simulate"): process = dr.dprocess else: process = model.exogenous # detect type of simulation if not isinstance(process, DiscreteProcess): sim_type = "continuous" else: sim_type = "discrete" if sim_type == "discrete": if i0 is None: i0 = 0 dp = process m_simul = dp.simulate(N, T, i0=i0, stochastic=stochastic) i_simul = find_index(m_simul, dp.values) m0 = dp.node(i0) x0 = dr.eval_is(i0, s0[None, :])[0, :] else: m_simul = process.simulate(N, T, m0=m0, stochastic=stochastic) if isinstance(m_simul, xr.DataArray): m_simul = m_simul.data sim_type = 
"continuous" if m0 is None: m0 = model.calibration["exogenous"] x0 = dr.eval_ms(m0[None, :], s0[None, :])[0, :] x_simul[0, :, :] = x0[None, :] f = model.functions["arbitrage"] g = model.functions["transition"] numpy.random.seed(seed) mp = m0 for i in range(T): m = m_simul[i, :, :] s = s_simul[i, :, :] if sim_type == "discrete": i_m = i_simul[i, :] xx = [ dr.eval_is(i_m[ii], s[ii, :][None, :])[0, :] for ii in range(s.shape[0]) ] x = np.row_stack(xx) else: x = dr.eval_ms(m, s) x_simul[i, :, :] = x ss = g(mp, s, x, m, parms) if i < T - 1: s_simul[i + 1, :, :] = ss mp = m if "auxiliary" not in model.functions: # TODO: find a better test than this l = [s_simul, x_simul] varnames = model.symbols["states"] + model.symbols["controls"] else: aux = model.functions["auxiliary"] a_simul = aux( m_simul.reshape((N * T, -1)), s_simul.reshape((N * T, -1)), x_simul.reshape((N * T, -1)), parms, ) a_simul = a_simul.reshape(T, N, -1) l = [m_simul, s_simul, x_simul, a_simul] varnames = ( model.symbols["exogenous"] + model.symbols["states"] + model.symbols["controls"] + model.symbols["auxiliaries"] ) simul = numpy.concatenate(l, axis=2) if sim_type == "discrete": varnames = ["_i_m"] + varnames simul = np.concatenate([i_simul[:, :, None], simul], axis=2) data = xr.DataArray( simul, dims=["T", "N", "V"], coords={"T": range(T), "N": range(N), "V": varnames}, ) return data def tabulate( model, dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None, **kwargs ): import numpy if isinstance(dr, AlgoResult): dr = dr.dr states_names = model.symbols["states"] controls_names = model.symbols["controls"] index = states_names.index(str(state)) if bounds is None: try: endo_grid = dr.endo_grid bounds = [endo_grid.min[index], endo_grid.max[index]] except: domain = model.domain bounds = [domain.min[index], domain.max[index]] if bounds is None: raise Exception("No bounds provided for simulation or by model.") values = numpy.linspace(bounds[0], bounds[1], n_steps) if s0 is None: s0 = 
model.calibration["states"] svec = numpy.row_stack([s0] * n_steps) svec[:, index] = values try: dp = dr.dprocess except: dp = model.exogenous.discretize() if (i0 is None) and (m0 is None): from dolo.numeric.grids import UnstructuredGrid if isinstance(dp.grid, UnstructuredGrid): n_ms = dp.n_nodes [q, r] = divmod(n_ms, 2) i0 = q - 1 + r else: m0 = model.calibration["exogenous"] if i0 is not None: m = dp.node(i0) xvec = dr.eval_is(i0, svec) elif m0 is not None: m = m0 xvec = dr.eval_ms(m0, svec) mm = numpy.row_stack([m] * n_steps) l = [mm, svec, xvec] series = ( model.symbols["exogenous"] + model.symbols["states"] + model.symbols["controls"] ) if "auxiliary" in model.functions: p = model.calibration["parameters"] pp = numpy.row_stack([p] * n_steps) avec = model.functions["auxiliary"](mm, svec, xvec, pp) l.append(avec) series.extend(model.symbols["auxiliaries"]) import pandas tb = numpy.concatenate(l, axis=1) df = pandas.DataFrame(tb, columns=series) return df def tabulate_2d(model, dr, states=None, i0=0, s0=None, n=[12, 13]): import numpy import xarray as xr if isinstance(dr, AlgoResult): dr = dr.dr if s0 is None: s0 = model.calibration["states"] if states is None: states = model.symbols["states"] assert len(states) == 2 domain = model.get_domain() lps = [numpy.linspace(*domain[s], n[i]) for i, s in enumerate(states)] i_x = model.symbols["states"].index(states[0]) i_y = model.symbols["states"].index(states[1]) vals = [] vstates = [] s = s0.copy() for xx in lps[0]: vv = [] s[i_x] = xx for yy in lps[1]: s[i_y] = yy x = dr.eval_is(i0, s) vv.append(numpy.concatenate([s, x])) vals.append(vv) vv = numpy.array(vals) controls = model.symbols["states"] + model.symbols["controls"] # tab = xr.DataArray(vv, dims=[states[0], states[1], 'V'], coords=[lps[0], lps[1], 'V']) tab = xr.DataArray( vv, dims=[states[0], states[1], "V"], coords={states[0]: lps[0], states[1]: lps[1], "V": controls}, ) return tab def plot3d(tab, varname): X = numpy.array(tab[tab.dims[0]]) Y = 
numpy.array(tab[tab.dims[1]]) Z = numpy.array(tab.loc[:, :, varname]) data = [go.Surface(x=X, y=Y, z=Z)] layout = go.Layout( title="Equity", autosize=False, width=500, height=500, # xaxis=go.XAxis(title=tab.dims[0]), # yaxis={'title':tab.dims[1]}, # zaxis={'title':varname}, xaxis=dict( title="x Axis", nticks=7, titlefont=dict(family="Courier New, monospace", size=18, color="#7f7f7f"), ), margin=dict(l=65, r=50, b=65, t=90), ) fig = go.Figure(data=data, layout=layout) return iplot(fig, filename="graph_" + varname) def plot_decision_rule(plot_controls=None, **kwargs): if isinstance(dr, AlgoResult): dr = dr.dr df = tabulate(dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None) from matplotlib import pyplot if isinstance(plot_controls, str): cn = plot_controls pyplot.plot(values, df[cn], **kwargs) else: for cn in plot_controls: pyplot.plot(values, df[cn], label=cn, **kwargs) pyplot.legend() pyplot.xlabel("state = {} | mstate = {}".format(state, i0))
28.286802
126
0.557649
import numpy import pandas import xarray as xr import numpy as np from dolo.compiler.model import Model from dolo.numeric.optimize.ncpsolve import ncpsolve from dolo.numeric.optimize.newton import newton as newton_solver from dolo.numeric.optimize.newton import SerialDifferentiableFunction rname, T=40, impulse: float = None): i_exo = model.symbols["exogenous"].index(varname) if impulse is None: try: impulse = numpy.sqrt( model.exogenous.Σ[i_exo, i_exo] ) except: impulse = numpy.sqrt(model.exogenous.σ) e1 = numpy.zeros(len(model.symbols["exogenous"])) e1[i_exo] = impulse exogenous = model.exogenous print(exogenous) print(T, e1) m_simul = model.exogenous.response(T - 1, e1) m_simul = m_simul.expand_dims("N") m_simul = m_simul.transpose("T", "N", "V").data sim = simulate(model, dr, N=1, T=T, driving_process=m_simul, stochastic=False) irf = sim.sel(N=0) return irf def find_index(sim, values): sh = sim.shape N = sh[0] T = sh[1] indices = np.zeros((N, T), dtype=int) for n in range(N): for t in range(T): v = sim[n, t, :] ind = np.where((values == v[None, :]).all(axis=1))[0][0] indices[n, t] = ind return indices from dolo.numeric.grids import CartesianGrid, UnstructuredGrid from dolo.algos.results import AlgoResult from dolo.numeric.decision_rule import DecisionRule def simulate( model: Model, dr: DecisionRule, *, process=None, N=1, T=40, s0=None, i0=None, m0=None, driving_process=None, seed=42, stochastic=True, ): if isinstance(dr, AlgoResult): dr = dr.dr calib = model.calibration parms = numpy.array(calib["parameters"]) if s0 is None: s0 = calib["states"] n_x = len(model.symbols["controls"]) n_s = len(model.symbols["states"]) s_simul = numpy.zeros((T, N, n_s)) x_simul = numpy.zeros((T, N, n_x)) s_simul[0, :, :] = s0[None, :] if driving_process is not None: if len(driving_process.shape) == 3: m_simul = driving_process sim_type = "continuous" if m0 is None: m0 = model.calibration["exogenous"] x_simul[0, :, :] = dr.eval_ms(m0[None, :], s0[None, :])[0, :] elif 
len(driving_process.shape) == 2: i_simul = driving_process nodes = dr.exo_grid.nodes m_simul = nodes[i_simul] sim_type = "discrete" x_simul[0, :, :] = dr.eval_is(i0, s0[None, :])[0, :] else: raise Exception("Incorrect specification of driving values.") m0 = m_simul[0, :, :] else: from dolo.numeric.processes import DiscreteProcess if process is None: if hasattr(dr, "dprocess") and hasattr(dr.dprocess, "simulate"): process = dr.dprocess else: process = model.exogenous if not isinstance(process, DiscreteProcess): sim_type = "continuous" else: sim_type = "discrete" if sim_type == "discrete": if i0 is None: i0 = 0 dp = process m_simul = dp.simulate(N, T, i0=i0, stochastic=stochastic) i_simul = find_index(m_simul, dp.values) m0 = dp.node(i0) x0 = dr.eval_is(i0, s0[None, :])[0, :] else: m_simul = process.simulate(N, T, m0=m0, stochastic=stochastic) if isinstance(m_simul, xr.DataArray): m_simul = m_simul.data sim_type = "continuous" if m0 is None: m0 = model.calibration["exogenous"] x0 = dr.eval_ms(m0[None, :], s0[None, :])[0, :] x_simul[0, :, :] = x0[None, :] f = model.functions["arbitrage"] g = model.functions["transition"] numpy.random.seed(seed) mp = m0 for i in range(T): m = m_simul[i, :, :] s = s_simul[i, :, :] if sim_type == "discrete": i_m = i_simul[i, :] xx = [ dr.eval_is(i_m[ii], s[ii, :][None, :])[0, :] for ii in range(s.shape[0]) ] x = np.row_stack(xx) else: x = dr.eval_ms(m, s) x_simul[i, :, :] = x ss = g(mp, s, x, m, parms) if i < T - 1: s_simul[i + 1, :, :] = ss mp = m if "auxiliary" not in model.functions: l = [s_simul, x_simul] varnames = model.symbols["states"] + model.symbols["controls"] else: aux = model.functions["auxiliary"] a_simul = aux( m_simul.reshape((N * T, -1)), s_simul.reshape((N * T, -1)), x_simul.reshape((N * T, -1)), parms, ) a_simul = a_simul.reshape(T, N, -1) l = [m_simul, s_simul, x_simul, a_simul] varnames = ( model.symbols["exogenous"] + model.symbols["states"] + model.symbols["controls"] + model.symbols["auxiliaries"] ) simul = 
numpy.concatenate(l, axis=2) if sim_type == "discrete": varnames = ["_i_m"] + varnames simul = np.concatenate([i_simul[:, :, None], simul], axis=2) data = xr.DataArray( simul, dims=["T", "N", "V"], coords={"T": range(T), "N": range(N), "V": varnames}, ) return data def tabulate( model, dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None, **kwargs ): import numpy if isinstance(dr, AlgoResult): dr = dr.dr states_names = model.symbols["states"] controls_names = model.symbols["controls"] index = states_names.index(str(state)) if bounds is None: try: endo_grid = dr.endo_grid bounds = [endo_grid.min[index], endo_grid.max[index]] except: domain = model.domain bounds = [domain.min[index], domain.max[index]] if bounds is None: raise Exception("No bounds provided for simulation or by model.") values = numpy.linspace(bounds[0], bounds[1], n_steps) if s0 is None: s0 = model.calibration["states"] svec = numpy.row_stack([s0] * n_steps) svec[:, index] = values try: dp = dr.dprocess except: dp = model.exogenous.discretize() if (i0 is None) and (m0 is None): from dolo.numeric.grids import UnstructuredGrid if isinstance(dp.grid, UnstructuredGrid): n_ms = dp.n_nodes [q, r] = divmod(n_ms, 2) i0 = q - 1 + r else: m0 = model.calibration["exogenous"] if i0 is not None: m = dp.node(i0) xvec = dr.eval_is(i0, svec) elif m0 is not None: m = m0 xvec = dr.eval_ms(m0, svec) mm = numpy.row_stack([m] * n_steps) l = [mm, svec, xvec] series = ( model.symbols["exogenous"] + model.symbols["states"] + model.symbols["controls"] ) if "auxiliary" in model.functions: p = model.calibration["parameters"] pp = numpy.row_stack([p] * n_steps) avec = model.functions["auxiliary"](mm, svec, xvec, pp) l.append(avec) series.extend(model.symbols["auxiliaries"]) import pandas tb = numpy.concatenate(l, axis=1) df = pandas.DataFrame(tb, columns=series) return df def tabulate_2d(model, dr, states=None, i0=0, s0=None, n=[12, 13]): import numpy import xarray as xr if isinstance(dr, AlgoResult): dr = dr.dr if s0 
is None: s0 = model.calibration["states"] if states is None: states = model.symbols["states"] assert len(states) == 2 domain = model.get_domain() lps = [numpy.linspace(*domain[s], n[i]) for i, s in enumerate(states)] i_x = model.symbols["states"].index(states[0]) i_y = model.symbols["states"].index(states[1]) vals = [] vstates = [] s = s0.copy() for xx in lps[0]: vv = [] s[i_x] = xx for yy in lps[1]: s[i_y] = yy x = dr.eval_is(i0, s) vv.append(numpy.concatenate([s, x])) vals.append(vv) vv = numpy.array(vals) controls = model.symbols["states"] + model.symbols["controls"] tab = xr.DataArray( vv, dims=[states[0], states[1], "V"], coords={states[0]: lps[0], states[1]: lps[1], "V": controls}, ) return tab def plot3d(tab, varname): X = numpy.array(tab[tab.dims[0]]) Y = numpy.array(tab[tab.dims[1]]) Z = numpy.array(tab.loc[:, :, varname]) data = [go.Surface(x=X, y=Y, z=Z)] layout = go.Layout( title="Equity", autosize=False, width=500, height=500, xaxis=dict( title="x Axis", nticks=7, titlefont=dict(family="Courier New, monospace", size=18, color="#7f7f7f"), ), margin=dict(l=65, r=50, b=65, t=90), ) fig = go.Figure(data=data, layout=layout) return iplot(fig, filename="graph_" + varname) def plot_decision_rule(plot_controls=None, **kwargs): if isinstance(dr, AlgoResult): dr = dr.dr df = tabulate(dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None) from matplotlib import pyplot if isinstance(plot_controls, str): cn = plot_controls pyplot.plot(values, df[cn], **kwargs) else: for cn in plot_controls: pyplot.plot(values, df[cn], label=cn, **kwargs) pyplot.legend() pyplot.xlabel("state = {} | mstate = {}".format(state, i0))
true
true
f716fcb9c6975762b9257b5426a806187ac75ac5
6,270
py
Python
python_modules/dagster-airflow/dagster_airflow_tests/conftest.py
vishvananda/dagster
f6aa44714246bc770fe05a9c986fe8b7d848956b
[ "Apache-2.0" ]
null
null
null
python_modules/dagster-airflow/dagster_airflow_tests/conftest.py
vishvananda/dagster
f6aa44714246bc770fe05a9c986fe8b7d848956b
[ "Apache-2.0" ]
null
null
null
python_modules/dagster-airflow/dagster_airflow_tests/conftest.py
vishvananda/dagster
f6aa44714246bc770fe05a9c986fe8b7d848956b
[ "Apache-2.0" ]
null
null
null
# pylint doesn't understand the way that pytest constructs fixture dependnecies # pylint: disable=redefined-outer-name import datetime import os import shutil import subprocess import sys import tempfile import uuid import airflow.plugins_manager import docker import pytest from dagster import check from dagster.core.execution import create_execution_plan from dagster.utils import load_yaml_from_path, mkdir_p, script_relative_path from dagster_airflow import scaffold_airflow_dag from .test_project.dagster_airflow_demo import define_demo_execution_pipeline from .utils import reload_module IMAGE = 'dagster-airflow-demo' # py2 compat try: FileNotFoundError except NameError: FileNotFoundError = IOError @pytest.fixture(scope='module') def airflow_home(): airflow_home_dir = os.getenv('AIRFLOW_HOME') assert airflow_home_dir, 'No AIRFLOW_HOME set -- is airflow installed?' airflow_home_dir = os.path.abspath(os.path.expanduser(airflow_home_dir)) return airflow_home_dir @pytest.fixture(scope='module') def temp_dir(): dir_path = os.path.join('/tmp', str(uuid.uuid4())) mkdir_p(dir_path) yield dir_path shutil.rmtree(dir_path) @pytest.fixture(scope='module') def docker_client(): try: client = docker.from_env() client.info() except docker.errors.APIError: check.failed('Couldn\'t find docker at {url} -- is it running?'.format(url=client._url(''))) return client @pytest.fixture(scope='module') def docker_image(docker_client): try: docker_client.images.get(IMAGE) except docker.errors.ImageNotFound: check.failed( 'Couldn\'t find docker image {image} required for test: please run the script at ' '{script_path}'.format( image=IMAGE, script_path=script_relative_path('test_project/build.sh') ) ) return IMAGE @pytest.fixture(scope='module') def dags_path(airflow_home): path = os.path.join(airflow_home, 'dags', '') mkdir_p(os.path.abspath(path)) return path @pytest.fixture(scope='module') def plugins_path(airflow_home): path = os.path.join(airflow_home, 'plugins', '') 
mkdir_p(os.path.abspath(path)) return path @pytest.fixture(scope='module') def host_tmp_dir(): mkdir_p('/tmp/results') return '/tmp/results' @pytest.fixture(scope='module') def airflow_test(docker_image, dags_path, plugins_path, host_tmp_dir): assert docker_image plugin_definition_filename = 'dagster_plugin.py' plugin_path = os.path.abspath(os.path.join(plugins_path, plugin_definition_filename)) temporary_plugin_path = None try: if os.path.exists(plugin_path): temporary_plugin_file = tempfile.NamedTemporaryFile(delete=False) temporary_plugin_file.close() temporary_plugin_path = temporary_plugin_file.name shutil.copyfile(plugin_path, temporary_plugin_path) shutil.copyfile( script_relative_path(os.path.join('..', 'dagster_airflow', plugin_definition_filename)), plugin_path, ) mkdir_p(os.path.abspath(dags_path)) sys.path.append(os.path.abspath(dags_path)) created_init_py = False init_py_path = os.path.join(os.path.abspath(dags_path), '__init__.py') if not os.path.exists(init_py_path): with open(init_py_path, 'a'): pass created_init_py = True subprocess.check_output(['airflow', 'initdb']) # necromancy; follows airflow.operators.__init__ reload_module(airflow.plugins_manager) for operators_module in airflow.plugins_manager.operators_modules: sys.modules[operators_module.__name__] = operators_module globals()[operators_module._name] = operators_module # Test that we can now actually import the DagsterOperator from airflow.operators.dagster_plugin import DagsterOperator del DagsterOperator yield (docker_image, dags_path, host_tmp_dir) finally: if os.path.exists(plugin_path): os.remove(plugin_path) if temporary_plugin_path is not None: shutil.copyfile(temporary_plugin_path, plugin_path) os.remove(temporary_plugin_path) if created_init_py: os.remove(init_py_path) sys.path = sys.path[:-1] @pytest.fixture(scope='module') def scaffold_dag(airflow_test): docker_image, dags_path, _ = airflow_test pipeline = define_demo_execution_pipeline() env_config = 
load_yaml_from_path(script_relative_path('test_project/env.yml')) tempdir = tempfile.gettempdir() static_path, editable_path = scaffold_airflow_dag( pipeline=pipeline, env_config=env_config, image=docker_image, output_path=tempdir, dag_kwargs={'default_args': {'start_date': datetime.datetime(1900, 1, 1)}}, ) # Ensure that the scaffolded files parse correctly subprocess.check_output(['python', editable_path]) shutil.copyfile( static_path, os.path.abspath(os.path.join(dags_path, os.path.basename(static_path))) ) shutil.copyfile( editable_path, os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path))) ) os.remove(static_path) os.remove(editable_path) execution_date = datetime.datetime.utcnow().strftime('%Y-%m-%d') pipeline_name = pipeline.name execution_plan = create_execution_plan(pipeline, env_config) yield ( pipeline_name, execution_plan, execution_date, os.path.abspath(os.path.join(dags_path, os.path.basename(static_path))), os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path))), ) os.remove(os.path.abspath(os.path.join(dags_path, os.path.basename(static_path)))) os.remove(os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path)))) try: os.remove( os.path.abspath(os.path.join(dags_path, os.path.basename(static_path)[:-3] + '.pyc')) ) os.remove( os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path)[:-3] + '.pyc')) ) except (FileNotFoundError, OSError): pass
29.299065
100
0.693461
# pylint: disable=redefined-outer-name import datetime import os import shutil import subprocess import sys import tempfile import uuid import airflow.plugins_manager import docker import pytest from dagster import check from dagster.core.execution import create_execution_plan from dagster.utils import load_yaml_from_path, mkdir_p, script_relative_path from dagster_airflow import scaffold_airflow_dag from .test_project.dagster_airflow_demo import define_demo_execution_pipeline from .utils import reload_module IMAGE = 'dagster-airflow-demo' # py2 compat try: FileNotFoundError except NameError: FileNotFoundError = IOError @pytest.fixture(scope='module') def airflow_home(): airflow_home_dir = os.getenv('AIRFLOW_HOME') assert airflow_home_dir, 'No AIRFLOW_HOME set -- is airflow installed?' airflow_home_dir = os.path.abspath(os.path.expanduser(airflow_home_dir)) return airflow_home_dir @pytest.fixture(scope='module') def temp_dir(): dir_path = os.path.join('/tmp', str(uuid.uuid4())) mkdir_p(dir_path) yield dir_path shutil.rmtree(dir_path) @pytest.fixture(scope='module') def docker_client(): try: client = docker.from_env() client.info() except docker.errors.APIError: check.failed('Couldn\'t find docker at {url} -- is it running?'.format(url=client._url(''))) return client @pytest.fixture(scope='module') def docker_image(docker_client): try: docker_client.images.get(IMAGE) except docker.errors.ImageNotFound: check.failed( 'Couldn\'t find docker image {image} required for test: please run the script at ' '{script_path}'.format( image=IMAGE, script_path=script_relative_path('test_project/build.sh') ) ) return IMAGE @pytest.fixture(scope='module') def dags_path(airflow_home): path = os.path.join(airflow_home, 'dags', '') mkdir_p(os.path.abspath(path)) return path @pytest.fixture(scope='module') def plugins_path(airflow_home): path = os.path.join(airflow_home, 'plugins', '') mkdir_p(os.path.abspath(path)) return path @pytest.fixture(scope='module') def host_tmp_dir(): 
mkdir_p('/tmp/results') return '/tmp/results' @pytest.fixture(scope='module') def airflow_test(docker_image, dags_path, plugins_path, host_tmp_dir): assert docker_image plugin_definition_filename = 'dagster_plugin.py' plugin_path = os.path.abspath(os.path.join(plugins_path, plugin_definition_filename)) temporary_plugin_path = None try: if os.path.exists(plugin_path): temporary_plugin_file = tempfile.NamedTemporaryFile(delete=False) temporary_plugin_file.close() temporary_plugin_path = temporary_plugin_file.name shutil.copyfile(plugin_path, temporary_plugin_path) shutil.copyfile( script_relative_path(os.path.join('..', 'dagster_airflow', plugin_definition_filename)), plugin_path, ) mkdir_p(os.path.abspath(dags_path)) sys.path.append(os.path.abspath(dags_path)) created_init_py = False init_py_path = os.path.join(os.path.abspath(dags_path), '__init__.py') if not os.path.exists(init_py_path): with open(init_py_path, 'a'): pass created_init_py = True subprocess.check_output(['airflow', 'initdb']) # necromancy; follows airflow.operators.__init__ reload_module(airflow.plugins_manager) for operators_module in airflow.plugins_manager.operators_modules: sys.modules[operators_module.__name__] = operators_module globals()[operators_module._name] = operators_module # Test that we can now actually import the DagsterOperator from airflow.operators.dagster_plugin import DagsterOperator del DagsterOperator yield (docker_image, dags_path, host_tmp_dir) finally: if os.path.exists(plugin_path): os.remove(plugin_path) if temporary_plugin_path is not None: shutil.copyfile(temporary_plugin_path, plugin_path) os.remove(temporary_plugin_path) if created_init_py: os.remove(init_py_path) sys.path = sys.path[:-1] @pytest.fixture(scope='module') def scaffold_dag(airflow_test): docker_image, dags_path, _ = airflow_test pipeline = define_demo_execution_pipeline() env_config = load_yaml_from_path(script_relative_path('test_project/env.yml')) tempdir = tempfile.gettempdir() static_path, 
editable_path = scaffold_airflow_dag( pipeline=pipeline, env_config=env_config, image=docker_image, output_path=tempdir, dag_kwargs={'default_args': {'start_date': datetime.datetime(1900, 1, 1)}}, ) # Ensure that the scaffolded files parse correctly subprocess.check_output(['python', editable_path]) shutil.copyfile( static_path, os.path.abspath(os.path.join(dags_path, os.path.basename(static_path))) ) shutil.copyfile( editable_path, os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path))) ) os.remove(static_path) os.remove(editable_path) execution_date = datetime.datetime.utcnow().strftime('%Y-%m-%d') pipeline_name = pipeline.name execution_plan = create_execution_plan(pipeline, env_config) yield ( pipeline_name, execution_plan, execution_date, os.path.abspath(os.path.join(dags_path, os.path.basename(static_path))), os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path))), ) os.remove(os.path.abspath(os.path.join(dags_path, os.path.basename(static_path)))) os.remove(os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path)))) try: os.remove( os.path.abspath(os.path.join(dags_path, os.path.basename(static_path)[:-3] + '.pyc')) ) os.remove( os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path)[:-3] + '.pyc')) ) except (FileNotFoundError, OSError): pass
true
true
f716fd7bd67ce55c0a030c87f0370590bbe380cb
223
py
Python
tablas_multiplicar.py
robertogonzalezp/X-Serv-Python-Multiplica
d262184c12217ebdfd3bc8f4a3fccc99496fcaa2
[ "Apache-2.0" ]
null
null
null
tablas_multiplicar.py
robertogonzalezp/X-Serv-Python-Multiplica
d262184c12217ebdfd3bc8f4a3fccc99496fcaa2
[ "Apache-2.0" ]
null
null
null
tablas_multiplicar.py
robertogonzalezp/X-Serv-Python-Multiplica
d262184c12217ebdfd3bc8f4a3fccc99496fcaa2
[ "Apache-2.0" ]
null
null
null
for numero1 in range(1, 11): print('\n', "Tabla del " + str(numero1)) print("--------------") for numero2 in range(1, 11): print(str(numero1) + " por " + str(numero2) + " es " + str(numero1 * numero2))
31.857143
86
0.520179
for numero1 in range(1, 11): print('\n', "Tabla del " + str(numero1)) print("--------------") for numero2 in range(1, 11): print(str(numero1) + " por " + str(numero2) + " es " + str(numero1 * numero2))
true
true
f716fda140000ae4d4422e93bc1747b5025d820d
933
py
Python
aioredis_opentracing/instrument.py
Creativelair/AIORedis-Opentracing
eebf81785052faddaec8c00da74c862bbeeafdf9
[ "BSD-3-Clause" ]
null
null
null
aioredis_opentracing/instrument.py
Creativelair/AIORedis-Opentracing
eebf81785052faddaec8c00da74c862bbeeafdf9
[ "BSD-3-Clause" ]
1
2020-10-09T20:23:40.000Z
2020-10-09T20:23:40.000Z
aioredis_opentracing/instrument.py
Creativelair/AIORedis-Opentracing
eebf81785052faddaec8c00da74c862bbeeafdf9
[ "BSD-3-Clause" ]
null
null
null
import opentracing from signalfx_tracing import utils from wrapt import wrap_function_wrapper from aioredis_opentracing import tracing config = utils.Config( tracer=None, ) def instrument(tracer=None): aioredis = utils.get_module('aioredis') if utils.is_instrumented(aioredis): return tracing.init_tracing(tracer=tracer or config.tracer or opentracing.tracer) def traced_client(__init__, client, args, kwargs): __init__(*args, **kwargs) tracing.trace_client(client) wrap_function_wrapper('aioredis', 'Redis.__init__', traced_client) utils.mark_instrumented(aioredis) def uninstrument(): """Will only prevent new clients from registering tracers.""" aioredis = utils.get_module('aioredis') if not utils.is_instrumented(aioredis): return from aioredis import Redis utils.revert_wrapper(Redis, '__init__') utils.mark_uninstrumented(aioredis)
25.916667
78
0.738478
import opentracing from signalfx_tracing import utils from wrapt import wrap_function_wrapper from aioredis_opentracing import tracing config = utils.Config( tracer=None, ) def instrument(tracer=None): aioredis = utils.get_module('aioredis') if utils.is_instrumented(aioredis): return tracing.init_tracing(tracer=tracer or config.tracer or opentracing.tracer) def traced_client(__init__, client, args, kwargs): __init__(*args, **kwargs) tracing.trace_client(client) wrap_function_wrapper('aioredis', 'Redis.__init__', traced_client) utils.mark_instrumented(aioredis) def uninstrument(): aioredis = utils.get_module('aioredis') if not utils.is_instrumented(aioredis): return from aioredis import Redis utils.revert_wrapper(Redis, '__init__') utils.mark_uninstrumented(aioredis)
true
true
f716fdec7574c7c1759ea315c98291d9a23d5771
2,751
py
Python
tests/di/core/test_instance.py
dlski/python-di
04dcdf58f3cf820e2d2ba5086e4e89822ae1f409
[ "MIT" ]
8
2021-02-05T16:17:31.000Z
2022-03-03T00:01:33.000Z
tests/di/core/test_instance.py
dlski/python-di
04dcdf58f3cf820e2d2ba5086e4e89822ae1f409
[ "MIT" ]
null
null
null
tests/di/core/test_instance.py
dlski/python-di
04dcdf58f3cf820e2d2ba5086e4e89822ae1f409
[ "MIT" ]
null
null
null
import pytest from di.core.compose import ApplicationComposer, ComposedApplication from di.core.element import Element from di.core.injection import InjectionSolver from di.core.instance import ( ApplicationInstanceElementNotFound, RecursiveApplicationInstance, RecursiveApplicationInstanceBuilder, RecursiveProvideContext, ) from di.core.module import ModuleElementConsistencyCheck, ModuleImportSolver from tests.di.core.conftest import AppGenerator @pytest.fixture def composer(): return ApplicationComposer( InjectionSolver(), ModuleImportSolver(), ModuleElementConsistencyCheck(), ) @pytest.fixture def composed( app_generator: AppGenerator, composer: ApplicationComposer ) -> ComposedApplication: return composer.compose(app_generator.valid_app) def _build_instance(composed: ComposedApplication) -> RecursiveApplicationInstance: return RecursiveApplicationInstanceBuilder(composed).build() def test_instance(composed: ComposedApplication, app_generator: AppGenerator): gen = app_generator instance = _build_instance(composed) a1e, a2e = gen.a_elements (b1e,) = gen.b_elements c1e, c2e = gen.c_elements c2 = instance.value_of(c2e) c1 = instance.value_of(c1e) b1 = instance.value_of(b1e) a2 = instance.value_of(a2e) a1 = instance.value_of(a1e) assert c2.c1 is c1 assert c1.a2 is a2 assert c1.b1 is b1 assert a2.a1 is a1 def test_instance_not_found(composed: ComposedApplication, app_generator: AppGenerator): gen = app_generator instance = _build_instance(composed) bce = gen.b_cd_elements[-1] with pytest.raises(ApplicationInstanceElementNotFound): instance.value_of(bce) class _RecursiveProvideContext(RecursiveProvideContext): _call_stack = [] primary_sequence = [] def provide(self, element: Element): if not self._call_stack: self.primary_sequence.append(element) self._call_stack.append(self) try: super().provide(element) finally: self._call_stack.pop() class _RecursiveApplicationInstanceBuilder(RecursiveApplicationInstanceBuilder): def _provide_context(self): return 
_RecursiveProvideContext(self.app) def test_instance_bootstrap(app_generator: AppGenerator, composer: ApplicationComposer): composed = composer.compose(app_generator.bootstrap_app) _RecursiveApplicationInstanceBuilder(composed).build() expected_sequence = [] for module_step in composed.bootstrap_steps: for step in module_step.steps: expected_sequence.extend(step) produced_sequence = _RecursiveProvideContext.primary_sequence assert expected_sequence == produced_sequence
30.230769
88
0.751
import pytest from di.core.compose import ApplicationComposer, ComposedApplication from di.core.element import Element from di.core.injection import InjectionSolver from di.core.instance import ( ApplicationInstanceElementNotFound, RecursiveApplicationInstance, RecursiveApplicationInstanceBuilder, RecursiveProvideContext, ) from di.core.module import ModuleElementConsistencyCheck, ModuleImportSolver from tests.di.core.conftest import AppGenerator @pytest.fixture def composer(): return ApplicationComposer( InjectionSolver(), ModuleImportSolver(), ModuleElementConsistencyCheck(), ) @pytest.fixture def composed( app_generator: AppGenerator, composer: ApplicationComposer ) -> ComposedApplication: return composer.compose(app_generator.valid_app) def _build_instance(composed: ComposedApplication) -> RecursiveApplicationInstance: return RecursiveApplicationInstanceBuilder(composed).build() def test_instance(composed: ComposedApplication, app_generator: AppGenerator): gen = app_generator instance = _build_instance(composed) a1e, a2e = gen.a_elements (b1e,) = gen.b_elements c1e, c2e = gen.c_elements c2 = instance.value_of(c2e) c1 = instance.value_of(c1e) b1 = instance.value_of(b1e) a2 = instance.value_of(a2e) a1 = instance.value_of(a1e) assert c2.c1 is c1 assert c1.a2 is a2 assert c1.b1 is b1 assert a2.a1 is a1 def test_instance_not_found(composed: ComposedApplication, app_generator: AppGenerator): gen = app_generator instance = _build_instance(composed) bce = gen.b_cd_elements[-1] with pytest.raises(ApplicationInstanceElementNotFound): instance.value_of(bce) class _RecursiveProvideContext(RecursiveProvideContext): _call_stack = [] primary_sequence = [] def provide(self, element: Element): if not self._call_stack: self.primary_sequence.append(element) self._call_stack.append(self) try: super().provide(element) finally: self._call_stack.pop() class _RecursiveApplicationInstanceBuilder(RecursiveApplicationInstanceBuilder): def _provide_context(self): return 
_RecursiveProvideContext(self.app) def test_instance_bootstrap(app_generator: AppGenerator, composer: ApplicationComposer): composed = composer.compose(app_generator.bootstrap_app) _RecursiveApplicationInstanceBuilder(composed).build() expected_sequence = [] for module_step in composed.bootstrap_steps: for step in module_step.steps: expected_sequence.extend(step) produced_sequence = _RecursiveProvideContext.primary_sequence assert expected_sequence == produced_sequence
true
true
f716fe49497e0fa092b298e9bf377c75b13a12bf
1,247
py
Python
servers/python/coweb/auth/public.py
opencoweb/coweb
7b3a87ee9eda735a859447d404ee16edde1c5671
[ "AFL-2.1" ]
83
2015-01-05T19:02:57.000Z
2021-11-19T02:48:09.000Z
servers/python/coweb/auth/public.py
xuelingxiao/coweb
7b3a87ee9eda735a859447d404ee16edde1c5671
[ "AFL-2.1" ]
3
2015-12-16T13:49:33.000Z
2019-06-17T13:38:50.000Z
servers/python/coweb/auth/public.py
xuelingxiao/coweb
7b3a87ee9eda735a859447d404ee16edde1c5671
[ "AFL-2.1" ]
14
2015-04-29T22:36:53.000Z
2021-11-18T03:24:29.000Z
''' Copyright (c) The Dojo Foundation 2011. All Rights Reserved. Copyright (c) IBM Corporation 2008, 2011. All Rights Reserved. ''' from .base import AuthBase class PublicAuth(AuthBase): cookieName = 'coweb.auth.public.username' _userId = 0 def requires_login(self): '''Does not require login. Usernames automatically generated.''' return False def requires_cookies(self): '''Uses tornado's secure cookies.''' return True def get_current_user(self, handler): ''' Generates a unique userXXX for this server instance and stores it in a secure cookie. ''' username = handler.get_secure_cookie(self.cookieName) if not username: # generate a random username and set it with a very short lifetime username = 'user%03d' % self._userId # yes, this might conflict between server restarts but it's dummy # public auth self._userId += 1 handler.set_secure_cookie(self.cookieName, username, expires_days=1) return username def clear_credentials(self, handler): '''Clears the authentication cookie.''' handler.clear_cookie(self.cookieName)
34.638889
80
0.642342
from .base import AuthBase class PublicAuth(AuthBase): cookieName = 'coweb.auth.public.username' _userId = 0 def requires_login(self): return False def requires_cookies(self): return True def get_current_user(self, handler): username = handler.get_secure_cookie(self.cookieName) if not username: username = 'user%03d' % self._userId # public auth self._userId += 1 handler.set_secure_cookie(self.cookieName, username, expires_days=1) return username def clear_credentials(self, handler): handler.clear_cookie(self.cookieName)
true
true
f716fedc6fe525ef992c71afd6110e49481a4bad
566
py
Python
Python/Tests/TestData/DebuggerProject/EvalPseudoType.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
Python/Tests/TestData/DebuggerProject/EvalPseudoType.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
Python/Tests/TestData/DebuggerProject/EvalPseudoType.py
nanshuiyu/pytools
9f9271fe8cf564b4f94e9456d400f4306ea77c23
[ "Apache-2.0" ]
null
null
null
from ctypes import * class PyObject(Structure): _fields_ = [('ob_refcnt', c_size_t), ('ob_type', py_object)] class PseudoTypeType(object): def __getattribute__(self, name): if name == '__repr__': raise Exception() elif name == '__name__': return 'PseudoType' class PseudoType(object): def __repr__(self): return 'pseudo' PseudoType_ptr = cast(id(PseudoType), POINTER(PyObject)) obj = PseudoType() PseudoType_ptr.contents.ob_type = py_object(PseudoTypeType) print()
25.727273
60
0.625442
from ctypes import * class PyObject(Structure): _fields_ = [('ob_refcnt', c_size_t), ('ob_type', py_object)] class PseudoTypeType(object): def __getattribute__(self, name): if name == '__repr__': raise Exception() elif name == '__name__': return 'PseudoType' class PseudoType(object): def __repr__(self): return 'pseudo' PseudoType_ptr = cast(id(PseudoType), POINTER(PyObject)) obj = PseudoType() PseudoType_ptr.contents.ob_type = py_object(PseudoTypeType) print()
true
true
f716ff229a4800b23522336aaaedc9e85c29ccb7
1,115
py
Python
userbot/modules/__init__.py
caerus19/Userator
ff4e6d2461d096d2e9d4c8eda9fdf09d47319a61
[ "MIT" ]
null
null
null
userbot/modules/__init__.py
caerus19/Userator
ff4e6d2461d096d2e9d4c8eda9fdf09d47319a61
[ "MIT" ]
null
null
null
userbot/modules/__init__.py
caerus19/Userator
ff4e6d2461d096d2e9d4c8eda9fdf09d47319a61
[ "MIT" ]
1
2021-11-12T18:00:49.000Z
2021-11-12T18:00:49.000Z
# U S Σ R Δ T O R / Ümüd """ U S Σ R Δ T O R """ from userbot import LOGS from telethon.tl.types import DocumentAttributeFilename def __list_all_modules(): from os.path import dirname, basename, isfile import glob mod_paths = glob.glob(dirname(__file__) + "/*.py") all_modules = [ basename(f)[:-3] for f in mod_paths if isfile(f) and f.endswith(".py") and not f.endswith("__init__.py") ] return all_modules ALL_MODULES = sorted(__list_all_modules()) LOGS.info("Yüklənəcək modullar: %s", str(ALL_MODULES)) __all__ = ALL_MODULES + ["ALL_MODULES"] async def MEDIACHECK(reply): type = "img" if reply and reply.media: if reply.photo: data = reply.photo elif reply.document: if DocumentAttributeFilename(file_name='AnimatedSticker.tgs') in reply.media.document.attributes: return False if reply.gif or reply.video: type = "vid" if reply.audio or reply.voice: return False data = reply.media.document else: return False else: return False if not data or data is None: return False else: return (data, type)
24.777778
101
0.675336
from userbot import LOGS from telethon.tl.types import DocumentAttributeFilename def __list_all_modules(): from os.path import dirname, basename, isfile import glob mod_paths = glob.glob(dirname(__file__) + "/*.py") all_modules = [ basename(f)[:-3] for f in mod_paths if isfile(f) and f.endswith(".py") and not f.endswith("__init__.py") ] return all_modules ALL_MODULES = sorted(__list_all_modules()) LOGS.info("Yüklənəcək modullar: %s", str(ALL_MODULES)) __all__ = ALL_MODULES + ["ALL_MODULES"] async def MEDIACHECK(reply): type = "img" if reply and reply.media: if reply.photo: data = reply.photo elif reply.document: if DocumentAttributeFilename(file_name='AnimatedSticker.tgs') in reply.media.document.attributes: return False if reply.gif or reply.video: type = "vid" if reply.audio or reply.voice: return False data = reply.media.document else: return False else: return False if not data or data is None: return False else: return (data, type)
true
true
f716ff4ad1f5c83780f6a28d11c0f4b0f33e91fd
513
py
Python
happyml/graphs/viz.py
guiferviz/happyml-py
4252d0cff27461e38da404553772dafbc74f3eaa
[ "BSD-Source-Code" ]
1
2016-08-15T13:27:48.000Z
2016-08-15T13:27:48.000Z
happyml/graphs/viz.py
guiferviz/happyml-py
4252d0cff27461e38da404553772dafbc74f3eaa
[ "BSD-Source-Code" ]
null
null
null
happyml/graphs/viz.py
guiferviz/happyml-py
4252d0cff27461e38da404553772dafbc74f3eaa
[ "BSD-Source-Code" ]
null
null
null
# FIXME: not required dependency. from graphviz import Digraph def graph2dot(x, **kwargs): dot = Digraph(body=["rankdir=LR;"], **kwargs) path = x.get_computation_path() for i in path: if i.is_input: dot.node(str(i.id), i.name, color="green") elif i.is_parameter: dot.node(str(i.id), i.name, color="gold") else: dot.node(str(i.id), i.name) for ii in i.inputs: dot.edge(str(ii.id), str(i.id)) return dot
22.304348
54
0.549708
from graphviz import Digraph def graph2dot(x, **kwargs): dot = Digraph(body=["rankdir=LR;"], **kwargs) path = x.get_computation_path() for i in path: if i.is_input: dot.node(str(i.id), i.name, color="green") elif i.is_parameter: dot.node(str(i.id), i.name, color="gold") else: dot.node(str(i.id), i.name) for ii in i.inputs: dot.edge(str(ii.id), str(i.id)) return dot
true
true
f716ff4c138f17dceedf81771978e6db1119f04a
1,758
py
Python
setup.py
hoogerheide/reductus
fcc78c06900cff89faceadb7b4eed7b87914c0af
[ "Unlicense" ]
1
2021-06-11T19:24:49.000Z
2021-06-11T19:24:49.000Z
setup.py
hoogerheide/reductus
fcc78c06900cff89faceadb7b4eed7b87914c0af
[ "Unlicense" ]
null
null
null
setup.py
hoogerheide/reductus
fcc78c06900cff89faceadb7b4eed7b87914c0af
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python import sys import os from setuptools import setup, find_packages if len(sys.argv) == 1: sys.argv.append('install') if sys.argv[1] == 'test': from subprocess import call sys.exit(call([sys.executable, '-m', 'pytest'] + sys.argv[2:])) # Create the resource file dataflow/git_revision if os.system('"{sys.executable}" dataflow/rev.py'.format(sys=sys)) != 0: print("setup.py failed to build dataflow/git_revision", file=sys.stderr) sys.exit(1) packages = find_packages(exclude=['reflbin']) #sys.dont_write_bytecode = False dist = setup( name='reductus', version='0.1b2', author='Paul Kienzle', author_email='paul.kienzle@nist.gov', url='http://github.com/reductus/reductus', description='Data reduction for neutron scattering', long_description=open('README.rst').read(), classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Science/Research', 'License :: Public Domain', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Scientific/Engineering :: Physics', ], zip_safe=False, packages=packages, include_package_data=True, entry_points = { 'console_scripts': ['reductus=web_gui.run:main'], }, install_requires=[ 'scipy', 'numpy', 'h5py', 'uncertainties', 'docutils', 'wheel', 'pytz', 'msgpack-python', 'flask', ], extras_require={ 'masked_curve_fit': ['numdifftools'], }, tests_require=['pytest'], ) # End of file
29.79661
76
0.631968
import sys import os from setuptools import setup, find_packages if len(sys.argv) == 1: sys.argv.append('install') if sys.argv[1] == 'test': from subprocess import call sys.exit(call([sys.executable, '-m', 'pytest'] + sys.argv[2:])) if os.system('"{sys.executable}" dataflow/rev.py'.format(sys=sys)) != 0: print("setup.py failed to build dataflow/git_revision", file=sys.stderr) sys.exit(1) packages = find_packages(exclude=['reflbin']) dist = setup( name='reductus', version='0.1b2', author='Paul Kienzle', author_email='paul.kienzle@nist.gov', url='http://github.com/reductus/reductus', description='Data reduction for neutron scattering', long_description=open('README.rst').read(), classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Science/Research', 'License :: Public Domain', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Scientific/Engineering :: Physics', ], zip_safe=False, packages=packages, include_package_data=True, entry_points = { 'console_scripts': ['reductus=web_gui.run:main'], }, install_requires=[ 'scipy', 'numpy', 'h5py', 'uncertainties', 'docutils', 'wheel', 'pytz', 'msgpack-python', 'flask', ], extras_require={ 'masked_curve_fit': ['numdifftools'], }, tests_require=['pytest'], )
true
true
f716ff87b1f5b661462123694cbcb3fe6c7ec595
1,524
py
Python
Lesson09/x.py
PacktPublishing/Python-Fundamentals
f24569826b1b7f97e3d54630a34ae61110ca12da
[ "MIT" ]
1
2021-04-23T14:01:56.000Z
2021-04-23T14:01:56.000Z
Lesson09/x.py
PacktPublishing/Python-Fundamentals
f24569826b1b7f97e3d54630a34ae61110ca12da
[ "MIT" ]
null
null
null
Lesson09/x.py
PacktPublishing/Python-Fundamentals
f24569826b1b7f97e3d54630a34ae61110ca12da
[ "MIT" ]
4
2021-06-29T05:57:44.000Z
2021-09-02T10:14:55.000Z
def remove_punctuation(st, case='l'): """ takes in a string and returns a list of words with no punctuation""" punctuation = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}“”‘’~' all_words = st.split() cap_words = [] for c in punctuation: st = st.replace(c, '') if case == 'u': for word in all_words: if word.istitle(): cap_words.append(word) return cap_words if case == 'l': return all_words def frequency_dictionary(words): freq_dict = {} for word in words: freq_dict[word] = words.count(word) return freq_dict def strip_common_words(words): unique_words = [] with open('1000words.txt', 'r') as common_words: common_words = common_words.read() for word in words: if word not in common_words: unique_words.append(word) return unique_words def print_ranked_dictionary(dictionary, min_count=20): rankedList = sorted(dictionary, key=dictionary.get, reverse=True) # ranking the dictionary (print out ranked words) for i in range(0, len(rankedList)): key = rankedList[i] value = dictionary[key] if value > min_count: print(key, ' repeats ', value, ' times') def main(): with open('HarryPotter.txt', 'r') as text: text = text.read() word_list = remove_punctuation(text) #print(word_list) dictionary = frequency_dictionary(word_list) print_ranked_dictionary(dictionary) main()
28.222222
76
0.602362
def remove_punctuation(st, case='l'): punctuation = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}“”‘’~' all_words = st.split() cap_words = [] for c in punctuation: st = st.replace(c, '') if case == 'u': for word in all_words: if word.istitle(): cap_words.append(word) return cap_words if case == 'l': return all_words def frequency_dictionary(words): freq_dict = {} for word in words: freq_dict[word] = words.count(word) return freq_dict def strip_common_words(words): unique_words = [] with open('1000words.txt', 'r') as common_words: common_words = common_words.read() for word in words: if word not in common_words: unique_words.append(word) return unique_words def print_ranked_dictionary(dictionary, min_count=20): rankedList = sorted(dictionary, key=dictionary.get, reverse=True) # ranking the dictionary (print out ranked words) for i in range(0, len(rankedList)): key = rankedList[i] value = dictionary[key] if value > min_count: print(key, ' repeats ', value, ' times') def main(): with open('HarryPotter.txt', 'r') as text: text = text.read() word_list = remove_punctuation(text) #print(word_list) dictionary = frequency_dictionary(word_list) print_ranked_dictionary(dictionary) main()
true
true
f716fffcedc3cbaba6d963cd4a7e2061ef83cc34
4,109
py
Python
mdeepctr/models/xdeepfm.py
TS-SE-GROUP/icme2019
7eefdb7de6a7ff3bec1721fafb822d80d80dbba3
[ "MIT" ]
78
2019-02-21T12:44:11.000Z
2022-03-30T11:42:33.000Z
mdeepctr/models/xdeepfm.py
rightnowandholdon/icme2019
fe9b31db7bf19b08d5e5d41a259f0a297eb21766
[ "MIT" ]
6
2019-04-11T13:14:46.000Z
2021-05-19T14:36:07.000Z
mdeepctr/models/xdeepfm.py
rightnowandholdon/icme2019
fe9b31db7bf19b08d5e5d41a259f0a297eb21766
[ "MIT" ]
22
2019-02-21T02:51:54.000Z
2021-12-10T02:04:28.000Z
# -*- coding:utf-8 -*- """ Author: Weichen Shen,wcshen1994@163.com Reference: [1] Lian J, Zhou X, Zhang F, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems[J]. arXiv preprint arXiv:1803.05170, 2018.(https://arxiv.org/pdf/1803.05170.pdf) """ import tensorflow as tf from ..input_embedding import preprocess_input_embedding from ..layers.core import PredictionLayer, MLP from ..layers.interaction import CIN from ..utils import check_feature_config_dict from ..layers.utils import concat_fun def xDeepFM(feature_dim_dict, embedding_size=8, hidden_size=(256, 256), cin_layer_size=(128, 128,), cin_split_half=True, cin_activation='relu', l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_deep=0, init_std=0.0001, seed=1024, keep_prob=1, activation='relu', final_activation='sigmoid', use_bn=False, output_dim=1,): """Instantiates the xDeepFM architecture. :param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']} :param embedding_size: positive integer,sparse feature embedding_size :param hidden_size: list,list of positive integer or empty list, the layer number and units in each layer of deep net :param cin_layer_size: list,list of positive integer or empty list, the feature maps in each hidden layer of Compressed Interaction Network :param cin_split_half: bool.if set to True, half of the feature maps in each hidden will connect to output unit :param cin_activation: activation function used on feature maps :param l2_reg_linear: float. L2 regularizer strength applied to linear part :param l2_reg_embedding: L2 regularizer strength applied to embedding vector :param l2_reg_deep: L2 regularizer strength applied to deep net :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param keep_prob: float in (0,1]. 
keep_prob used in deep net :param activation: Activation function to use in deep net :param final_activation: str,output activation,usually ``'sigmoid'`` or ``'linear'`` :param use_bn: bool. Whether use BatchNormalization before activation or not.in deep net :return: A Keras model instance. """ check_feature_config_dict(feature_dim_dict) deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size, l2_reg_embedding, l2_reg_linear, init_std, seed, True) fm_input = concat_fun(deep_emb_list, axis=1) if len(cin_layer_size) > 0: exFM_out = CIN(cin_layer_size, cin_activation, cin_split_half, seed)(fm_input) exFM_logit = tf.keras.layers.Dense(1, activation=None,)(exFM_out) deep_input = tf.keras.layers.Flatten()(fm_input) output=[] for _ in range(output_dim): deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob, use_bn, seed)(deep_input) deep_logit = tf.keras.layers.Dense( 1, use_bias=False, activation=None)(deep_out) if len(hidden_size) == 0 and len(cin_layer_size) == 0: # only linear final_logit = linear_logit elif len(hidden_size) == 0 and len(cin_layer_size) > 0: # linear + CIN final_logit = tf.keras.layers.add([linear_logit, exFM_logit]) elif len(hidden_size) > 0 and len(cin_layer_size) == 0: # linear + Deep final_logit = tf.keras.layers.add([linear_logit, deep_logit]) elif len(hidden_size) > 0 and len(cin_layer_size) > 0: # linear + CIN + Deep final_logit = tf.keras.layers.add( [linear_logit, deep_logit, exFM_logit]) else: raise NotImplementedError output.append(PredictionLayer(final_activation)(final_logit)) model = tf.keras.models.Model(inputs=inputs_list, outputs=output) return model
54.065789
325
0.690436
import tensorflow as tf from ..input_embedding import preprocess_input_embedding from ..layers.core import PredictionLayer, MLP from ..layers.interaction import CIN from ..utils import check_feature_config_dict from ..layers.utils import concat_fun def xDeepFM(feature_dim_dict, embedding_size=8, hidden_size=(256, 256), cin_layer_size=(128, 128,), cin_split_half=True, cin_activation='relu', l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_deep=0, init_std=0.0001, seed=1024, keep_prob=1, activation='relu', final_activation='sigmoid', use_bn=False, output_dim=1,): check_feature_config_dict(feature_dim_dict) deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size, l2_reg_embedding, l2_reg_linear, init_std, seed, True) fm_input = concat_fun(deep_emb_list, axis=1) if len(cin_layer_size) > 0: exFM_out = CIN(cin_layer_size, cin_activation, cin_split_half, seed)(fm_input) exFM_logit = tf.keras.layers.Dense(1, activation=None,)(exFM_out) deep_input = tf.keras.layers.Flatten()(fm_input) output=[] for _ in range(output_dim): deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob, use_bn, seed)(deep_input) deep_logit = tf.keras.layers.Dense( 1, use_bias=False, activation=None)(deep_out) if len(hidden_size) == 0 and len(cin_layer_size) == 0: final_logit = linear_logit elif len(hidden_size) == 0 and len(cin_layer_size) > 0: final_logit = tf.keras.layers.add([linear_logit, exFM_logit]) elif len(hidden_size) > 0 and len(cin_layer_size) == 0: final_logit = tf.keras.layers.add([linear_logit, deep_logit]) elif len(hidden_size) > 0 and len(cin_layer_size) > 0: final_logit = tf.keras.layers.add( [linear_logit, deep_logit, exFM_logit]) else: raise NotImplementedError output.append(PredictionLayer(final_activation)(final_logit)) model = tf.keras.models.Model(inputs=inputs_list, outputs=output) return model
true
true