blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bf896d27d0957d9c1b5952ca011f6569797647a3 | 7c1c1f156299d8da8135ee41d8076e9ea38dce6a | /backend/manage.py | 62f3a31db53e84f0bd4e130f6618874c6b84ec9c | [] | no_license | crowdbotics-apps/spell-15623 | 479832f65626f5963ec837285c13ba2acc85e64d | 3df3d7fbf2048b8cd0b4eae4cd0b89c6f72bc0c2 | refs/heads/master | 2023-02-06T06:32:29.867764 | 2020-04-09T01:30:03 | 2020-04-09T01:30:03 | 254,241,135 | 0 | 0 | null | 2023-01-24T03:34:31 | 2020-04-09T01:29:09 | JavaScript | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administrative utility."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'spell_15623.settings')
    try:
        # Imported lazily so a missing Django install produces the
        # helpful message below instead of a bare ImportError at load time.
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
aff89f0b2bd9f4a3b58372e9c62cf95ee59439a6 | c7980ab081ce890924accc0459509bde243159c6 | /src/tests/views.py | e67ff607d71291339e84a37f48ee475736abc1af | [] | no_license | Aliflail/MiniProject | 7226e1ceb7e36c0432b6c00e2632b6d6fb8634f9 | 722a3595f5885b862f55bdd540d630abb142d308 | refs/heads/master | 2021-01-09T06:23:54.525514 | 2017-04-06T03:38:02 | 2017-04-06T03:38:02 | 80,981,833 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,573 | py | from django.shortcuts import render,redirect,get_object_or_404,HttpResponseRedirect,HttpResponse
from django.views import View
from .models import Apt_Test,Testscore,Answers,Apt_Qns,Correct
from django.urls import reverse
from django.contrib.auth import get_user_model
from .forms import TestForm,QuestionForm,AnswerForm,checkedAnswerform
from django.http import HttpResponseNotAllowed
from django.contrib import messages
from datetime import timedelta
from accounts import models
from oncomp import models as omodels
from django.http import JsonResponse
user=get_user_model()
class testpage(View):
    """Serve an aptitude test one question at a time.

    The id of the question currently shown is kept in the session under
    'Testscore_question'; the user's running score is a Testscore row.
    Question ids are not assumed to be contiguous, so the handlers skip
    over gaps in the id sequence.
    """
    template_name = "test.html"

    def get(self, request, test_id):
        if not request.user.is_authenticated:
            return redirect(reverse("accounts:index"))
        test = get_object_or_404(Apt_Test, pk=test_id)
        if not Testscore.objects.filter(user=request.user, test=test).exists() and test.apt_qns_set.count() > 0:
            # First visit: create the score row and seek to the first
            # question id that actually exists.
            score = Testscore.objects.create(user=request.user, test=test)
            request.session['Testscore_question'] = 1
            if test.apt_qns_set.filter(id=request.session['Testscore_question']).exists():
                q = test.apt_qns_set.get(id=request.session['Testscore_question'])
            else:
                while (request.session['Testscore_question'] <= test.apt_qns_set.count()
                       and not test.apt_qns_set.filter(id=request.session['Testscore_question']).exists()):
                    request.session['Testscore_question'] += 1
                q = test.apt_qns_set.get(id=request.session['Testscore_question'])
            # NOTE(review): these assignments are never persisted with
            # score.save() here; they only affect the object rendered below.
            score.question = request.session['Testscore_question']
            score.itime = test.time
            context = {
                "q": q,
                "atest": score,
                "test_id": test_id,
            }
            return render(request, self.template_name, context)
        elif 'Testscore_question' in request.session and test.apt_qns_set.filter(pk=request.session['Testscore_question']).exists():
            # Py3-compat fix: request.session.has_key(...) is Python-2 only;
            # the `in` operator is equivalent on both.
            score = Testscore.objects.get(user=request.user, test=test)
            q = test.apt_qns_set.get(id=request.session['Testscore_question'])
            context = {
                "q": q,
                "atest": score,
                "test_id": test_id,
            }
            return render(request, self.template_name, context)
        else:
            # Test finished (or has no questions): hand over to the results view.
            # Fix: args=(test_id) was not a tuple; reverse() would iterate the
            # id string character by character and break for multi-digit ids.
            return HttpResponseRedirect(reverse('oncomp:ctest', args=(test_id,)))

    def post(self, request, test_id):
        test = get_object_or_404(Apt_Test, pk=test_id)
        # Fix: the existence check previously filtered on user only, so a
        # score row for a *different* test made the subsequent get() raise.
        if Testscore.objects.filter(user=request.user, test=test).exists():
            score = Testscore.objects.get(user=request.user, test=test)
        else:
            score = Testscore.objects.create(user=request.user, test=test)
        try:
            q = test.apt_qns_set.get(pk=request.session['Testscore_question'])
            # Py3-compat fix: `print request.POST['time']` was a Python-2
            # print statement (a SyntaxError under Python 3).
            print(request.POST['time'])
            score.itime = timedelta(seconds=int(request.POST['time']))
            selectedchoice = q.answers_set.get(pk=request.POST['choice'])
        except (KeyError, Answers.DoesNotExist):
            context = {
                "t": test,
                "error": "you didnt select a choice",
            }
            return render(request, self.template_name, context)
        else:
            # The two original branches were identical except for the score
            # increment; deduplicated here.
            if selectedchoice.correct_set.all().exists():
                score.score += 1
            self._advance_question(request, test)
            score.save()
            return HttpResponseRedirect(reverse('test', args=(test_id,)))

    def _advance_question(self, request, test):
        # Step the session pointer to the next question id, skipping ids
        # that have no Apt_Qns row.
        request.session['Testscore_question'] += 1
        while (request.session['Testscore_question'] <= test.apt_qns_set.count()
               and not test.apt_qns_set.filter(id=request.session['Testscore_question']).exists()):
            request.session['Testscore_question'] += 1
class resultpage(View):
    """Render the final score page for a completed aptitude test."""
    template_name = "results.html"

    def get(self, request, test_id):
        if not request.user.is_authenticated:
            return redirect(reverse("accounts:index"))
        test = get_object_or_404(Apt_Test, pk=test_id)
        score = get_object_or_404(Testscore, test=test, user=request.user)
        return render(request, self.template_name, {"score": score})
class createtest(View):
    """Create a new aptitude test and stash its id in the session for the
    follow-up question/answer creation views."""
    createtestt = "createtest.html"

    def get(self, request):
        if not request.user.is_authenticated:
            return redirect(reverse("accounts:index"))
        context = {
            "tform": TestForm(),
            "profile": models.Profile.objects.get(user=request.user),
            "createlink": "active",
        }
        return render(request, self.createtestt, context)

    def post(self, request):
        t = TestForm(request.POST)
        if t.is_valid():
            object = t.save()
            request.session['Apt_Test_id'] = object.id
            messages.success(request, ' test created ')
            return redirect(reverse('createquestion'))
        # Bug fix: the original fell off the end and returned None on an
        # invalid form, which makes Django raise. Re-render with errors.
        context = {
            "tform": t,
            "profile": models.Profile.objects.get(user=request.user),
            "createlink": "active",
        }
        return render(request, self.createtestt, context)
class createquestion(View):
    """Add a question to the test currently stored in the session."""
    createquestion = "createquestions.html"

    def get(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return redirect(reverse("accounts:index"))
        form = QuestionForm({"test_id": request.session['Apt_Test_id']})
        return render(request, self.createquestion, {"qform": form})

    def post(self, request):
        form = QuestionForm(request.POST)
        if not form.is_valid():
            # Invalid input: show the form again with its errors.
            return render(request, self.createquestion, {"qform": form})
        question = form.save()
        request.session['Apt_Qns_id'] = question.id
        messages.success(request, 'answer created ')
        return redirect(reverse('createanswer'))
class createanswer(View):
    """Add an answer choice to the question currently stored in the session.

    POST flags:
      * iscorrect - mark this answer as the question's correct one
      * islast    - last answer of the whole test, go back home
      * nextq     - done with this question, go create the next question
      * neither   - keep adding answers to the same question
    """
    createanswer = "createanswers.html"

    def get(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return redirect(reverse("accounts:index"))
        a = checkedAnswerform({"qn_id": request.session['Apt_Qns_id']})
        return render(request, self.createanswer, {"aform": a})

    def post(self, request):
        a = checkedAnswerform(request.POST)
        if not a.is_valid():
            return redirect(reverse('createanswer'))
        # The original repeated the save/Correct bookkeeping in all three
        # branches; deduplicated here, behavior unchanged.
        answer = a.save()
        if request.POST.get('iscorrect'):
            question = Apt_Qns.objects.get(id=request.session['Apt_Qns_id'])
            Correct.objects.create(ans_id=answer, qn_id=question)
        if request.POST.get('islast'):
            return redirect(reverse('accounts:home'))
        if request.POST.get('nextq'):
            return redirect(reverse('createquestion'))
        return redirect(reverse('createanswer'))
def testexpire(request):
    """AJAX-only endpoint: reset the session's question pointer when the
    test timer runs out."""
    if request.method != 'POST' or not request.is_ajax():
        return HttpResponseNotAllowed(['POST'])
    request.session['Testscore_question'] = 0
    return HttpResponse('Sorry expired')
def updatetime(request):
    """AJAX-only endpoint: persist the remaining time for the user's
    in-progress test score."""
    if request.method != 'POST' or not request.is_ajax():
        return HttpResponseNotAllowed(['POST'])
    test = get_object_or_404(Apt_Test, pk=request.POST["test_id"])
    score = Testscore.objects.get(user=request.user, test=test)
    score.itime = timedelta(seconds=int(request.POST["timer"]))
    score.save()
    return HttpResponse("ok")
"albinpaul.ks@gmail.com"
] | albinpaul.ks@gmail.com |
9a98493bd0082a21a5f6dda7254b3c58a8d15f7b | 560c18c656edfbb423d306366a6b036b3876dc56 | /day4/6-lambda_dict.py | ed681e049c8d29d9fb139c1a637ddf3e3db4e933 | [] | no_license | Mahiuha/skaehub-assignment | e5f0ff59a40f27e6d47e9a4c4ec0023f41b63a08 | e20b6ec68a75a7b66bf0ce7a0358ea869164eca8 | refs/heads/main | 2023-06-15T18:13:46.391586 | 2021-06-30T20:47:28 | 2021-06-30T20:47:28 | 379,258,402 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | #a simple dictionary
# A small dictionary whose keys are deliberately out of order.
my_dict = {2: 10, 1: 2, -3: 1234}
# Rebuild the dict with its items ordered by key (lambda picks the key).
sorted_dict = dict(sorted(my_dict.items(), key=lambda pair: pair[0]))
print(sorted_dict)
| [
"tiasho.mahiuha@gmail.com"
] | tiasho.mahiuha@gmail.com |
7b0357e1b2efeb828faeaa9ec80719e834364a29 | cb96922c78549f54bd27966151e0e696fe16b82a | /python/Flatten.py | fe0b9b41b253c482d868a0f4286fa01e6721884d | [] | no_license | erickhouse/practice | f86d006c13620f330cd9281f7e0690fa04461bc9 | 7734a30103d1eaa2dd2aa96520bc6265ae09e21e | refs/heads/master | 2021-06-02T06:24:41.173475 | 2020-08-29T20:44:56 | 2020-08-29T20:44:56 | 76,145,833 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | # {
# a: 1,
# b: 2,
# c: {
# d: 4,
# e: {
# g: 10,
# h: 12
# }
# }
# }
#
# {
# a: 1,
# b: 2,
# c.d: 4,
# c.e.g: 10,
# c.e.h: 12
# }
nested = { 'a': 1,'b': 2,'c': {'d': 4,'e': {'g': 10,'h': 12} } }
def flatten(nested):
    """Flatten a nested dict into a single-level dict with dotted keys.

    Example: {'c': {'d': 4}} -> {'c.d': 4}.

    Bug fix: the original popped keys from `nested` while iterating
    `nested.items()`, which raises "dictionary changed size during
    iteration" (RuntimeError) on Python 3. This version recurses instead
    and leaves the input dict unmodified.
    """
    result = {}
    for key, val in nested.items():
        if isinstance(val, dict):
            # Prefix every flattened child key with this level's key.
            for child_key, child_val in flatten(val).items():
                result[key + '.' + child_key] = child_val
        else:
            result[key] = val
    return result
print(flatten(nested))
def test(cool):
for c in cool:
if c:
print(c)
| [
"erickhouse01@gmail.com"
] | erickhouse01@gmail.com |
5f839e6297a28bbe1d2fc16fae3eb3409d2e3673 | fa582c5773c9fb68d7b2fc683ee890ae286b8f14 | /PyTorch/SpeechSynthesis/Tacotron2/tacotron2/model_prosody.py | b27cd358b454e55b4e1f1b323dc7a91d88b35e6b | [
"BSD-3-Clause"
] | permissive | Hweemyoung/DeepLearningExamples | eadd38250c8568d6e9040ace9e1401fe75b720b0 | 6a3bc2e2e02928c5bda874cbd664a1033db75b76 | refs/heads/master | 2020-08-07T08:03:46.259949 | 2019-10-20T08:39:59 | 2019-10-20T08:39:59 | 213,364,586 | 0 | 0 | null | 2019-10-07T11:23:22 | 2019-10-07T11:23:21 | null | UTF-8 | Python | false | false | 4,392 | py | from math import sqrt
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
from torch.nn.modules import MultiheadAttention
import sys
from os.path import abspath, dirname
# enabling modules discovery from global entrypoint
sys.path.append(abspath(dirname(__file__)+'/../'))
from common.layers_prosody import *
from common.utils import to_gpu, get_mask_from_lengths
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention with an optional mask stage."""

    def __init__(self, scale_scalar):
        super(ScaledDotProductAttention, self).__init__()
        self.MatMul = MatMul()
        self.Scale = Scale(scale_scalar)
        self.Mask = Mask()
        # When True, the mask is applied to the scaled scores.
        self.mask_valid = True

    def mask_on(self):
        self.mask_valid = True

    def mask_off(self):
        self.mask_valid = False

    def forward(self, Q, K, V):
        '''
        :param Q: 3-d Tensor
            size([batch_size, dim_Q, T_Q])
        :param K: 3-d Tensor
            size([batch_size, dim_K, T_K])
        :param V: 3-d Tensor
            size([batch_size, dim_V, T_V])
        :return: 3-d Tensor of attended values
        '''
        x = self.MatMul(Q, K)
        x = self.Scale(x)
        # Bug fix: `x = self.Mask(x) if self.mask_valid == True` was a
        # SyntaxError (conditional expression without an `else` clause).
        if self.mask_valid:
            x = self.Mask(x)
        x = torch.softmax(x, dim=2)  # x.size([batch_size, T_Q, T_K])
        x = self.MatMul(x, V.transpose(1, 2))  # x.size([batch_size, T_Q, d_v])
        # Bug fix: forward originally fell off the end and returned None.
        return x
class MultiheadAttention(nn.Module):
    # NOTE(review): unfinished stub — it shadows the MultiheadAttention
    # imported from torch.nn.modules at the top of this file, and the
    # `num_heads` argument is currently unused.
    def __init__(self, num_heads):
        super(MultiheadAttention, self).__init__()
class StyleAttention(nn.Module):
    # NOTE(review): work in progress — only the mode validation is
    # implemented; no attention machinery is built yet.
    def __init__(self,
                 mode='multihead',
                 # Closed set of supported attention modes; `mode` must be one
                 # of these or __init__ raises.
                 modes_list=['content_based', 'dot_product', 'location_based', 'multihead']
                 ):
        super(StyleAttention, self).__init__()
        if mode not in modes_list:
            raise ValueError('Invalid style sttention mode selected.')
        if mode == 'multihead':
            # TODO(review): multihead construction not implemented; __init__
            # simply returns without building anything.
            return
class StyleTokenLayer(nn.Module):
    """Global Style Token (GST) layer (work in progress).

    Holds `num_tokens` randomly initialized style embeddings of dimension
    `dim_style_embedding`.
    """

    def __init__(self, dim_style_embedding, num_tokens):
        super(StyleTokenLayer, self).__init__()
        # initialize GSTs with randomly initialized embeddings
        self.GlobalStyleTokens = torch.randn([dim_style_embedding, num_tokens])
        # Bug fix: `self.attention =` had no right-hand side, a SyntaxError
        # that prevented the whole module from importing.
        # TODO(review): wire up the intended attention module here.
        self.attention = None

    def forward(self, *input):
        # Bug fix: the original forward had an empty body (SyntaxError).
        # TODO(review): implement token attention over the input.
        raise NotImplementedError('StyleTokenLayer.forward is not implemented yet')
class SpeakerEmbeddingLookup:
    """Callable wrapper mapping a speaker id to its embedding column."""

    def __init__(self, speaker_embeddings):
        # speaker_embeddings: 2-d tensor of shape
        # [dim_speaker_embedding, total number of speakers]
        self.speaker_embeddings = speaker_embeddings

    def __call__(self, speaker_id):
        """Return the 1-d embedding (size [dim_speaker_embedding]) for the
        speaker with the given integer id."""
        return self.speaker_embeddings[:, speaker_id]
class ReferenceEncoder(nn.Module):
    """Prosody reference encoder: 6-layer strided Conv2d stack -> GRU ->
    linear projection -> tanh of the final timestep.

    :param dim_ref: frequency dimension of the reference spectrogram
    :param len_ref: time length of the reference (currently unused here;
        the recurrence handles variable length)
    """

    def __init__(self, dim_ref, len_ref, in_sizes=(1, 32, 32, 64, 64, 128),
                 out_sizes=(32, 32, 64, 64, 128, 128), rnn_mode='GRU',
                 rnn_units=128, dim_prosody=128):
        super(ReferenceEncoder, self).__init__()
        # 6-Layer Strided Conv2D w/ BatchNorm (3 modules per conv block)
        assert len(in_sizes) == len(out_sizes)
        self.layers = nn.ModuleList()
        layers_per_block = 3
        for in_size, out_size in zip(in_sizes, out_sizes):
            self.layers.append(Conv2DNorm(in_size, out_size))
            self.layers.append(nn.ReLU())
            self.layers.append(nn.BatchNorm2d(num_features=out_size))
        num_conv_blocks = len(self.layers) // layers_per_block
        # Bug fix: the original computed the RNN input size from the
        # module-level global `dim_r` instead of the `dim_ref` argument,
        # so any other dim_ref silently produced a wrong-sized GRU.
        # Assumes each conv block halves the frequency axis — TODO confirm
        # against Conv2DNorm's stride.
        self.rnn = nn.RNNBase(
            mode=rnn_mode,
            input_size=out_sizes[-1] * int(dim_ref / 2 ** num_conv_blocks),
            hidden_size=rnn_units,
        )
        self.Linear = LinearNorm(in_dim=rnn_units, out_dim=dim_prosody)

    def forward(self, x):
        # x: [batch, 1, dim_ref, len_ref] reference spectrogram.
        # (Debug prints from the original were removed.)
        for layer in self.layers:
            x = layer(x)
        # Collapse (channels, freq) into one feature axis, time-major for RNN.
        x = x.flatten(1, 2)
        x = x.transpose(1, 2)
        x, _ = self.rnn(x)
        x = self.Linear(x)
        # Keep only the final timestep's output as the prosody summary.
        x = x[:, -1, :]
        x = torch.tanh(x)
        return x
dim_r = 128
L_r = 256
refenc = ReferenceEncoder(dim_r,L_r)
refinput = torch.rand(size=[10, 1, dim_r, L_r])
refenc(refinput) | [
"hweemyoung@gmail.com"
] | hweemyoung@gmail.com |
122d9d89d8b4a635610d0e0ab630a9107009ec01 | a349e493472777973532a92d82c32828b74ab2cf | /posts/views.py | d0cbfb3c66dfff7904e009793310cef8b5cdb232 | [] | no_license | stephen-lakes/blog-api | 57daa7609936b73b2988a44670d2bc3bd1d42ddc | 32f3546c98962849989f1940ea9494b8892510fd | refs/heads/master | 2023-07-09T04:01:00.849999 | 2021-08-11T17:31:53 | 2021-08-11T17:31:53 | 391,179,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | #from django.shortcuts import render
from django.contrib.auth import get_user_model
from django.db.models import query
from rest_framework import viewsets
from .models import Post
from .permissions import IsAuthorOrReadOnly # new
from .serializers import PostSerializer, UserSerializer
class PostViewSet(viewsets.ModelViewSet):
    # Full CRUD endpoint for Post objects; write access is restricted to the
    # post's author by the custom IsAuthorOrReadOnly permission.
    permission_classes = (IsAuthorOrReadOnly,)
    queryset = Post.objects.all()
    serializer_class = PostSerializer
class UserViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for user accounts.

    Bug fix: this viewset previously served Post objects with the
    PostSerializer (a copy of PostViewSet), while the imported
    UserSerializer and get_user_model were never used.
    """
    permission_classes = (IsAuthorOrReadOnly,)
    queryset = get_user_model().objects.all()
    serializer_class = UserSerializer
| [
"oluyomiolamilekan99@gmail.com"
] | oluyomiolamilekan99@gmail.com |
24dfac46657f278b609024a9bfb666724ab17f09 | e5c9a513779713339d27a3d477475c075d3e611e | /album_cover_flipbook.py | 3728291d81760995f935cbeaba99abd80cae2c67 | [] | no_license | innes213/AlbumFlipBook | 3d6a182c40348999dd65034f482b38ddedaf1325 | 9c71ff4c922964ed8186caada60fcd31b5c2ee85 | refs/heads/master | 2020-04-19T02:54:36.335090 | 2019-01-28T07:24:56 | 2019-01-28T07:24:56 | 167,918,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,534 | py | '''
Simple script that grabs your Discogs collection,
sorts and downloads the images then compiles them
into a video using ffmpeg
'''
from math import floor, log10
import os
import subprocess

import urllib3

import discogs_client
USER_TOKEN = '<<<DISCOGS USER TOKEN>>>'
UA_STRING = '<<<MEADINGFUL USER AGENT STRING>>>'
def curl_urls(urls):
    """Download each URL to ./images/image-XXX.jpg using curl.

    File numbers are 1-based and zero-padded to the width of len(urls).
    """
    num_digits = floor(log10(len(urls))) + 1
    if not os.path.exists('./images'):
        os.makedirs('images')
    for count in range(1, len(urls) + 1):
        # str.zfill replaces the manual zero-padding loop.
        filename = './images/image-%s.jpg' % str(count).zfill(num_digits)
        # Security fix: pass argv as a list instead of interpolating the URL
        # into an os.system() shell string, where a crafted URL could inject
        # arbitrary shell commands.
        subprocess.run(['curl', '-s', urls[count - 1], '-o', filename])
        print('Finished writing %s' % filename)
# This is the "right" way but the urls are funky and curl works better
# Writign the stream to a file directly might work better but I doubt it.
def download_images(urls):
    """Download each URL to ./images/image-XXX.jpg with urllib3.

    Pure-Python alternative to curl_urls() (see the note above the original:
    curl handled the odd URLs better, but this avoids shelling out).
    """
    num_digits = floor(log10(len(urls))) + 1
    if not os.path.exists('./images'):
        os.makedirs('images')
    # Hoisted out of the loop: one pool serves every request.
    connection_pool = urllib3.PoolManager()
    # Bug fix: the original looped over range(1, len(urls)) and therefore
    # never downloaded the last image (curl_urls correctly used len+1).
    for count in range(1, len(urls) + 1):
        filename = './images/image-%s.jpg' % str(count).zfill(num_digits)
        res = connection_pool.request('GET', urls[count - 1])
        # `with` guarantees the file handle is closed even if write fails.
        with open(filename, 'wb') as f:
            f.write(res.data)
        res.release_conn()
        print('Finished writing %s' % filename)
if __name__ == '__main__':
    # Fetch the user's Discogs collection via the authenticated client.
    print("Fetching collection data")
    d = discogs_client.Client(UA_STRING, user_token=USER_TOKEN)
    me = d.identity()
    releases = me.collection_folders[0].releases
    # NOTE: the comprehension variable `d` shadows the client above;
    # harmless here because the client is not used again.
    album_data = [d.data['basic_information'] for d in releases]
    print("Sorting collection by artist")
    # Strip leading articles ("The ", "A ") and lowercase so sorting is by
    # the artist's significant name.
    for a in album_data:
        a['artists'][0]['name'] = a['artists'][0]['name'].replace('The ','').replace('A ', '').lower()
    album_data = sorted(album_data, key = lambda k: k['artists'][0]['name'])
    cover_art = [d['cover_image'] for d in album_data]
    print("Downloading images for %d albums" % len(cover_art))
    curl_urls(cover_art)
    # Create a manifest file (doesn't seem to work with urls)
    # f = open("input.txt", "w")
    # for url in cover_art:
    #     f.write("file %s\n" % url)
    #     f.write("duration 2\n")
    # f.write("file %s\n" % cover_art[-1])
    # f.close
    # ffmpeg is pretty flaky. -r 5 works but -r 3 and 4 don't, even when spec'ing
    # framerate, filets, etc..
    # Create a slide show showing 5 images per second
    command_str = "ffmpeg -r 5 -i ./images/image-%03d.jpg -c:v mpeg4 out.mp4"
    os.system(command_str)
| [
"innes213@yahoo.com"
] | innes213@yahoo.com |
9340ea406a2992c1528ff2265f2414cfc823cd00 | 80fcadb5d843ce5aaffe91b6177070ae5530c7db | /website/website/urls.py | c04eab4bf273713c9fadb246b4cfd327292954e0 | [] | no_license | g-prog/second-django-project-website | 9496f2b9adfb9a769076391c58c63b3fdcdd7b91 | 0156871d1ae5a5f58d7cc63cca5e1d51dad15256 | refs/heads/master | 2022-11-17T19:16:23.581826 | 2020-07-17T14:16:50 | 2020-07-17T14:16:50 | 280,439,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | """website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf.urls.static import static
from django.conf import settings
# Root URL configuration: admin site plus the two project apps, both
# mounted at the site root (newsapp is matched first).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('newsapp.urls')),
    path('', include('accounts.urls'))
]
# Serve user-uploaded media files through Django (development helper).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"alaobukky2@gmail.com"
] | alaobukky2@gmail.com |
305da2da6f4b22e89e4160f93e3e470090d20926 | d78dfc5089717fc242bbd7097f507d811abb4260 | /USA/script.icechannel.Usefile.settings/default.py | bdf16309ca6cef9b0506ee0fee1844dc07dd00bb | [] | no_license | tustxk/AddOnRepo | 995b980a9ec737e2c25bed423fc83f710c697e40 | 6b86a06cb37e6e10b4119584dd7311ebc2318e54 | refs/heads/master | 2022-10-08T21:34:34.632346 | 2016-10-28T09:48:01 | 2016-10-28T09:48:01 | 70,684,775 | 1 | 1 | null | 2022-10-01T16:27:13 | 2016-10-12T09:31:16 | Python | UTF-8 | Python | false | false | 163 | py | addon_id="script.icechannel.Usefile.settings"
addon_name="iStream - Usefile - Settings"
import xbmcaddon
addon = xbmcaddon.Addon(id=addon_id)
addon.openSettings()
| [
"ke.xiao@netxeon.com"
] | ke.xiao@netxeon.com |
e68399de2cbde8fb459d31a5b0f26802eedc02c9 | eb047689833040ac6a46a8495b8c133d22d6caeb | /mainapp/migrations/0005_volunteer_about.py | 375f711dcb8646677a15c436b7fd2a63999b240c | [] | no_license | VadzimIlyukevich/Kitty-brotherhood | f203a973ad80f7e6375336a9b75b1a7ad1ac7d29 | e22991e9d7761c77423cf9f141699eecef15224c | refs/heads/master | 2023-05-05T21:08:21.044061 | 2021-05-19T13:56:23 | 2021-05-19T13:56:23 | 350,036,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | # Generated by Django 3.1.7 on 2021-03-07 15:26
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated migration: adds the `about` comment field to Volunteer.
    # Do not edit the schema operations by hand — this file must stay in
    # sync with Django's migration history.

    dependencies = [
        ('mainapp', '0004_auto_20210307_1729'),
    ]

    operations = [
        migrations.AddField(
            model_name='volunteer',
            name='about',
            field=models.CharField(max_length=255, verbose_name='Коммантарий'),
            preserve_default=False,
        ),
    ]
| [
"mr.vadikru@mail.ru"
] | mr.vadikru@mail.ru |
5c83cbbae4b95517c03a9be67385a5ebb1f0cdd1 | aaf894645141fec4f75a857d7cd0f742919ec47e | /Q2/Q2_2.py | eede438b699e1df3ea3cf969b52382521a8b29e3 | [] | no_license | hyk10/HPC | b04488117d6c6e93c214f1edafae8d9bcc3e7ba0 | abb59f731dd665cdd4ef796f0820cb7f32680f7b | refs/heads/master | 2021-01-10T10:14:09.643011 | 2016-03-27T20:22:04 | 2016-03-27T20:22:04 | 53,947,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 27 14:28:34 2016
@author: hyk10
"""
import matplotlib.pyplot as plt
import random
import math
import subprocess
import yaml
print(1)
L_d = '1.0'
dt = []
dx = []
rmse = []
#compiling c++ code
for i in range(int(math.ceil(random.uniform(1.0, 1000.0)))):
#generating random arguments but fixing L for convenience
N_x_i = str(int(math.ceil(random.uniform(10.0, 50.0))))
T_d = str(math.ceil(random.uniform(5.0, 50.0)))
N_t_d = str(math.ceil(random.uniform(50.0, 10000.0)))
alpha_d = str(math.ceil(random.uniform(1.0, 5.0)))
#running c++ code with random numbers
args = ['./a.out', L_d , N_x_i, T_d, N_t_d, alpha_d]
subprocess.call (args, shell=True)
#importing dt,dx,rmse from c++ output txt file
text_file = open("dtdx.txt", "r")
varDxDt = text_file.read().split(',')
text_file.close()
#changing string to float format
dt.append(yaml.load(varDxDt[0]))
dx.append(yaml.load(varDxDt[1]))
rmse.append(yaml.load(varDxDt[2]))
#plotting RMSE v.s dt and dx
plt.scatter(dx,rmse)
plt.title(r'RMSE v.s dx')
plt.xlabel(r'dx, unit length')
plt.ylabel(r'Root Mean Square Error')
plt.show()
plt.scatter(dt,rmse)
plt.title(r'RMSE v.s dt')
plt.xlabel(r'dt(sec)')
plt.ylabel(r'Root Mean Square Error')
plt.show()
#p = subprocess.Popen('./a.out', stdin=subprocess.PIPE)
#p.communicate(os.linesep.join([L_d, N_x_i, T_d, N_t_d, alpha_d]))
# ERROR occurs as the expected type is bytes not string
# however, when switched to ints and converted to bytes, cannot use p.communicate() due to the fact that it can only be called once
#using stdin.write does not behave as desired either, as it does not input the values to the program invoked - p
#p.stdin.write(bytes(L_d))
#p.stdin.write(bytes(N_x_i))
#p.stdin.write(bytes(T_d))
#p.stdin.write(bytes(N_t_d))
#p.stdin.write(bytes(alpha_d)) | [
"hong.kim10@imperial.ac.uk"
] | hong.kim10@imperial.ac.uk |
ac6f0205b3ef4340feb2cca61438f21573f250fa | b8fa117bc2f7f4707ca0e0d77c5dca34481a0f7a | /tests/test_looper.py | 71cffb1a22e8ba578e192a7afbfb83573f490b07 | [
"MIT"
] | permissive | tds333/tempita-lite | 0e8f9bbadfc82ad89cbcb901136ecf4a39881981 | 1f7188a7e11031d972bdc9b02e43c297c12a189d | refs/heads/master | 2021-01-10T02:51:31.505001 | 2016-01-17T14:34:18 | 2016-01-17T14:34:18 | 49,818,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | # -*- coding: utf-8 -*-
from pytest import raises
from tempita_lite import *
def test_looper():
    # Walk a list with tempita_lite's looper() helper and verify the
    # per-iteration bookkeeping: `first`/`last` flags on the boundary
    # items, and the 1-based `number` counter on every item.
    seq = ['apple', 'asparagus', 'Banana', 'orange']
    result = [(1, 'apple'), (2, 'asparagus'), (3, 'Banana'), (4, 'orange')]
    for loop, item in looper(seq):
        if item == 'apple':
            assert loop.first
        elif item == 'orange':
            assert loop.last
        # loop.number is 1-based, so index result with number-1.
        assert result[loop.number-1] == (loop.number, item)
| [
"tds333@users.noreply.github.com"
] | tds333@users.noreply.github.com |
16f5a85d699528ebd3ae47296de7a318a8c7ec12 | 6118416057aac5ad2c4d2cf2bbe020f653d2b6d6 | /ProxyPool/proxypool/crawler.py | d085f4e5135275508c10d78062eef98a26b58af8 | [] | no_license | CarySun/Question_Answering_for_Recruitment_Based_on_Knowledge_Graph | 19a065df25bb889356d305aeb834a88d5efd3df6 | bbcf6cff74d62bc5676bfec716107003455c9bff | refs/heads/master | 2020-07-04T10:11:28.727729 | 2019-09-01T05:32:33 | 2019-09-01T05:32:33 | 202,252,503 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,858 | py | # coding:utf-8
import json
import re
from pyquery import PyQuery as pq
from .utils import get_page
class ProxyMetaclass(type):
    """Metaclass that registers every `crawl_*` method declared on a class.

    Adds two class attributes: __CrawlFunc__, the list of crawler method
    names, and __CrawlFuncCount__, how many there are.
    """

    def __new__(cls, name, bases, attrs):
        crawl_funcs = [attr for attr in attrs if 'crawl_' in attr]
        attrs['__CrawlFunc__'] = crawl_funcs
        attrs['__CrawlFuncCount__'] = len(crawl_funcs)
        return type.__new__(cls, name, bases, attrs)
class Crawler(object, metaclass=ProxyMetaclass):
    """Collection of free-proxy-site crawlers.

    Every method named `crawl_*` is auto-registered by ProxyMetaclass and
    yields proxies as "ip:port" strings; get_proxies() dispatches to one of
    them by name.
    """

    def get_proxies(self, callback):
        """Run the crawl_* method named by `callback`, collect its proxies."""
        proxies = []
        # Fix: the original used eval("self.{}()".format(callback)), which
        # would execute arbitrary code if `callback` were ever attacker
        # controlled. getattr dispatch is equivalent for calling a method
        # by name and is safe.
        for proxy in getattr(self, callback)():
            print('成功获取到代理', proxy)
            proxies.append(proxy)
        return proxies

    def crawl_daili66(self, page_count=4):
        """Crawl www.66ip.cn listing pages.

        :param page_count: number of pages to fetch
        :return: generator of "ip:port" strings
        """
        start_url = 'http://www.66ip.cn/{}.html'
        urls = [start_url.format(page) for page in range(1, page_count + 1)]
        for url in urls:
            print('Crawling', url)
            html = get_page(url)
            if html:
                doc = pq(html)
                trs = doc('.containerbox table tr:gt(0)').items()
                for tr in trs:
                    ip = tr.find('td:nth-child(1)').text()
                    port = tr.find('td:nth-child(2)').text()
                    yield ':'.join([ip, port])

    def crawl_ip3366(self):
        """Crawl www.ip3366.net free-proxy pages 1-3."""
        for page in range(1, 4):
            start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)
            html = get_page(start_url)
            # \s* also matches newlines between the table cells.
            ip_address = re.compile('<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
            re_ip_address = ip_address.findall(html)
            for address, port in re_ip_address:
                result = address + ':' + port
                yield result.replace(' ', '')

    def crawl_kuaidaili(self):
        """Crawl www.kuaidaili.com free-proxy pages 1-3."""
        for i in range(1, 4):
            start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)
            html = get_page(start_url)
            if html:
                ip_address = re.compile('<td data-title="IP">(.*?)</td>')
                re_ip_address = ip_address.findall(html)
                port = re.compile('<td data-title="PORT">(.*?)</td>')
                re_port = port.findall(html)
                for address, port in zip(re_ip_address, re_port):
                    address_port = address + ':' + port
                    yield address_port.replace(' ', '')

    def crawl_xicidaili(self):
        """Crawl www.xicidaili.com (requires a session cookie)."""
        for i in range(1, 3):
            start_url = 'http://www.xicidaili.com/nn/{}'.format(i)
            headers = {
                'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                'Cookie':'_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWRjYzc5MmM1MTBiMDMzYTUzNTZjNzA4NjBhNWRjZjliBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMUp6S2tXT3g5a0FCT01ndzlmWWZqRVJNek1WanRuUDBCbTJUN21GMTBKd3M9BjsARg%3D%3D--2a69429cb2115c6a0cc9a86e0ebe2800c0d471b3',
                'Host':'www.xicidaili.com',
                'Referer':'http://www.xicidaili.com/nn/3',
                'Upgrade-Insecure-Requests':'1',
            }
            html = get_page(start_url, options=headers)
            if html:
                find_trs = re.compile('<tr class.*?>(.*?)</tr>', re.S)
                trs = find_trs.findall(html)
                for tr in trs:
                    find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
                    re_ip_address = find_ip.findall(tr)
                    find_port = re.compile('<td>(\d+)</td>')
                    re_port = find_port.findall(tr)
                    for address, port in zip(re_ip_address, re_port):
                        address_port = address + ':' + port
                        yield address_port.replace(' ', '')

    def crawl_iphai(self):
        """Crawl www.iphai.com's front-page proxy table."""
        start_url = 'http://www.iphai.com/'
        html = get_page(start_url)
        if html:
            find_tr = re.compile('<tr>(.*?)</tr>', re.S)
            trs = find_tr.findall(html)
            # Skip trs[0]: it is the table header row.
            for s in range(1, len(trs)):
                find_ip = re.compile('<td>\s+(\d+\.\d+\.\d+\.\d+)\s+</td>', re.S)
                re_ip_address = find_ip.findall(trs[s])
                find_port = re.compile('<td>\s+(\d+)\s+</td>', re.S)
                re_port = find_port.findall(trs[s])
                for address, port in zip(re_ip_address, re_port):
                    address_port = address + ':' + port
                    yield address_port.replace(' ', '')

    def crawl_data5u(self):
        """Crawl www.data5u.com's domestic free-proxy list."""
        start_url = 'http://www.data5u.com/free/gngn/index.shtml'
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',
            'Host': 'www.data5u.com',
            'Referer': 'http://www.data5u.com/free/index.shtml',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
        }
        html = get_page(start_url, options=headers)
        if html:
            ip_address = re.compile('<span><li>(\d+\.\d+\.\d+\.\d+)</li>.*?<li class=\"port.*?>(\d+)</li>', re.S)
            re_ip_address = ip_address.findall(html)
            for address, port in re_ip_address:
                result = address + ':' + port
                yield result.replace(' ', '')
| [
"caryjimmy@outlook.com"
] | caryjimmy@outlook.com |
13d94fbc8c529347564e68ae352e8791e6f3f999 | 21db32a9f83bb6fc8cb5e7b9fe7ddbfdfee05fdd | /harrysCoffee/harrysCoffee/urls.py | d782f776c2967ce85ed9594201c4fc74eb69e919 | [] | no_license | ijhan21/RAPA_project_QRCodeCafe | 90de73988b6b8f2af925fe5005aa5e868a056149 | 2b7489056e59c15ae934ba92c9ec7cbb02ae8f71 | refs/heads/main | 2023-08-26T21:28:43.223744 | 2021-11-07T13:26:10 | 2021-11-07T13:26:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,384 | py | """harrysCoffee URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from userprofile import views
# from apps.common.views import HomeView, SignUpView, DashboardView, ProfileUpdateView, ProfileView
from django.contrib.auth import views as auth_views
# Active routes: the admin site plus everything delegated to the
# userprofile app. The commented-out entries below are scaffolding for
# auth/password-reset views that are not currently wired up.
urlpatterns = [
    path('admin/', admin.site.urls),
    # path('',views.HomeView.as_view(), name='home'),
    path('', include('userprofile.urls')),
    # path('order/', include('order.urls')),
    # path('purchase/', include('purchase.urls')),
    # path('shopping/', include('shopping.urls')),
    # path('', HomeView.as_view(), name='home'),
    # path('dashboard/', DashboardView.as_view(
    #     template_name='example.html'
    # ),
    #     name='dashboard'
    # ),
    # path('profile-update/', ProfileUpdateView.as_view(), name='profile-update'),
    # path('profile/', ProfileView.as_view(), name='profile'),
    # # Authentication
    # path('register/', SignUpView.as_view(), name="register"),
    # path('login/', auth_views.LoginView.as_view(
    #     template_name='common/login.html'
    # ),
    #     name='login'
    # ),
    # path('logout/', auth_views.LogoutView.as_view(
    #     next_page='home'
    # ),
    #     name='logout'
    # ),
    # path(
    #     'change-password/',
    #     auth_views.PasswordChangeView.as_view(
    #         template_name='common/change-password.html',
    #         success_url='/'
    #     ),
    #     name='change-password'
    # ),
    # # Forget Password
    # path('password-reset/',
    #     auth_views.PasswordResetView.as_view(
    #         template_name='common/password-reset/password_reset.html',
    #         subject_template_name='common/password-reset/password_reset_subject.txt',
    #         email_template_name='common/password-reset/password_reset_email.html',
    #         # success_url='/login/'
    #     ),
    #     name='password_reset'),
    # path('password-reset/done/',
    #     auth_views.PasswordResetDoneView.as_view(
    #         template_name='common/password-reset/password_reset_done.html'
    #     ),
    #     name='password_reset_done'),
    # path('password-reset-confirm/<uidb64>/<token>/',
    #     auth_views.PasswordResetConfirmView.as_view(
    #         template_name='common/password-reset/password_reset_confirm.html'
    #     ),
    #     name='password_reset_confirm'),
    # path('password-reset-complete/',
    #     auth_views.PasswordResetCompleteView.as_view(
    #         template_name='common/password-reset/password_reset_complete.html'
    #     ),
    #     name='password_reset_complete'),
    # path('oauth/', include('social_django.urls', namespace='social')), # <-- here
]
| [
"reinforcehan@gmail.com"
] | reinforcehan@gmail.com |
5aa1a0019409474b6fc59471225e3bba912b4d8a | 9979bd49c4bdd12e58282562847a76bd9830c100 | /tests/test_alpha.py | bb50e65677616b1b6dabb8d7ed8bdabc8a897abc | [] | no_license | prasertcbs/basic_pytest | 76a3de6192fdccd0f7f4b599e0481ae16bccc3f3 | f4b8f68328cc7c1422d4b8c308f144b7b26995cf | refs/heads/main | 2023-02-07T11:37:18.974152 | 2021-01-03T06:49:26 | 2021-01-03T06:49:26 | 322,767,166 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # doc https://docs.pytest.org/en/stable/index.html
from src.area import rectangle, triangle
from src.volume import *
import pytest
def test_one():
    """Area helpers return expected values for rectangles and triangles."""
    # The original had `assert rectangle(5, 2) == 10` twice; the duplicate
    # assertion added no coverage and has been removed.
    assert rectangle(5, 2) == 10
    assert rectangle(5, 4.5) == 22.5
    assert triangle(5, 2) == 5
def test_volume():
    """Volume helpers: exact values for integer cubes, approximate for floats."""
    # Integer edge lengths give exact cubes.
    for edge, expected in ((3, 27), (4, 64)):
        assert cubic(edge) == expected
    # Non-integer cube: approximate comparison, then a rounding check.
    assert cubic(1.7) == pytest.approx(4.912, .001)
    assert round(cubic(1.7), 2) == 4.91
    # Cylinder volume: exact float equality, then an approximate check.
    assert cylinder(1, 10) == 31.41592653589793
    assert cylinder(1, 10) == pytest.approx(31.4159, .0001)
| [
"bluecotton.k@gmail.com"
] | bluecotton.k@gmail.com |
a0f369cb6dbc35d2c1dcb49d0b2b181c956a5d1b | 4b704bf97694f64b114727473cb4e3a8981015a8 | /TDD_django/lists/migrations/0001_initial.py | d5255d879bf157ce5e7afe5919ab1da002a2a7a8 | [] | no_license | smartfish007/githubtest | a07fbbb80de2684ac79f29c25feed90bf2a5fee8 | 226c2cc9a2a0e7b0b39bef2106b0ee1a1545d002 | refs/heads/master | 2020-05-22T15:17:38.924749 | 2019-10-28T02:43:18 | 2019-10-28T02:43:18 | 186,404,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | # Generated by Django 2.2.6 on 2019-10-24 02:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Item`` table."""
    # First migration for this app, so there are no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        # Item rows carry an auto-incrementing primary key and a
        # free-form text column.
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
            ],
        ),
    ]
| [
"1354275149@qq.com"
] | 1354275149@qq.com |
01ccab3887279b745e87952b9cf90ff74ac68252 | bbea4bdeb66a6249cd3606214708f87acb1bbe77 | /modeling/model_random_forest.py | c1dfcc485c4b1ade144149ea72f6e500710bfd29 | [] | no_license | mikochen0107/hospitalization-prediction | 7e397e05945818e6aa6a0f19aa8d39d840f5c5e5 | e760d208ac1d930c0717d8b8249a2a5d2e1ab213 | refs/heads/main | 2023-03-20T09:09:17.741019 | 2021-03-18T07:10:36 | 2021-03-18T07:10:36 | 348,963,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
# import training data
X_train = np.genfromtxt(r"M:\UCSF_ARS\michael_thesis\processed_data\X_train_knn.csv", delimiter=',')
y_train = np.genfromtxt(r"M:\UCSF_ARS\michael_thesis\processed_data\y_train.csv", delimiter=',')
# Drop the first row and keep the second column — presumably the CSV has a
# header row and the label lives in column 1. TODO confirm against the file.
y_train = y_train[1:,1]
# hyperparameter optimization with CV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
               'max_depth': max_depth,
               'min_samples_split': min_samples_split,
               'min_samples_leaf': min_samples_leaf,
               'bootstrap': bootstrap}
rf = RandomForestClassifier()
# 100 random draws from the grid, 5-fold CV, scored on both ROC AUC and
# precision; the refit model is chosen by precision.
rf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, n_iter=100, cv=5,
                               scoring=['roc_auc', 'precision'], refit='precision',
                               verbose=3, random_state=7, n_jobs = -1)
rf_random.fit(X_train, y_train)
# CV results: best params, best score, and the cv results
cv_results = pd.DataFrame(rf_random.cv_results_)
# ranking based on roc auc
# NOTE(review): these bare expressions only display in an interactive
# session; they have no effect when run as a script.
cv_results.sort_values(by=['rank_test_roc_auc'])['params']
cv_results.sort_values(by=['rank_test_roc_auc'])['mean_test_roc_auc']
# ranking based on precision
cv_results.sort_values(by=['rank_test_precision'])['params']
cv_results.sort_values(by=['rank_test_precision'])['mean_test_precision']
cv_results.to_csv(r'M:\UCSF_ARS\michael_thesis\cross_validation\random_forest_CV.csv')
# selecting the 'best' hyperparams from CV to train model
# 36 from the hyperparams cv results
rf = RandomForestClassifier(n_estimators=1400, min_samples_split=5, min_samples_leaf=4,
                            max_depth=None, bootstrap=True, random_state=7, n_jobs=-1)
rf.fit(X_train, y_train)
# model calibration
from sklearn.calibration import CalibratedClassifierCV
calibrated_clf = CalibratedClassifierCV(base_estimator=rf, cv=5)
calibrated_clf.fit(X_train, y_train)
y_pred_rf = rf.predict(X_train) # get the predictions (0/1)
y_prob_rf = calibrated_clf.predict_proba(X_train)[:, 1] # get the prob for predicting 1s
# metrics for training set
# NOTE(review): model_metrics is not defined or imported in this file —
# presumably defined elsewhere in the session/notebook. Verify before running.
model_metrics(y_train, y_pred_rf, y_prob_rf)
# evaluate with test set
X_test = np.genfromtxt(r"M:\UCSF_ARS\michael_thesis\processed_data\X_test_knn.csv", delimiter=',')
y_test = np.genfromtxt(r"M:\UCSF_ARS\michael_thesis\processed_data\y_test.csv", delimiter=',')
y_test = y_test[1:,1]
y_pred_rf_test = rf.predict(X_test) # get the predictions (0/1)
y_prob_rf_test = calibrated_clf.predict_proba(X_test)[:, 1] # get the prob for predicting 1s
model_metrics(y_test, y_pred_rf_test, y_prob_rf_test)
| [
"noreply@github.com"
] | mikochen0107.noreply@github.com |
6c3781d38ce07a5781ef98ec34f4ca29f730e085 | 3f3b03d60187d35277706997caea0fbf9ba51c50 | /data/utils/add_ballot_set.py | 0147d955e9560320813b0ba6d194c73f0a31d4c6 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | jamespenn/tabbycat | 9ffc1df302b49845ac2bd450977751a845f747be | 01d116436c79de6aa79ba74533928ff030b04409 | refs/heads/master | 2021-01-21T14:40:02.105816 | 2015-02-27T23:41:54 | 2015-02-27T23:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,725 | py | """Adds a randomly generated ballot set to the given debates."""
import header
import debate.models as m
from django.contrib.auth.models import User
from debate.result import BallotSet
import random
# Maps the command-line --type choice to the model's submitter-type constant.
SUBMITTER_TYPE_MAP = {
    'tabroom': m.BallotSubmission.SUBMITTER_TABROOM,
    'public': m.BallotSubmission.SUBMITTER_PUBLIC
}
def add_ballot_set(debate, submitter_type, user, discarded=False, confirmed=False, min_score=72, max_score=78):
    """Attach a randomly generated ballot set to *debate* and return it."""
    if discarded and confirmed:
        raise ValueError("Ballot can't be both discarded and confirmed!")

    # Record who submitted this ballot; only tab-room submissions carry a user.
    submission = m.BallotSubmission(submitter_type=submitter_type, debate=debate)
    if submitter_type == m.BallotSubmission.SUBMITTER_TABROOM:
        submission.user = user
    submission.save()

    def draw_speaker_scores():
        # Three substantive speaker scores plus a half-weighted reply score.
        scores = [random.randint(min_score, max_score) for _ in range(3)]
        scores.append(random.randint(min_score, max_score) / 2.0)
        return scores

    def gen_results():
        # Re-draw both sides until the team totals differ (no tied debates).
        aff_scores, neg_scores = (0,), (0,)
        while sum(aff_scores) == sum(neg_scores):
            aff_scores = draw_speaker_scores()
            neg_scores = draw_speaker_scores()
        return {'aff': aff_scores, 'neg': neg_scores}

    results_by_adj = {}
    for adj in debate.adjudicators.list:
        results_by_adj[adj] = gen_results()

    ballot_set = BallotSet(submission)
    for side in ('aff', 'neg'):
        speakers = getattr(debate, '%s_team' % side).speakers
        # Substantive speeches 1-3, then the first speaker repeats as reply (4).
        for pos in (1, 2, 3):
            ballot_set.set_speaker(side=side, pos=pos, speaker=speakers[pos - 1])
        ballot_set.set_speaker(side=side, pos=4, speaker=speakers[0])
        for adj in debate.adjudicators.list:
            for pos in (1, 2, 3, 4):
                ballot_set.set_score(adj, side, pos, results_by_adj[adj][side][pos - 1])

    # Attach a randomly chosen motion, when the round defines any.
    motions = debate.round.motion_set.all()
    if motions:
        ballot_set.motion = random.choice(motions)

    ballot_set.discarded = discarded
    ballot_set.confirmed = confirmed
    ballot_set.save()

    # A confirmed ballot implies the debate itself is confirmed.
    if confirmed:
        debate.result_status = m.Debate.STATUS_CONFIRMED
        debate.save()
    return ballot_set
if __name__ == "__main__":
    # Command-line driver (Python 2 syntax: print statements and
    # `except ValueError, e` below).
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("debate", type=int, nargs='+', help="Debate ID(s) to add to")
    parser.add_argument("-t", "--type", type=str, help="'tabroom' or 'public'", choices=SUBMITTER_TYPE_MAP.keys(), default="tabroom")
    parser.add_argument("-u", "--user", type=str, help="User ID", default="original")
    # A ballot set must be marked either discarded or confirmed, never both.
    status = parser.add_mutually_exclusive_group(required=True)
    status.add_argument("-d", "--discarded", action="store_true", help="Ballot set is discarded")
    status.add_argument("-c", "--confirmed", action="store_true", help="Ballot set is confirmed")
    parser.add_argument("-m", "--min-score", type=float, help="Minimum speaker score (for substantive)", default=72)
    parser.add_argument("-M", "--max-score", type=float, help="Maximum speaker score (for substantive)", default=78)
    args = parser.parse_args()
    submitter_type = SUBMITTER_TYPE_MAP[args.type]
    # Only tab-room submissions are attributed to a Django user.
    if submitter_type == m.BallotSubmission.SUBMITTER_TABROOM:
        user = User.objects.get(username=args.user)
    else:
        user = None
    for debate_id in args.debate:
        debate = m.Debate.objects.get(id=debate_id)
        print debate
        try:
            bset = add_ballot_set(debate, submitter_type, user, args.discarded, args.confirmed, args.min_score, args.max_score)
        except ValueError, e:
            print "Error:", e
print " Won by", bset.aff_win and "affirmative" or "negative" | [
"czlee@stanford.edu"
] | czlee@stanford.edu |
12d37e32bdf2c73f7d04fd1ee807c5fab5b654a9 | c70f67510e650fbd39a265dba1675249c4db4b59 | /images/src/cvcs.py | 8b9375de94c69038852ffa7f97d608e35696979d | [
"MIT"
] | permissive | captainobvious62/euroscipy-git-tutorial | 07a25ccfd8bb89f9ef15e67856ac79c08909d27a | b787ac07fe0ceee340cc2cc29470eb04eacee23c | refs/heads/master | 2021-05-03T07:08:17.619835 | 2017-09-02T14:48:06 | 2017-09-02T14:48:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,634 | py | from pyx import canvas, color, deco, deformer, path, style, text, trafo, unit
def server(r, servercolor=color.rgb(0.5, 0.5, 0.8)):
    """Draw a database-cylinder "server" glyph of radius r on a fresh canvas."""
    glyph = canvas.canvas()
    # Top lid: a circle squashed vertically to suggest perspective.
    glyph.fill(path.circle(0, 0, r), [servercolor, trafo.scale(1, 0.5)])
    body_height = 2 * r
    # Cylinder body: straight sides joined by a bottom arc and a top arc.
    outline = path.path(
        path.moveto(-r, 0),
        path.lineto(-r, -body_height),
        path.arc(0, -body_height, r, 180, 0),
        path.lineto(r, 0),
        path.arcn(0, 0, r, 0, 180),
        path.closepath(),
    )
    glyph.fill(outline, [servercolor, trafo.scale(1, 0.5).translated(0, -0.08 * r)])
    return glyph
def client(clientcolor=color.rgb(0.8, 0.5, 0.5)):
    """Draw a head-and-shoulders "client" glyph on a fresh canvas."""
    glyph = canvas.canvas()
    # Head: a small filled circle at the origin.
    head_radius = 0.3
    glyph.fill(path.circle(0, 0, head_radius), [clientcolor])
    # Shoulders: a filled Bezier arch, shifted down below the head.
    arch_radius = 0.5
    arch = path.path(
        path.moveto(-arch_radius, 0),
        path.curveto(-arch_radius, arch_radius, arch_radius, arch_radius, arch_radius, 0),
        path.closepath(),
    )
    glyph.fill(arch, [clientcolor, trafo.translate(0, -1.3 * arch_radius)])
    return glyph
arrowcolor = color.grey(0.5)
text.set(text.LatexRunner)
text.preamble(r'\usepackage{arev}\usepackage[T1]{fontenc}')
unit.set(xscale=1.3)
c = canvas.canvas()
r = 1
# Central server glyph with its caption above.
c.insert(server(r))
c.text(0, 0.5*r+0.3, 'central server', [text.halign.center])
h = 1.7
l = 2
# Three double-headed arrows fanning out from the server to the clients.
for phi in (-30, 0, 30):
    c.stroke(path.line(0, -h, 0, -h-l), [arrowcolor, style.linewidth.THICK,
                                         deco.barrow.LArge, deco.earrow.LArge,
                                         trafo.rotate(phi)])
# Three client glyphs placed under the arrows, plus a caption.
for dx, dy in ((-2, -3.7), (0, -4.2), (2, -3.7)):
    c.insert(client(), [trafo.translate(dx, dy)])
c.text(0, -5.5, 'clients', [text.halign.center])
dy = 0.8
dx = 1.5
versionoff = 1.5
cf = canvas.canvas()
hueoff = 0.17
nr_revisions = 0
# Revision chart: one row per file, with colored bars spanning the revision
# ranges during which each file version was current.
for nr, (name, versions) in enumerate((('file 1', (0, 2, 4, 5)),
                                       ('file 2', (0, 1, 2, 3, 5)),
                                       ('file 3', (1, 4, 5)))):
    nr_revisions = max(nr_revisions, max(versions))
    hue = hueoff+nr/3
    cf.text(0, -nr*dy, name, [color.hsb(hue, 1, 0.5), text.valign.middle])
    for nver, (v1, v2) in enumerate(zip(versions[:-1], versions[1:])):
        y = -(nr+0.4)*dy
        lv = len(versions)-1
        # Later versions of a file are drawn more saturated.
        cf.fill(path.rect(v1*dx+versionoff+0.1, y, (v2-v1)*dx-0.2, 0.8*dy),
                [color.hsb(hue, 1-(lv-1-nver)/(lv-1)*0.7, 0.6)])
# Revision labels r1..rN across the top of the chart.
for n in range(nr_revisions):
    cf.text((n+0.5)*dx+versionoff, 0.5, 'r{}'.format(n+1), [text.halign.center])
# Rounded rectangle highlighting one revision column.
cf.stroke(path.rect(3*dx+versionoff, -2.6*dy, dx, 2.6*dy+1.0),
          [style.linewidth.THIck, deformer.smoothed(0.3)])
c.insert(cf, [trafo.translate(4.5, -2)])
c.writePDFfile()
| [
"gert.ingold@physik.uni-augsburg.de"
] | gert.ingold@physik.uni-augsburg.de |
0a4b8ca43554cab15c582bb8547a78e501b55484 | 312f16ba4fa6af5e6a59b83b399a04a3fb3d733f | /cj.py | f8ce421b3ec55ec7ce870c3162aa499a6be452ba | [] | no_license | darkzone964/cj | 1e17a52967692d31e5ef6115bb4c3faaddfd0991 | 215dd8ee7c807cacaa17e5b30568edb26bfc056e | refs/heads/main | 2023-08-12T15:16:25.795548 | 2021-10-14T01:13:46 | 2021-10-14T01:13:46 | 416,946,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,652 | py | import random
import requests
import pyfiglet
import time
# ANSI terminal colour escape codes used for styled output.
Z = '\033[1;31m' # red
X = '\033[1;33m' # yellow
Z1 = '\033[2;31m' # second red (dim)
F = '\033[2;32m' # green
A = '\033[2;34m' # blue
C = '\033[2;35m' # pink
B = '\033[2;36m' # sky blue
Y = '\033[1;34m' # light blue
# Print the ASCII-art banners.
logo = pyfiglet.figlet_format("$ DARK $")
print(Z+logo)
time.sleep(2)
cj = pyfiglet.figlet_format("Ah shit, here we go again")
print(F+cj)
# Telegram chat id and bot token used later to report found usernames.
ID = input('ID ==»: ')
print(' ')
token = input(' TOKEN ==» : ')
print('''
''')
# Menu: which username length/pattern to brute-force.
print(C+'''[1] USER 4 & 5
[2] USER 4
[3] USER 5
[4] USER 6
''')
H = input(X+' CHOOSE : ')
# Response fingerprint meaning the username itself is available: account
# creation failed only due to the throwaway email and missing password.
ku = ('{"account_created": false, "errors": {"email": [{"message": "Too many accounts are using a@gmail.com.", "code": "email_sharing_limit"}], "__all__": [{"message": "Create a password at least 6 characters long.", "code": "too_short_password"}]}, "dryrun_passed": false, "username_suggestions": [], "status": "ok", "error_type": "form_validation_error"}')
# Option 1: endlessly try random 5-character candidates from this charset.
if H == '1':
    uus = '._irteaszxcv._1234567890'
    while True:
        dark = str(''.join(random.choice(uus) for i in range(5)))
        url = 'https://www.instagram.com/accounts/web_create_ajax/attempt/'
        headers_kai={
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'ar,en-US;q=0.9,en;q=0.8,ar-SA;q=0.7',
        'content-length': '61',
        'content-type': 'application/x-www-form-urlencoded',
        'cookie': 'ig_cb=2; ig_did=BB52B198-B05A-424E-BA07-B15F3D4C3893; mid=YAlcaQALAAHzmX6nvD8dWMRVYFCO; shbid=15012; rur=PRN; shbts=1612894029.7666144; csrftoken=CPKow8myeXW9AuB3Lny0wNxx0EzoDQoI',
        'origin': 'https://www.instagram.com',
        'referer': 'https://www.instagram.com/accounts/emailsignup/',
        'sec-ch-ua': '"Google Chrome";v="87", " Not;A Brand";v="99", "Chromium";v="87"',
        'sec-ch-ua-mobile': '?0',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
        'x-csrftoken': 'CPKow8myeXW9AuB3Lny0wNxx0EzoDQoI',
        'x-ig-app-id': '936619743392459',
        'x-ig-www-claim': 'hmac.AR0Plwj5om112fwzrrYnMNjMLPnyWfFFq1tG7MCcMv5_vN9M',
        'x-instagram-ajax': '72bda6b1d047',
        'x-requested-with': 'XMLHttpRequest'
        }
        datas_kai={
        'email' : 'a@gmail.com',
        'username': f'{dark}',
        'first_name': 'AA',
        'opt_into_one_tap': 'false'
        }
        kd = requests.post(url,headers=headers_kai,data=datas_kai).text
        # If the availability fingerprint matches, report the hit via Telegram.
        if ku in kd:
            print(F + ' yes HA HA HA HA ==»: ' + dark)
            tlg =(f'''https://api.telegram.org/bot{token}/sendMessage?chat_id={ID}&text=
صدتلك يوزر شبه رباعي 
@{dark} ✓
@Ft_r5 π https://t.me/justpython1''')
            i = requests.post(tlg)
        else:
            print(Z + ' poop 💩 ==»: ' + dark)
# Option 2: endlessly try random 4-character candidates from this charset.
if H == '2':
    uus = 'qwertyuiopasdfghjklzxcvbnm._1234567890'
    while True:
        Ft_r5 = str(''.join(random.choice(uus) for i in range(4)))
        url = 'https://www.instagram.com/accounts/web_create_ajax/attempt/'
        headers_kai={
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'ar,en-US;q=0.9,en;q=0.8,ar-SA;q=0.7',
        'content-length': '61',
        'content-type': 'application/x-www-form-urlencoded',
        'cookie': 'ig_cb=2; ig_did=BB52B198-B05A-424E-BA07-B15F3D4C3893; mid=YAlcaQALAAHzmX6nvD8dWMRVYFCO; shbid=15012; rur=PRN; shbts=1612894029.7666144; csrftoken=CPKow8myeXW9AuB3Lny0wNxx0EzoDQoI',
        'origin': 'https://www.instagram.com',
        'referer': 'https://www.instagram.com/accounts/emailsignup/',
        'sec-ch-ua': '"Google Chrome";v="87", " Not;A Brand";v="99", "Chromium";v="87"',
        'sec-ch-ua-mobile': '?0',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
        'x-csrftoken': 'CPKow8myeXW9AuB3Lny0wNxx0EzoDQoI',
        'x-ig-app-id': '936619743392459',
        'x-ig-www-claim': 'hmac.AR0Plwj5om112fwzrrYnMNjMLPnyWfFFq1tG7MCcMv5_vN9M',
        'x-instagram-ajax': '72bda6b1d047',
        'x-requested-with': 'XMLHttpRequest'
        }
        datas_kai={
        'email' : 'a@gmail.com',
        'username': f'{Ft_r5}',
        'first_name': 'AA',
        'opt_into_one_tap': 'false'
        }
        kd = requests.post(url,headers=headers_kai,data=datas_kai).text
        # If the availability fingerprint matches, report the hit via Telegram.
        if ku in kd:
            print(F + ' yes HA HA HA 😎 ==»: ' + Ft_r5)
            tlg =(f'''https://api.telegram.org/bot{token}/sendMessage?chat_id={ID}&text=
صدتلك يوزر رباعي 
@{Ft_r5} ✓
@Ft_r5 $ https://t.me/justpython1''')
            i = requests.post(tlg)
        else:
            print(Z + ' زربة ===»: ~ ' + Ft_r5)
# Option 3: endlessly try random 5-character candidates from this charset.
if H == '3':
    uus = 'ertuioasjlzxcvn1234567890._'
    while True:
        nice = str(''.join(random.choice(uus) for i in range(5)))
        url = 'https://www.instagram.com/accounts/web_create_ajax/attempt/'
        headers_kai={
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'ar,en-US;q=0.9,en;q=0.8,ar-SA;q=0.7',
        'content-length': '61',
        'content-type': 'application/x-www-form-urlencoded',
        'cookie': 'ig_cb=2; ig_did=BB52B198-B05A-424E-BA07-B15F3D4C3893; mid=YAlcaQALAAHzmX6nvD8dWMRVYFCO; shbid=15012; rur=PRN; shbts=1612894029.7666144; csrftoken=CPKow8myeXW9AuB3Lny0wNxx0EzoDQoI',
        'origin': 'https://www.instagram.com',
        'referer': 'https://www.instagram.com/accounts/emailsignup/',
        'sec-ch-ua': '"Google Chrome";v="87", " Not;A Brand";v="99", "Chromium";v="87"',
        'sec-ch-ua-mobile': '?0',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
        'x-csrftoken': 'CPKow8myeXW9AuB3Lny0wNxx0EzoDQoI',
        'x-ig-app-id': '936619743392459',
        'x-ig-www-claim': 'hmac.AR0Plwj5om112fwzrrYnMNjMLPnyWfFFq1tG7MCcMv5_vN9M',
        'x-instagram-ajax': '72bda6b1d047',
        'x-requested-with': 'XMLHttpRequest'
        }
        datas_kai={
        'email' : 'a@gmail.com',
        'username': f'{nice}',
        'first_name': 'AA',
        'opt_into_one_tap': 'false'
        }
        kd = requests.post(url,headers=headers_kai,data=datas_kai).text
        # If the availability fingerprint matches, report the hit via Telegram.
        if ku in kd:
            print(F + ' Y ~ ' + nice)
            tlg =(f'''https://api.telegram.org/bot{token}/sendMessage?chat_id={ID}&text=
صدتلك يوزر خماسي 
@{nice} ✓
@Ft_r5 ♡ https://t.me/justpython1''')
            i = requests.post(tlg)
        else:
            print(Z + ' NOO ~ ' + nice)
# Option 4: endlessly try random 6-character candidates from this charset.
# NOTE(review): the loop variable `help` shadows the builtin of that name.
if H == '4':
    uus = 'ertuioasjlzxcvn1234567890._'
    while True:
        help = str(''.join(random.choice(uus) for i in range(6)))
        url = 'https://www.instagram.com/accounts/web_create_ajax/attempt/'
        headers_kai={
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'ar,en-US;q=0.9,en;q=0.8,ar-SA;q=0.7',
        'content-length': '61',
        'content-type': 'application/x-www-form-urlencoded',
        'cookie': 'ig_cb=2; ig_did=BB52B198-B05A-424E-BA07-B15F3D4C3893; mid=YAlcaQALAAHzmX6nvD8dWMRVYFCO; shbid=15012; rur=PRN; shbts=1612894029.7666144; csrftoken=CPKow8myeXW9AuB3Lny0wNxx0EzoDQoI',
        'origin': 'https://www.instagram.com',
        'referer': 'https://www.instagram.com/accounts/emailsignup/',
        'sec-ch-ua': '"Google Chrome";v="87", " Not;A Brand";v="99", "Chromium";v="87"',
        'sec-ch-ua-mobile': '?0',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
        'x-csrftoken': 'CPKow8myeXW9AuB3Lny0wNxx0EzoDQoI',
        'x-ig-app-id': '936619743392459',
        'x-ig-www-claim': 'hmac.AR0Plwj5om112fwzrrYnMNjMLPnyWfFFq1tG7MCcMv5_vN9M',
        'x-instagram-ajax': '72bda6b1d047',
        'x-requested-with': 'XMLHttpRequest'
        }
        datas_kai={
        'email' : 'a@gmail.com',
        'username': f'{help}',
        'first_name': 'AA',
        'opt_into_one_tap': 'false'
        }
        kd = requests.post(url,headers=headers_kai,data=datas_kai).text
        # If the availability fingerprint matches, report the hit via Telegram.
        if ku in kd:
            print(F + ' Y ~ ' + help)
            tlg =(f'''https://api.telegram.org/bot{token}/sendMessage?chat_id={ID}&text=
صدتلك يوزر سداسي 
@{help} ✓
@Ft_r5 ༺https://t.me/justpython1༻''')
            i = requests.post(tlg)
        else:
            print(Z + ' NOO ~ ' + help)
| [
"noreply@github.com"
] | darkzone964.noreply@github.com |
ea85b7a01c35594aa879e8c19cf32d713ae7b570 | e27b8e2cff428e9d868658a62f541d4ec1b2c9bb | /client.py | 81b8bd1bba9c8d25dc98f2a14a282683f1cf8579 | [] | no_license | KooKaik/YDays-Python | 14f2af7078cac11efac969934b2317375005b1c2 | fa2f960cc416415067a29a330d4ae18f242dcfe6 | refs/heads/main | 2023-02-25T17:34:59.786630 | 2021-01-20T15:28:31 | 2021-01-20T15:28:31 | 331,334,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | import socket, sys
# Simple TCP chat client. Protocol: every payload is preceded by a
# fixed-width (HEADER_LENGTH bytes) left-justified ASCII length header.
HEADER_LENGTH = 128
HOST = '127.0.0.1'
PORT = 5000
pseudo = input("Username: ")
# Create the socket
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to the server's IP and port
client_socket.connect((HOST, PORT))
# Encode the username and send it to the server
username = pseudo.encode('utf-8')
username_header = f"{len(username):<{HEADER_LENGTH}}".encode('utf-8')
client_socket.send(username_header + username)
# Welcome message
print(f"Bienvenue à toi aventurier {pseudo}")
while True:
    # Wait for the user to type a message
    message = input(f'{pseudo} > ')
    if message:
        # Encode the message and send it to the server
        message = message.encode('utf-8')
        message_header = f"{len(message):<{HEADER_LENGTH}}".encode('utf-8')
        client_socket.send(message_header + message)
| [
"noreply@github.com"
] | KooKaik.noreply@github.com |
2fe2b0bec6d4e99d94157d4fce5d7ca7f970acf0 | 4a56cce6ab7d931cb80258dba608eff9413ea8fe | /dnac_pnp/site_handler.py | 77279b3d9753f075b2c5389cdf45381ca7c690a0 | [
"BSD-3-Clause"
] | permissive | nttde/dnac_pnp | cf91e8eacdcb69894eb59a4c48ae12e17954e606 | 248dc397fc083ff13f5a0106b1cde77a9aaead55 | refs/heads/dev | 2023-01-02T06:08:30.494584 | 2020-10-28T09:49:42 | 2020-10-28T09:49:42 | 303,541,117 | 5 | 1 | BSD-3-Clause | 2020-10-28T09:52:00 | 2020-10-12T23:58:48 | Python | UTF-8 | Python | false | false | 6,454 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Site handler functions"""
# Import builtin python libraries
import json
import logging
import sys
# import external python libraries
import click
from yaml import load
import yaml
# Import custom (local) python packages
from .api_call_handler import call_api_endpoint, get_response
from .api_endpoint_handler import generate_api_url
from .dnac_params import area_essentials, building_essentials, floor_essentials
from .dnac_token_generator import generate_token
from .header_handler import get_headers
from .utils import divider, goodbye
# Source code meta data
__author__ = "Dalwar Hossain"
__email__ = "dalwar.hossain@global.ntt"
# Check dict keys
def _check_dict_keys(dict_to_check=None, dnac_site_type=None):
    """
    Verify that a site's config dict carries every key its site type requires.

    :param dict_to_check: (dict) Site configuration dictionary being validated
    :param dnac_site_type: (str) Cisco DNA center site type (area, building, floor)
    :returns: (bool) True when all required keys are present (exits the
        program on the first missing key or an unknown site type)
    """
    click.secho(f"[$] Checking config file keys.....", fg="blue")
    required_keys_by_type = {
        "area": area_essentials,
        "building": building_essentials,
        "floor": floor_essentials,
    }
    keys_ok = False
    try:
        for required_key in required_keys_by_type[dnac_site_type]:
            # Abort immediately on the first missing key.
            if required_key not in dict_to_check.keys():
                click.secho(
                    f"[x] [{required_key}] key is missing for site type: [{dnac_site_type}]!",
                    fg="red",
                )
                sys.exit(1)
            keys_ok = True
    except KeyError:
        # Raised when dnac_site_type is not one of the known site types.
        click.secho(f"[x] Essential key is missing from site configuration!", fg="red")
        sys.exit(1)
    return keys_ok
# Generate site payload
def _generate_site_payload(site=None):
    """
    This private function generates the API payload for a single site.

    :param site: (dict) Single site config as python dict (one top-level key:
        the site name, mapping to its attributes including "type")
    :returns: (dict) payload for api call; for an unrecognised site type the
        payload carries only the "type" key, as before
    """
    site_name = list(site.keys())[0]
    site_type = site[site_name]["type"]
    logging.debug(f"Site Name: {site_name}, Site Type: {site_type}")
    divider(text=f"Adding {site_type} [{site_name}]", char="-")
    payload = {"type": site_type}
    if site_type in ("floor", "building"):
        # Floors and buildings share the same payload shape (previously two
        # duplicated branches): copy every configured key except the "type"
        # marker. If any optional keys are not present they are left blank.
        site_dict_status = _check_dict_keys(
            dict_to_check=site[site_name], dnac_site_type=site_type
        )
        if site_dict_status:
            payload["site"] = {
                site_type: {
                    key: value
                    for key, value in site[site_name].items()
                    if not key.startswith("type")
                }
            }
    elif site_type == "area":
        # Areas only carry a name and a parent reference.
        site_dict_status = _check_dict_keys(
            dict_to_check=site[site_name], dnac_site_type=site_type
        )
        if site_dict_status:
            payload["site"] = {
                "area": {
                    "name": site[site_name]["name"],
                    "parentName": site[site_name]["parentName"],
                }
            }
    return payload
# Read sites configuration
def _read_site_configs(file_to_read=None):
    """Read the sites configuration YAML file and return its parsed content."""
    try:
        with open(file_to_read, "r") as yaml_stream:
            site_data = load(yaml_stream, Loader=yaml.FullLoader)
        return site_data
    except Exception as err:
        # Any failure (missing file, unreadable YAML, ...) is fatal here.
        click.secho(f"[x] Sites configuration read error!", fg="red")
        click.secho(f"[x] ERROR: {err}", fg="red")
        sys.exit(1)
# Site management
def add_site(dnac_auth_configs=None, locations_file_path=None):
    """
    This function adds site(s) to DNA center

    :param dnac_auth_configs: (dict) DNA Center authentication configurations
    :param locations_file_path: (str) Full file path to the sites configuration
    :returns: (stdOut) Output on screen
    """
    # Read site configurations
    logging.debug(f"Location File: {locations_file_path}")
    site_configs = _read_site_configs(file_to_read=locations_file_path)
    logging.debug(f"Site Configurations: {json.dumps(site_configs, indent=4)}")
    # The YAML must have a top-level "sites" list; anything else is malformed.
    if "sites" in site_configs.keys():
        sites = site_configs["sites"]
    else:
        click.secho(f"[x] Site configuration file is malformed", fg="red")
        sys.exit(1)
    # Authentication token
    token = generate_token(configs=dnac_auth_configs)
    headers = get_headers(auth_token=token)
    # Ask DNA Center to run the request synchronously and persist BAPI output.
    headers["__runsync"] = "true"
    headers["__persistbapioutput"] = "true"
    method, api_url, parameters = generate_api_url(api_type="add-site")
    divider("Adding Site(s)")
    click.secho(f"[$] Attempting to add sites.....", fg="blue")
    # Create each configured site with one API call apiece.
    for item in sites:
        payload = _generate_site_payload(site=item)
        api_response = call_api_endpoint(
            method=method,
            api_url=api_url,
            data=payload,
            api_headers=headers,
            check_payload=False,
        )
        response_status, response_body = get_response(response=api_response)
        # Response header is in plain/text so try to convert it into json
        try:
            json_response_body = json.loads(response_body)
            if response_status:
                # "status" true -> success message in result.result.progress;
                # otherwise the error text sits directly in result.result.
                site_status = json_response_body["status"]
                if site_status:
                    site_msg = json_response_body["result"]["result"]["progress"]
                    prefix = "[#] "
                    color = "green"
                else:
                    site_msg = json_response_body["result"]["result"]
                    prefix = "[x] "
                    color = "red"
                click.secho(f"{prefix}{site_msg}", fg=color)
            else:
                click.secho(f"[x] {json_response_body['result']['result']}", fg="red")
        except Exception as err:
            # Unparseable/unexpected response bodies abort the whole run.
            click.secho(f"[x] ERROR: {err}")
            sys.exit(1)
    goodbye()
| [
"dalwar.hossain@global.ntt"
] | dalwar.hossain@global.ntt |
220337e9db7609dda127a36c6ec0295396649b7e | 3d5ca4c6979f263e1cffa00d05c5b20ce7b991bf | /Photos/tests.py | 44bb715b4ff41fb29993ab66dbfb0463070e0240 | [] | no_license | UmuhireAnuarithe/Personal_Gallery | 9851d27b23e97f4bdab7ccc8cdde78f16f0ad148 | 4e06b203ea0f244a5e7400bf7eedde6361563872 | refs/heads/master | 2021-06-24T08:12:20.964784 | 2019-10-14T10:33:19 | 2019-10-14T10:33:19 | 213,857,897 | 0 | 0 | null | 2021-06-10T22:04:37 | 2019-10-09T08:04:55 | Python | UTF-8 | Python | false | false | 3,902 | py | from django.test import TestCase
from .models import Image,Category ,Location
# Create your tests here.
class ImageTestClass(TestCase):
    """Unit tests for the ``Image`` model and its category search."""
    # Set up method
    def setUp(self):
        # Unsaved instances reused by the tests below.
        self.animal= Image(image = 'passion.jpeg', name ='Animal', description='animal image')
        self.technology = Category(category='Technology')
    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.animal,Image))
    # save_image() should persist the record.
    def test_save_image(self):
        self.travel = Image(image='travel.jpeg',name='Travel',description='travel image')
        self.travel.save_image()
        images = Image.objects.all()
        self.assertTrue(len(images)>0)
    # Queryset update() should change the stored name.
    def test_update_image(self):
        self.car = Image(image='travel.jepg',name='Toyota',description='made in Japan')
        self.car.save_image()
        cars =Image.objects.filter(name='Toyota').first()
        update= Image.objects.filter(id=cars.id).update(name='Ritico')
        updated = Image.objects.filter(name = 'Ritico').first()
        self.assertNotEqual(cars.name , updated.name)
    # NOTE(review): this test performs the delete but its assertion is
    # commented out, so it currently verifies nothing.
    def test_delete_image(self):
        self.cat = Image(image='travel.jepg',name='Vox',description='made in Japan')
        self.cat.save_image()
        cat = Image.objects.filter(name='Vox').first()
        catts = Image.objects.filter(id =cat.id).delete()
        cats =Image.objects.all()
        # self.assertTrue(len(cats) == 0)
    # search_by_category() on a category with no images returns nothing.
    def test_search_method(self):
        self.technology.save_category()
        images = Image.search_by_category('Technology')
        self.assertTrue(len(images) == 0)
class LocationTestClass(TestCase):
    """Unit tests for the ``Location`` model."""

    def setUp(self):
        # Unsaved instance reused by the instance-type test.
        self.kigali = Location(location='Kigali')

    def test_instance(self):
        """A freshly built object is a Location."""
        self.assertTrue(isinstance(self.kigali, Location))

    def test_save_location(self):
        """save_location() persists the record."""
        self.musanze = Location(location='Musanze')
        self.musanze.save_location()
        stored = Location.objects.all()
        self.assertTrue(len(stored) > 0)

    def test_update_location(self):
        """Updating via the queryset changes the stored name."""
        self.burera = Location(location='Burera')
        self.burera.save_location()
        before = Location.objects.filter(location='Burera').first()
        Location.objects.filter(id=before.id).update(location='Kinoni')
        after = Location.objects.filter(location='Kinoni').first()
        self.assertNotEqual(before.location, after.location)

    def test_delete_location(self):
        """Deleting via the queryset removes the record (no assertion yet)."""
        self.burera = Location(location='Burera')
        self.burera.save_location()
        saved = Location.objects.filter(location='Burera').first()
        Location.objects.filter(id=saved.id).delete()
        remaining = Location.objects.all()
class CategoryTestClass(TestCase):
# Set up method
def setUp(self):
self.animal= Category(category='Animal')
# Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.animal,Category))
def test_save_category(self):
self.car = Category(category='Cars')
self.car.save_category()
cars =Category.objects.all()
self.assertTrue(len(cars)>0)
def test_update_category(self):
self.food = Category(category='Pizza')
self.food.save_category()
pizza =Category.objects.filter(category='Pizza').first()
update= Category.objects.filter(id=pizza.id).update(category='Cassava')
updated = Category.objects.filter(category='Cassava').first()
self.assertNotEqual(pizza.category , updated.category)
def test_delete_category(self):
self.banana = Category(category='Fruit')
self.banana.save_category()
banana = Category.objects.filter(category='Fruit').first()
fruits = Category.objects.filter(id =banana.id).delete()
fruits =Category.objects.all() | [
"anuarithemuhire@gmail.com.com"
] | anuarithemuhire@gmail.com.com |
29c17bf2b044a349ce48555b1f3f2079419ec58a | a4ba9bd7f3e5cf6af3f505cc2c760e763c32442f | /main.py | f0df6132a624264b9741c57b361b1f5d12d319ca | [] | no_license | rohinisyed/GuessTheNumber | 41736f625d4c7ebafcb666c3a8c985817458beac | aeedf9c8434724f43f75b5d52a188a76db0c586e | refs/heads/master | 2023-01-08T00:22:50.279101 | 2020-11-08T18:12:07 | 2020-11-08T18:12:07 | 310,939,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | from random import randint
play_game=True
random_number = randint(1,100)
while play_game:
player_guess = int(input("Guess the number: "))
if player_guess > random_number:
print ("Too High, Try again!")
elif player_guess < random_number:
print ("Too low, Try Again!")
else:
play_again = input ("You won, Want to play again Y/N?:")
if play_again == "Y":
random_number = randint(1,100)
elif play_again == "N":
play_game=False | [
"syedrohini1@gmail.com"
] | syedrohini1@gmail.com |
6ac04c2edad83957138b8eee41a319c63031aff4 | 853882976cffe5cd615a899dd9b438a2b78f3c34 | /SampleFeature/FeatureVector.py | eaf288ca8c9ca423cb5223b50859e9f4018fc67b | [] | no_license | promisivia/experience-oim-under-dcm | 9814d026e15685b7a00085777d2ae9aa430400e1 | 0bac6aef6cbc9b4af152d228b9f2c172c8ce49d1 | refs/heads/main | 2023-02-19T16:21:13.680028 | 2021-01-22T17:56:16 | 2021-01-22T17:56:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | import pickle
import random
import numpy as np
featureDic = {}
dimension = 5
dataset = 'NetHEPT'
# --------------------------- DC probability ----------------------------------- #
G = pickle.load(open('../datasets/' + dataset + '/graph.G', 'rb'))
for (u, v) in G.edges():
featureVector = np.array([np.random.normal(-1, 1, 1)[0] for i in range(dimension)])
l2_norm = np.linalg.norm(featureVector, ord=2)
featureVector = featureVector / l2_norm
featureDic[u, v] = [*featureVector]
print('fv dic:', featureDic)
pickle.dump(featureDic, open('../datasets/' + dataset + '/random-edgeFeatures.dic', "wb"))
| [
"fangnuowu@gmail.com"
] | fangnuowu@gmail.com |
31d493116b2e621b5d93a3977480ec7ae3fd48cf | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2157/60749/252819.py | e40d7365ee8419681863874c71cda7f98525182e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | str1=input()
def RomaToNum(str1):
dic={'I':1, 'V':5, 'X':10, 'L':50,'C':100,'D':500,'M':1000}
res=[]
for h in str1:
res.append(dic[h])
max1=0
for t in res:
max1=max(t,max1)
max_index=res.index(max1)
result=0
for h in range(0,max_index):
result-=res[h]
for h in range(max_index, len(res)-1):
if res[h]>=res[h+1]:
result+=res[h]
else:
result-=res[h]
result+=res[-1]
return result
print(RomaToNum(str1)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
6236d13610ac9b00017917536dbe4c9b7eddaf73 | 369689c1d709326999824afe323378bb381ee722 | /hakem_crawler/threaded_hakem_crawler.py | 7ef05a5ae50662e184b167633cbef24a5b85c5f4 | [] | no_license | folmez/TFF_crawler | 80cd0e5b58d8449558a8f024141e56249f48bbde | 04dc18ebe6a9a61e68930daa42a412e38a0fcb81 | refs/heads/master | 2020-04-06T14:34:50.459162 | 2018-12-14T12:36:36 | 2018-12-14T12:36:36 | 157,546,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,467 | py | import hakem_csv_tools
import hakem_info_extractor
import TFF_hakem
import queue
import threading
import csv
import time
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
DEFAULT_MATCH_INPUT_FILENAME = '/home/folmez/Dropbox/Documents/WWW/not_public/sample_matches_1000_1010.csv'
THREAD_OPEN_WAIT = 10
lock = threading.Lock()
def threaded_crawler(num_worker_threads, silent=False, \
use_selenium=False, match_input_filename=DEFAULT_MATCH_INPUT_FILENAME):
# Get hakem output filename
hakem_output_filename = match_input_filename[:-4] + '_HAKEMLER.csv'
# Create hakem ID queue and fill it
hakem_id_queue = queue.Queue()
hakem_list = hakem_csv_tools.get_hakem_id_from_match_output(match_input_filename)
for hakem_id in hakem_list:
hakem_id_queue.put(hakem_id)
# Write the header row into the hakem output file
header_row = hakem_csv_tools.get_header_row()
with open(hakem_output_filename, 'w') as f:
file_writer = csv.writer(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(header_row)
# Create crawler workers
crawler_threads = []
for i in range(num_worker_threads):
t = threading.Thread(target=crawler_worker, args=(hakem_id_queue,\
hakem_output_filename,\
silent, use_selenium))
t.daemon = True
t.start()
crawler_threads.append(t)
time.sleep(THREAD_OPEN_WAIT)
# Block the main thread until workers processed everything
hakem_id_queue.join()
print('Hakem queue has been completely crawled.')
# Stop crawler workers
print('Stopping crawlers...')
for t in crawler_threads:
t.join()
print('All crawlers stopped.')
# Single crawler worker, to be used in the threaded module
def crawler_worker(hakem_id_queue, hakem_output_filename, silent, use_selenium=False):
# open a browser
driver_path = '/usr/lib/chromium-browser/chromedriver'
browser = webdriver.Chrome(driver_path)
time.sleep(THREAD_OPEN_WAIT)
while not hakem_id_queue.empty():
hakem_id = hakem_id_queue.get()
hakem_url = hakem_info_extractor.get_hakem_url_string_from_int(hakem_id)
print('Getting:', hakem_url)
SELENIUM_WAIT_TIMEOUT = 20
short_wait = 2
k = 0
browser.get(hakem_url)
while True:
k = k + 1
time.sleep(short_wait)
inner_HTML = browser.execute_script("return document.body.innerHTML")
if this_is_a_good_html(inner_HTML) or \
this_is_an_error_page(inner_HTML) or \
k == SELENIUM_WAIT_TIMEOUT/short_wait:
break
hakem_site_str = inner_HTML
if this_is_an_error_page(hakem_site_str):
if not silent:
print('This URL goes to error page: ', hakem_url)
hakem_output = None # invalid URL
elif not this_is_a_good_html(hakem_site_str):
if not silent:
print('This page is not recognized: ', hakem_url)
hakem_output = None # unrecognized content
else:
this_hakem = TFF_hakem.hakem(hakem_site_str, hakem_id)
this_hakem.print_summary(silent)
hakem_output = this_hakem.all_info_in_one_line()
hakem_id_queue.task_done()
# Write match output to a new file with thread lock
if hakem_output is not None:
lock.acquire()
with open(hakem_output_filename, 'a') as f:
file_writer = csv.writer(f, delimiter=',', \
quoting=csv.QUOTE_MINIMAL)
file_writer.writerow([hakem_output])
lock.release()
browser.close()
def this_is_a_good_html(html_output_str):
return hakem_info_extractor.NAME_SEARCH_STR in html_output_str \
and hakem_info_extractor.OCCUPATION_SEARCH_STR in html_output_str \
and hakem_info_extractor.LISANS_SEARCH_STR in html_output_str \
and hakem_info_extractor.KLASMAN_SEARCH_STR in html_output_str \
and hakem_info_extractor.AREA_SEARCH_STR in html_output_str
def this_is_an_error_page(html_output_str):
error_indicator = 'Images/TFF/Error/tff.hatalogosu.gif'
return error_indicator in html_output_str
| [
"folmez@gmail.com"
] | folmez@gmail.com |
98977ef8cf14cb3eacaaa82bf32eb3c854d0ca8d | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /CFD_from_scratch_Lignell/cfd.py | 759f232c789c441088b4d6a0d5ae6faa654aacdf | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 10,504 | py | import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import spsolve
import matplotlib.pyplot as plt
IJ = np.ix_
def set_user_specifications():
global nx, ny, Lx, Ly, ν, ubc_t, ubc_b, vbc_r, vbc_l, n_τ_run, cfl
nx = 40 # number of grid points in x direction (P-grid)
ny = 60 # number of grid points in the y direction (P-grid)
Lx = 1.0 # domain length in x
Ly = 1.5 # domain length in y
ν = 1.0 # kinematic viscosity
ubc_t = 10.0 # u on top boundary
ubc_b = 0.0 # u on bottom boundary
vbc_r = 10.0 # v on right boundary
vbc_l = 0.0 # v on left boundary
n_τ_run = 2 # number of box timescales to run for
cfl = 0.05 # timestep size factor
def set_grid_time_vars():
global x, y, Δx, Δy, tend, Δt, nsteps, u, v, P
x = np.linspace(0,Lx,nx) # x-grid
y = np.linspace(0,Ly,ny) # y-grid
Δx = x[1] - x[0] # x-grid spacing
Δy = y[1] - y[0] # y-grid spacing
τ_box = (Lx+Ly)/2 / np.max(np.abs([ubc_t, ubc_b, vbc_r, vbc_l])) # box timescale
tend = τ_box * n_τ_run # simulation run time
Δt = cfl*np.min([Δx,Δy])/np.max(np.abs([ubc_t,ubc_b,vbc_r,vbc_l])) # timestep size
nsteps = int(tend/Δt) # number of timesteps
Δt = tend/nsteps # timestep size
#-------------------- set solution variables
u = np.zeros((nx+1,ny))
v = np.zeros((nx,ny+1))
P = np.zeros((nx,ny)) # P = P/ρ (P_hat)
def get_Hu():
"""
ue, uw, un, us, vn, vs are values on u-cell faces
These arrays include ALL u-cells (nx+1,ny) for convenience,
but only interior u-cells (and corresponding face values) are set.
"""
ue = np.zeros((nx+1,ny))
uw = np.zeros((nx+1,ny))
un = np.zeros((nx+1,ny))
us = np.zeros((nx+1,ny))
vn = np.zeros((nx+1,ny))
vs = np.zeros((nx+1,ny))
τxxe = np.zeros((nx+1,ny))
τxxw = np.zeros((nx+1,ny))
τxyn = np.zeros((nx+1,ny))
τxys = np.zeros((nx+1,ny))
Hu = np.zeros((nx+1,ny))
i = np.arange(1,nx) # u-cell centers in domain interior
ue[i,:] = (u[i+1,:] + u[i,:])/2
uw[i,:] = (u[i,:] + u[i-1,:])/2
j = np.arange(0,ny-1)
un[IJ(i,j)] = (u[IJ(i,j+1)] + u[IJ(i,j)])/2
un[i,ny-1] = ubc_t
j = np.arange(1,ny)
us[IJ(i,j)] = (u[IJ(i,j)] + u[IJ(i,j-1)])/2
us[i,0] = ubc_b
j = np.arange(0,ny)
vn[IJ(i,j)] = (v[IJ(i-1,j+1)]+v[IJ(i,j+1)])/2
vs[IJ(i,j)] = (v[IJ(i-1,j)] +v[IJ(i,j)]) /2
τxxe[i,:] = -2*ν*(u[i+1,:] - u[i,:]) /Δx
τxxw[i,:] = -2*ν*(u[i,:] - u[i-1,:])/Δx
j = np.arange(0,ny-1)
τxyn[IJ(i,j)] = -ν*(u[IJ(i,j+1)]-u[IJ(i,j)])/Δy - ν*(v[IJ(i,j+1)]-v[IJ(i-1,j+1)])/Δx
τxyn[i,ny-1] = -ν*(ubc_t-u[i,ny-1])/(Δy/2) - ν*(v[i,ny]-v[i-1,ny])/Δx
j = np.arange(1,ny)
τxys[IJ(i,j)] = -ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy - ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx
τxys[i,0] = -ν*(u[i,0]-ubc_b)/(Δy/2) - ν*(v[i,0]-v[i-1,0])/Δx
Hu[i,:] = -((ue[i,:]*ue[i,:] - uw[i,:]*uw[i,:])/Δx + (un[i,:]*vn[i,:] - us[i,:]*vs[i,:])/Δy) \
-((τxxe[i,:] - τxxw[i,:])/Δx + (τxyn[i,:] - τxys[i,:])/Δy)
return Hu
def get_Hv():
"""
vn, vs, ve, vw, ue, uw are values on v-cell faces
These arrays include ALL v-cells (nx,ny+1) for convenience,
but only interior v-cells (and corresponding face values) are set.
"""
vn = np.zeros((nx,ny+1))
vs = np.zeros((nx,ny+1))
ve = np.zeros((nx,ny+1))
vw = np.zeros((nx,ny+1))
ue = np.zeros((nx,ny+1))
uw = np.zeros((nx,ny+1))
τyyn = np.zeros((nx,ny+1))
τyys = np.zeros((nx,ny+1))
τyxe = np.zeros((nx,ny+1))
τyxw = np.zeros((nx,ny+1))
Hv = np.zeros((nx,ny+1))
j = np.arange(1,ny) # v-cell centers in domain interior
vn[:,j] = (v[:,j+1] + v[:,j])/2
vs[:,j] = (v[:,j] + v[:,j-1])/2
i = np.arange(0,nx-1)
ve[IJ(i,j)] = (v[IJ(i+1,j)] + v[IJ(i,j)])/2
ve[nx-1,j] = vbc_r
i = np.arange(1,nx)
vw[IJ(i,j)] = (v[IJ(i,j)] + v[IJ(i-1,j)])/2
vw[0,j] = vbc_l
i = np.arange(0,nx)
ue[IJ(i,j)] = (u[IJ(i+1,j-1)] + u[IJ(i+1,j)])/2
uw[IJ(i,j)] = (u[IJ(i,j-1)] + u[IJ(i,j)]) /2
τyyn[:,j] = -2*ν*(v[:,j+1] - v[:,j]) /Δy
τyys[:,j] = -2*ν*(v[:,j] - v[:,j-1])/Δy
i = np.arange(0,nx-1)
τyxe[IJ(i,j)] = -ν*(v[IJ(i+1,j)]-v[IJ(i,j)])/Δx - ν*(u[IJ(i+1,j)]-u[IJ(i+1,j-1)])/Δy
τyxe[nx-1,j] = -ν*(vbc_r-v[nx-1,j])/(Δx/2) - ν*(u[nx,j]-u[nx,j-1])/Δy
i = np.arange(1,nx)
τyxw[IJ(i,j)] = -ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx - ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy
τyxw[0,j] = -ν*(v[0,j]-vbc_l)/(Δx/2) - ν*(u[0,j]-u[0,j-1])/Δy
Hv[:,j] = -((vn[:,j]*vn[:,j] - vs[:,j]*vs[:,j])/Δy + (ve[:,j]*ue[:,j] - vw[:,j]*uw[:,j])/Δx) \
-((τyyn[:,j] - τyys[:,j])/Δy + (τyxe[:,j] - τyxw[:,j])/Δx)
return Hv
def solve_P(h):
"""
Set up and solve the AP=b system, where A is a matrix, P (=Phat) and b are vectors.
"""
nP = nx*ny # total grid points solved (all P-grid cells)
b = np.zeros((nx,ny)) # set below
cP = np.zeros((nx,ny)) # coefficient of P_i,j; set below
cPjm = np.full((nx,ny),-h*Δx/Δy) # coefficient of P_i,j-1; initialized here, specialized below
cPim = np.full((nx,ny),-h*Δy/Δx) # coefficient of P_i-1,j; initialized here, specialized below
cPip = np.full((nx,ny),-h*Δy/Δx) # coefficient of P_i+1,j; initialized here, specialized below
cPjp = np.full((nx,ny),-h*Δx/Δy) # coefficient of P_i,j+1; initialized here, specialized below
#--------------------
# Interior
i = np.arange(1,nx-1); j = np.arange(1,ny-1)
b[IJ(i,j)] = -Δy*(u[IJ(i+1,j)]+h*Hu[IJ(i+1,j)]) + Δy*(u[IJ(i,j)]+h*Hu[IJ(i,j)]) - Δx*(v[IJ(i,j+1)]+h*Hv[IJ(i,j+1)]) + Δx*(v[IJ(i,j)]+h*Hv[IJ(i,j)])
cP[IJ(i,j)] = 2*h*Δy/Δx + 2*h*Δx/Δy
# Corner bottom left
i = 0; j = 0
b[i,j] = -Δy*(u[i+1,j]+h*Hu[i+1,j]) + Δy*u[i,j] - Δx*(v[i,j+1]+h*Hv[i,j+1]) + Δx*v[i,j]
cP[i,j] = h*Δy/Δx + h*Δx/Δy
cPjm[i,j] = 0.0
cPim[i,j] = 0.0
# Side bottom
i = np.arange(1,nx-1); j = 0
b[i,j] = -Δy*(u[i+1,j]+h*Hu[i+1,j]) + Δy*(u[i,j]+h*Hu[i,j]) - Δx*(v[i,j+1]+h*Hv[i,j+1]) + Δx*v[i,j]
cP[i,j] = 2*h*Δy/Δx + h*Δx/Δy
cPjm[i,j] = 0.0
# Corner bottom right
i = nx-1; j = 0
b[i,j] = -Δy*u[i+1,j] + Δy*(u[i,j]+h*Hu[i,j]) - Δx*(v[i,j+1]+h*Hv[i,j+1]) + Δx*v[i,j]
cP[i,j] = h*Δy/Δx + h*Δx/Δy
cPjm[i,j] = 0.0
cPip[i,j] = 0.0
# Side left
i = 0; j = np.arange(1,ny-1)
b[i,j] = -Δy*(u[i+1,j]+h*Hu[i+1,j]) + Δy*u[i,j] - Δx*(v[i,j+1]+h*Hv[i,j+1]) + Δx*(v[i,j]+h*Hv[i,j])
cP[i,j] = h*Δy/Δx + 2*h*Δx/Δy
cPim[i,j] = 0.0
# Side right
i = nx-1; j = np.arange(1,ny-1)
b[i,j] = -Δy*u[i+1,j] + Δy*(u[i,j]+h*Hu[i,j]) - Δx*(v[i,j+1]+h*Hv[i,j+1]) + Δx*(v[i,j]+h*Hv[i,j])
cP[i,j] = h*Δy/Δx + 2*h*Δx/Δy
cPip[i,j] = 0.0
# Corner top left
i = 0; j = ny-1
b[i,j] = -Δy*(u[i+1,j]+h*Hu[i+1,j]) + Δy*u[i,j] - Δx*v[i,j+1] + Δx*(v[i,j]+h*Hv[i,j])
cP[i,j] = h*Δy/Δx + h*Δx/Δy
cPim[i,j] = 0.0
cPjp[i,j] = 0.0
# Side top
i = np.arange(1,nx-1); j = ny-1
b[i,j] = -Δy*(u[i+1,j]+h*Hu[i+1,j]) + Δy*(u[i,j]+h*Hu[i,j]) - Δx*v[i,j+1] + Δx*(v[i,j]+h*Hv[i,j])
cP[i,j] = 2*h*Δy/Δx + h*Δx/Δy
cPjp[i,j] = 0.0
# Corner top right
i = nx-1; j = ny-1
b[i,j] = -Δy*u[i+1,j] + Δy*(u[i,j]+h*Hu[i,j]) - Δx*v[i,j+1] + Δx*(v[i,j]+h*Hv[i,j])
cP[i,j] = h*Δy/Δx + h*Δx/Δy
cPip[i,j] = 0.0
cPjp[i,j] = 0.0
#---------------------------------
b = np.reshape(b, nP, order='F')
cP = np.reshape(cP, nP, order='F')
cPjm = np.reshape(cPjm, nP, order='F')
cPim = np.reshape(cPim, nP, order='F')
cPip = np.reshape(cPip, nP, order='F')
cPjp = np.reshape(cPjp, nP, order='F')
A = diags([cPjm[nx:], cPim[1:], cP, cPip[:-1], cPjp[:-nx]], [-nx, -1, 0, 1, nx], format='csr')
#---------------------------------
P = spsolve(A,b)
P -= np.average(P)
P = np.reshape(P, (nx,ny),order='F')
return P
set_user_specifications()
set_grid_time_vars()
ke = np.zeros(nsteps+1)
times = np.linspace(0,tend,nsteps+1)
for k in range(nsteps):
Hu = get_Hu()
Hv = get_Hv()
P = solve_P(Δt)
i = np.arange(1,nx)
u[i,:] = u[i,:] + Δt*Hu[i,:] - Δt*(P[i,:]-P[i-1,:])/Δx
j = np.arange(1,ny)
v[:,j] = v[:,j] + Δt*Hv[:,j] - Δt*(P[:,j]-P[:,j-1])/Δy
#-----------------------------
U = (u[:-1,:] + u[1:,:])/2
V = (v[:,:-1] + v[:,1:])/2
velmag = np.sqrt(U*U + V*V)
ke[k+1] = 0.5*(np.average(velmag**2))
#----------- interpolate velocities to the P-grid
U = (u[:-1,:] + u[1:,:])/2 # u-velocity on the P-grid
V = (v[:,:-1] + v[:,1:])/2 # v-velocity on the P-grid
velmag = np.sqrt(U*U + V*V) # velocity magnitude.
X,Y = np.meshgrid(x,y)
plt.rc('font', size=14)
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(10,10))
ax1.set_aspect('equal', adjustable='box')
ax1.streamplot(x,y,U.T,V.T, density=2.5, linewidth=1, arrowsize=0.001, color=velmag.T)
ax1.set_title(r'$|\vec{v}|$')
ax1.set_xlim([0,Lx])
ax1.set_ylim([0,Ly])
ax1.set_xticks([])
ax1.set_yticks([]);
ax2.set_aspect('equal', adjustable='box')
ax2.contourf(X,Y,P.T,40)
ax2.set_title(r'$P/\rho$')
ax2.set_xlim([0,Lx])
ax2.set_ylim([0,Ly])
ax2.set_xticks([])
ax2.set_yticks([]);
plt.figure(figsize=(3.5,3))
plt.plot(times,ke)
plt.xlabel('time')
plt.ylabel('KE');
plt.show()
| [
"me@yomama.com"
] | me@yomama.com |
13dcd06c73d918ab2d6a73de07a00a3c7590e697 | 484da6ff9bda06183c3d3bbda70c6d11e1ad6b67 | /.history/stats_20191010223658.py | 7c0eae6ac44c7b062031bcdfb01ecde134b2d698 | [] | no_license | Shynar88/TSP | 009a88bbddb29214921de4d0cf1761dea61b7b75 | 889751ab7d6a91469e86c6583f3c91b85857edd9 | refs/heads/master | 2020-08-06T22:40:49.217474 | 2020-01-14T13:41:44 | 2020-01-14T13:41:44 | 213,185,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import matplotlib.pyplot as plt;
import numpy as np
def parse_log_data():
generations = []
distances = []
for line in open("logs.log", "r"):
info = line.split(" ")
generations.append(info[0])
distances.append(info[1])
return generations, distances
if __name__ == "__main__":
(generations, distances) = parse_log_data()
plt.plot(generations, distances)
plt.axis([0, 50, 0, 35000])
plt.ylabel("Distance")
plt.title("Runtime statistics")
plt.show()
| [
"shynartorekhan@gmail.com"
] | shynartorekhan@gmail.com |
b8bf6a1519b5ce449199a0019b7744432090e238 | 52c1e2d5dadb27ef614e964c1c4ae5e1e2bff669 | /scripts/model_generation.py | d0b5ed6a3ea44b2ec723f8da9f5b95d476fa3491 | [] | no_license | Yelvd/PPAM22-Building-Analytical-Performance-Models-for-Complex-Applications | e189e1705e05a75bc8c161676ba46b1ab16a1979 | 91e67b1be17243176631d26f91eb896e61098bd7 | refs/heads/main | 2023-04-13T10:49:30.913672 | 2022-09-08T14:30:59 | 2022-09-08T14:30:59 | 483,641,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,939 | py | # %matplotlib widget
import getopt
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import glob
from scipy import stats
from scipy.optimize import curve_fit
import scipy
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import read_data
from scipy.stats import itemfreq
import pandas as pd
import numpy as np
import glob
import re
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import subprocess
import json
plt.rcParams.update({
"text.usetex": True,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"]})
plt.rcParams.update({'font.size': 15})
CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']
results_dir = "../figs/"
machine=""
fig_name= "model_prediction_{}"
iterations = 1
def run_model(models, size, RBCs):
total = 0
for component in ["collideAndStream", "setExternalVector"]:
res = models[component]
total += res.intercept + res.slope * size[0] * size[1] * size[2]
for component in ["collideAndStream_comm"]:
res = models[component]
total += res.intercept + res.slope * 2*((size[0]*size[1])+(size[0]*size[2])+(size[1]*size[2]))
for component in ["syncEnvelopes", "advanceParticles", "applyConstitutiveModel", "deleteNonLocalParticles", "spreadParticleForce", "interpolateFluidVelocity"]:
res = models[component]
total += res.intercept + res.slope * RBCs
for component in ["syncEnvelopes_comm"]:
popt = models[component]
total += popt[0] + 2*((size[0]*size[1])+(size[0]*size[2])+(size[1]*size[2]))*popt[1] + RBCs*popt[2]
return total
def calibrate_model(fitting_df):
""" Calibrate the model by linear fitting the data to the matched relevant parameter(s) """
models = {}
for component in ["collideAndStream", "setExternalVector"]:
tmpdf = fitting_df.loc[fitting_df['component'] == component]
res = stats.linregress(tmpdf['N'], tmpdf['comp'])
models[component] = res
for component in ["collideAndStream"]:
tmpdf = fitting_df.loc[fitting_df['component'] == component]
res = stats.linregress(tmpdf['area'], tmpdf['comm'])
models[component+"_comm"] = res
for component in ["syncEnvelopes", "applyConstitutiveModel", "deleteNonLocalParticles", "spreadParticleForce", "interpolateFluidVelocity", "advanceParticles"]:
tmpdf = fitting_df.loc[fitting_df['component'] == component]
res = stats.linregress(tmpdf['RBCs'], tmpdf['comp'])
models[component] = res
def function_calc(x, a, b, c):
return a + b*x[0] + c*x[1]
for component in ["syncEnvelopes"]:
tmpdf = fitting_df.loc[fitting_df['component'] == component]
popt, pcov = curve_fit(function_calc, [tmpdf['area'],tmpdf['RBCs']], tmpdf['comm'])
models[component+"_comm"] = popt
return models
def model_to_json(models):
""" Convert the model given by calibrate_model to a json style model """
smodel = {}
for k in models.keys():
for component in ["collideAndStream", "setExternalVector"]:
res = models[component]
smodel[component] = {'offset': res.intercept, 'N': res.slope}
for component in ["collideAndStream_comm"]:
res = models[component]
smodel[component] = {'offset': res.intercept, 'area': res.slope}
for component in ["syncEnvelopes_comm"]:
res = models[component]
smodel[component] = {'offset': res[0], 'area': res[1], 'RBCs': res[2]}
for component in ["syncEnvelopes", "advanceParticles", "applyConstitutiveModel", "deleteNonLocalParticles", "spreadParticleForce", "interpolateFluidVelocity"]:
res = models[component]
smodel[component] = {'offset': res.intercept, 'RBCs': res.slope}
for k in smodel.keys():
for x in smodel[k].keys():
smodel[k][x] = smodel[k][x] / iterations
return smodel
def load_data(resultsdir):
# Read Data
results_df, exp_df = read_data.gen_df(resultsdir)
exp_df['N'] = [x * y * z for (x, y, z) in exp_df['largest_subdomain']]
exp_df['area'] = [ 2*(x*y + x*z + y*z) for (x, y, z) in exp_df['largest_subdomain']]
exp_df['RBCs-total'] = exp_df["RBCs"]
exp_df['RBCs'] = exp_df['RBCs-total'] / exp_df['atomicblocks']
return results_df, exp_df
def unique(x, axis=0):
seen = []
new = []
for tmp in x:
if tmp[axis] not in seen:
seen.append(tmp[axis])
new.append(tmp)
return np.sort(np.array(new, dtype=object), axis=0)
def plot_validation(testing_df, model, name):
""" Plot the results of prediction the testing_df in results_dir + name + .pdf """
# grap the longest running thread as the total running time of the application
testing_df = testing_df.loc[testing_df['component'] == 'total']
testing_df = testing_df.loc[testing_df.groupby('jobid', sort=False)['total'].idxmax()]
testing_df['sizestr'] = ["({}, {}, {})".format(x, y, z) for (x, y, z) in testing_df['largest_subdomain']]
# ax1 = fig.add_subplot(2, 1, 1)
# ax2 = ax1.twinx()
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20, 6))
plt.rcParams['font.size'] = 20
plt.rcParams.update({'font.weight': 'bold'})
fit_exp_df = testing_df.sort_values("N")
width = .7
stride = width / np.unique(np.sort(fit_exp_df['h'])).size
for i, sizestr in enumerate(pd.unique(fit_exp_df['sizestr'])):
s = np.array(fit_exp_df.loc[fit_exp_df['sizestr'] == sizestr]['largest_subdomain'])[0]
legend_handels = []
offset = width/2
for hi, H in enumerate(np.unique(np.sort(fit_exp_df['h']))):
t = fit_exp_df.loc[fit_exp_df['sizestr'] == sizestr]
r = np.array(t.loc[t['h'] == H]['RBCs'])[0]
tmp = testing_df.loc[testing_df['h'] == H]
tmp = tmp.loc[tmp['largest_subdomain'] == s]
ax1.errorbar(i - offset, np.mean(tmp['total']), yerr=np.std(tmp['total']), ms=30, color=CB_color_cycle[hi], fmt=".", capsize=5, lw=1)
ax1.plot(i - offset, run_model(model, s, r), 'x', color=CB_color_cycle[hi], ms=20)
pred_error = np.abs(run_model(model, s, r) - np.mean(tmp['total'])) * (100 / np.mean(tmp['total']))
ax2.plot(i - offset, pred_error, 'X', color=CB_color_cycle[hi], ms=20)
offset -= stride
legend_handels.append( Line2D([0], [0], color=CB_color_cycle[hi], lw=0, marker='o', ms=10, label='H{}\%'.format(H)))
# legend_handels.append( Line2D([0], [0], color=CB_color_cycle[hi], lw=0, marker='x', label='H{}\ Prediction%'.format(H)))
legend_tmp = legend_handels.copy()
legend_tmp.insert(0, Line2D([0], [0], color='k', lw=0, marker='x', ms=10, label='Prediction'))
legend_tmp.insert(0, Line2D([0], [0], color='k', lw=0, marker='o', ms=10, label='Empirical Results'))
legend_tmp2 = legend_handels.copy()
legend_tmp2.insert(0, Line2D([0], [0], color='k', lw=0, marker='X', ms=10, label='Prediction Error'))
# plt.grid(True, which="both", ls="-", color='0.65')
# plt.rcParams.update({'axes.linewidth': 5})
# plt.rcParams.update({'font.size': 20})
ax1.legend(handles=legend_tmp, loc='upper left', prop={'size': 15})
ax2.legend(handles=legend_tmp2, loc='upper right', prop={'size': 15})
# ax.set_yscale('log')
ax1.set_ylim(0.01, 700)
# ax1.set_yticks(fontsize=25)
# ax2.set_yticks(fontsize=25)
ax1.tick_params(axis='both', which='major', labelsize=25)
ax1.tick_params(axis='both', which='minor', labelsize=25)
ax2.tick_params(axis='both', which='major', labelsize=25)
ax2.tick_params(axis='both', which='minor', labelsize=25)
ax1.set_ylabel("Time in Seconds", fontsize=25)
ax2.set_xlabel("Domain Size in µm (x, y, z)", fontsize=25)
ax1.set_xlabel("Domain Size in µm (x, y, z)", fontsize=25)
ax2.set_ylabel('Prediction Error [\%]', fontsize=25) # we already handled the x-label with ax1
ax2.set_ylim(0, 30)
# ax2.plot(t, data2, color=color)
# ax2.tick_params(axis='y')
# plt.title("Model Verification on DAS-6 (1 node, 24 processes)")
# plt.xticks(, pd.unique(fit_exp_df['sizestr']), rotation='vertical')
ax1.set_xticks(range(np.unique(np.sort(fit_exp_df['largest_subdomain'])).size), [ "({:g}, {:g}, {:g})".format(0.5 * x[0], 0.5 * x[1],0.5 * x[2]) for x in pd.unique(fit_exp_df['largest_subdomain'])], fontsize=25)
ax2.set_xticks(range(np.unique(np.sort(fit_exp_df['largest_subdomain'])).size), [ "({:g}, {:g}, {:g})".format(0.5 * x[0], 0.5 * x[1],0.5 * x[2]) for x in pd.unique(fit_exp_df['largest_subdomain'])], fontsize=25)
plt.tight_layout()
plt.savefig(results_dir + name + ".pdf", bbox_inches='tight')
# plt.savefig(results_dir + "model-prediction_das6.svg", bbox_inches='tight')
# plt.savefig(results_dir + "model-prediction_das6.pdf", bbox_inches='tight')
def gen_validation_table(testing_df, model):
"""Print latex table for the results and prediction of the testing set
size & hematocrit & result +- std & prediction & prediction error """
# grap the longest running thread as the total running time of the application
testing_df = testing_df.loc[testing_df['component'] == 'total']
testing_df = testing_df.loc[testing_df.groupby('jobid', sort=False)['total'].idxmax()]
print(testing_df)
testing_df['sizestr'] = ["({}, {}, {})".format(x, y, z) for (x, y, z) in testing_df['largest_subdomain']]
fit_exp_df = testing_df.sort_values("N")
error = 0
count = 0
for i, sizestr in enumerate(pd.unique(fit_exp_df['sizestr'])):
s = np.array(fit_exp_df.loc[fit_exp_df['sizestr'] == sizestr]['largest_subdomain'])[0]
for hi, H in enumerate(np.unique(np.sort(fit_exp_df['h']))):
t = fit_exp_df.loc[fit_exp_df['sizestr'] == sizestr]
r = np.array(t.loc[t['h'] == H]['RBCs'])[0]
tmp = testing_df.loc[testing_df['h'] == H]
tmp = tmp.loc[tmp['largest_subdomain'] == s]
pred = run_model(model, s, r)
res = np.mean(tmp['total'])
std = np.std(tmp['total'])
err = np.abs(pred - res) * (100 / res)
st = np.array(tmp["largest_subdomain"])[0]
stmp = "({}, {}, {})".format(st[0] *.5, st[1] *.5, st[2] *.5)
tmpstr = "{} & {}\% ".format(stmp, H, )
tmpstr += "& $\\num{{{0:.2f}}}".format(res)
tmpstr += "\pm \\num{{{0:.2f}}}".format(std)
tmpstr += "$& $\\num{{{0:.2f}}}".format(pred)
tmpstr += "$ & $\\num{{{0:.2f}}}$\\\\".format(err)
print(tmpstr)
error += err
count += 1
print()
print("avg error: {}".format(error / count) )
def print_model_latex(model):
model = model_to_json(model)
for k in model.keys():
tmpstr = '{} & '.format(k)
tmpstr += "$"
tmpstr += "\\num{{{0:.2g}}}".format(model[k]['offset'])
if 'N' in model[k].keys():
tmpstr += " + V \\times \\num{{{0:.2g}}}".format(model[k]['N'])
if 'area' in model[k].keys():
tmpstr += " + SA \\times \\num{{{0:.2g}}}".format(model[k]['area'])
if 'RBCs' in model[k].keys():
tmpstr += " + RBCs \\times \\num{{{0:.2g}}}".format(model[k]['RBCs'])
tmpstr += "$\\\\"
print(tmpstr)
pass
def validate_model(testing_df, model):
gen_validation_table(testing_df, model)
plot_validation(testing_df, model, fig_name)
def main(argv):
global fig_name
global machine
global results_dir
global iterations
datadir = ""
try:
opts, args = getopt.getopt(argv,"m:r:o:ds",["ifile=","ofile="])
except getopt.GetoptError:
print ('test.py -i <inputfile> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
sys.exit()
elif opt in ("-o"):
results_dir = arg
elif opt in ("-d"):
machine = "das6"
iterations = 500
elif opt in ("-s"):
machine = "snellius"
iterations = 500
elif opt in ("-r"):
datadir = arg
fig_name = fig_name.format(machine)
datadir = "../results/{}/model/output/".format(machine)
if datadir == "":
print("no data dir given")
sys.exit(1)
if machine == "":
print("no machine name given")
sys.exit(1)
results_df, exp_df = load_data(datadir)
if machine == "das6":
fitting_sizes = ["s5", "s1", "s3", "s6", "s7", 's8']
testing_sizes = ["s2", "s4", "s8", "s9"]
# testing_sizes = testing_sizes + fitting_sizes
if machine == "snellius":
fitting_sizes = ["s1", "s3", "s5", "s8", "s9", "s11", "s12", 's10']
testing_sizes = ["s2", "s4", "s7", "s14"]
# testing_sizes = testing_sizes + fitting_sizes
print(np.unique(exp_df['s']))
fitting_jobs = exp_df.loc[exp_df['s'].isin(fitting_sizes)]['jobid']
testing_jobs = exp_df.loc[exp_df['s'].isin(testing_sizes)]['jobid']
fitting_df = results_df.loc[results_df['jobid'].isin(fitting_jobs)]
testing_df = results_df.loc[results_df['jobid'].isin(testing_jobs)]
fitting_df = pd.merge(fitting_df, exp_df, on=['jobid'], how="left")
testing_df = pd.merge(testing_df, exp_df, on=['jobid'], how="left")
print(fitting_df)
model = calibrate_model(fitting_df)
validate_model(testing_df, model)
print()
print(json.dumps(model_to_json(model)))
print()
print_model_latex(model)
if __name__ == "__main__":
main(sys.argv[1:]) | [
"jelle.van.dijk@uva.nl"
] | jelle.van.dijk@uva.nl |
5d78ddfa44406d1b5fea5e775662f57a46d90688 | 7b3b859dd633eb2240d987b37e487ea8388e2f8d | /empirical/Chen2010/Chen2010.py | 0fe08644c1f533bdae0bdb3a81b33691ee231b19 | [] | no_license | yszhuang/assetPricing2 | 96956638f6c26e4e7d33e0abffe5c5c14460000a | 10af01a66bcd13cb516920e9cb1b46d8cfa6b598 | refs/heads/master | 2022-01-13T02:00:09.070100 | 2018-09-01T02:28:21 | 2018-09-01T02:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,023 | py | # -*-coding: utf-8 -*-
# Python 3.6
# Author:Zhang Haitao
# Email:13163385579@163.com
# TIME:2018-05-09 16:23
# NAME:assetPricing2-Chen2010.py
from core.constructFactor import single_sorting_factor
from core.myff5 import regression_details_5x5, ts_panel, model_performance
from data.dataApi import Database, Benchmark
from data.dataTools import read_unfiltered
from data.din import parse_financial_report, quaterly2monthly
from tool import assign_port_id, my_average, multi_processing, newey_west
from zht.data.gta.api import read_gta
import os
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing.pool import Pool
import numpy as np
BENCH=Benchmark()
# working directories: raw monthly indicator pickles, per-factor figures,
# factor pickles (from this file and from the Database), and result folders
direc= r'D:\zht\database\quantDb\researchTopics\assetPricing2\data\FI'
dirData= r'D:\zht\database\quantDb\researchTopics\assetPricing2\data\FI\data'
figPath= r'D:\zht\database\quantDb\researchTopics\assetPricing2\data\FI\fig'
factorPath= r'D:\zht\database\quantDb\researchTopics\assetPricing2\data\FI\factor'
dirFactor_database=r'D:\zht\database\quantDb\researchTopics\assetPricing2\data\FI\factor_database'
dirFig_database=r'D:\zht\database\quantDb\researchTopics\assetPricing2\data\FI\fig_database'
dirChen=r'D:\zht\database\quantDb\researchTopics\assetPricing2\data\FI\Chen2010'
dirPanels=r'D:\zht\database\quantDb\researchTopics\assetPricing2\data\FI\bivariate_panels'
def _save(df, name):
    """Pickle *df* under ``dirData`` as ``<name>.pkl``."""
    target = os.path.join(dirData, '{}.pkl'.format(name))
    df.to_pickle(target)
def _read(name):
    """Load the pickled frame saved by :func:`_save` for *name*."""
    path = os.path.join(dirData, '{}.pkl'.format(name))
    return pd.read_pickle(path)
#parse all the financial indicators
def _filter_indicators(lst):
newlst=[]
mark=[]
for ele in lst:
if ele[-1].isdigit():
newlst.append(ele)
else:
if ele[:-1] not in mark:
newlst.append(ele)
mark.append(ele[:-1])
return newlst
def parse_all_financial_indicators():
    """Parse every indicator column of tables FI_T1..FI_T11 into monthly
    frames and pickle each one as ``<table>__<indicator>`` via :func:`_save`.
    """
    tbnames = ['FI_T{}'.format(i) for i in range(1, 12)]
    for tbname in tbnames:
        raw = read_gta(tbname)
        varnames = [col for col in raw.columns if col not in
                    ['Accper', 'Indcd', 'Stkcd', 'Typrep']]
        # consolidated statements are marked by the presence of Typrep
        consolidated = 'Typrep' in raw.columns
        varnames = _filter_indicators(varnames)
        for varname in varnames:
            # distinct names so the raw table is not clobbered
            # (the original reused ``df`` for both the table and the report)
            parsed = parse_financial_report(tbname, varname,
                                            consolidated=consolidated)
            monthly = quaterly2monthly(parsed)
            _save(monthly, '{}__{}'.format(tbname, varname))
            print(tbname, varname)
def indicator2factor(indicator):
    """Build a long-short quintile factor from one financial indicator.

    The indicator and the market-cap weight are lagged one month, stocks are
    sorted into ``q`` value-weighted portfolios each month, and the factor is
    the top-minus-bottom portfolio spread.  The resulting series is pickled
    to ``factorPath/<indicator>.pkl``.
    """
    sampleControl=False
    q=5

    # data lagged
    df = _read(indicator)
    s = df.stack()
    s.name = indicator

    weight = Database(sample_control=sampleControl).by_indicators(['weight'])
    datalagged = pd.concat([s, weight], axis=1)
    # lag by one period within each stock so the sort uses prior-month data
    datalagged = datalagged.groupby('sid').shift(1)

    # data t
    datat = Database(sample_control=sampleControl).by_indicators(['stockEretM'])
    comb = pd.concat([datalagged, datat], axis=1)
    comb = comb.dropna()

    # assign each stock to one of q quantile portfolios per month
    comb['g'] = comb.groupby('t', group_keys=False).apply(
        lambda df: assign_port_id(df[indicator], q))

    # value-weighted mean excess return per (month, portfolio)
    panel = comb.groupby(['t', 'g']).apply(
        lambda df: my_average(df, 'stockEretM', wname='weight')) \
        .unstack(level=['g'])

    # high-minus-low spread is the factor return
    factor = panel[q] - panel[1]
    factor.name=indicator
    factor.to_pickle(os.path.join(factorPath, '{}.pkl'.format(indicator)))
def _task(indicator):
    """Worker wrapper: build one factor, recording failures instead of raising.

    Failed indicator names are appended to ``failed.txt`` so one bad input
    does not kill the whole batch.
    """
    try:
        indicator2factor(indicator)
    except Exception:
        # was a bare ``except:``, which also swallowed KeyboardInterrupt and
        # SystemExit, making the worker pool impossible to interrupt
        with open(os.path.join(direc, 'failed.txt'), 'a') as f:
            f.write(indicator + '\n')
    print(indicator)
def multi_indicator2factor():
    """Build factors for every pickled indicator in ``dirData``, in parallel."""
    indicators = [fn[:-4] for fn in os.listdir(dirData)]  # strip '.pkl'
    # context manager guarantees the pool is closed and joined
    # (the original leaked the Pool's worker processes)
    with Pool(6) as p:
        p.map(_task, indicators)
def analyse_corr():
    '''
    Analyse the correlation between the factors constructed by sorting on
    financial indicators.  Some indicators share the same underlying values,
    so some factor pairs are near-duplicates.

    Returns:
        pd.DataFrame: one row per factor pair (lower triangle only, diagonal
        excluded) with their correlation, sorted descending.
    '''
    fns = os.listdir(factorPath)
    ss = [pd.read_pickle(os.path.join(factorPath, fn)) for fn in fns]
    comb = pd.concat(ss, axis=1)
    corr = comb.corr()
    # mask the upper triangle (and diagonal) to keep each pair only once
    tri = corr.mask(np.triu(np.ones(corr.shape), k=0).astype(bool))
    tri = tri.stack().sort_values(ascending=False)
    tri = tri.reset_index()
    # originally the result (and unused ``cc``/``thresh`` locals) was
    # computed and silently discarded; return it so callers can use it
    return tri
def plot_all(factorPath, figPath):
    """Plot the cumulative return of every factor pickled in *factorPath*,
    saving one PNG per factor to *figPath*, ordered by absolute Sharpe ratio.

    Returns:
        pd.DataFrame: columns ``['indicator', 'sharpe']`` sorted descending.
    """
    fns = os.listdir(factorPath)
    ss = [pd.read_pickle(os.path.join(factorPath, fn)) for fn in fns]
    comb = pd.concat(ss, axis=1)
    tup = []
    for col, s in comb.items():
        sharpe_abs = abs(s.mean() / s.std())
        tup.append((col, sharpe_abs))
    tup = sorted(tup, key=lambda x: x[1], reverse=True)
    for i, ele in enumerate(tup):
        indicator = ele[0]
        s = comb[indicator].dropna()
        fig = plt.figure()
        plt.plot(s.index, s.cumsum())
        fig.savefig(os.path.join(figPath, indicator + '.png'))
        # release the figure: matplotlib keeps every open figure alive, and
        # hundreds of factors would otherwise exhaust memory
        plt.close(fig)
        print(i)
    sp = pd.DataFrame(tup, columns=['indicator', 'sharpe'])
    return sp
def select_a_model():
    """Return a three-factor model frame: market premium, SMB, plus the
    sorted factor with the highest absolute Sharpe ratio."""
    sharpe = pd.read_pickle(os.path.join(direc, 'sharpe.pkl'))
    best = sharpe['indicator'][0]
    factor = pd.read_pickle(os.path.join(factorPath, best + '.pkl'))
    ff3 = read_unfiltered('ff3M')
    return pd.concat([ff3[['rp', 'smb']], factor], axis=1).dropna()
def compare_model_with_ff3():
    """Run 5x5 regression details under FF3 and under the selected model,
    then dump the stacked alpha and t-value tables to csv."""
    benchmarks = [BENCH.by_benchmark('ff3M'), select_a_model()]
    tableas, tablets = [], []
    for bench in benchmarks:
        tablea, tablet = regression_details_5x5(bench)
        tableas.append(tablea)
        tablets.append(tablet)
    keys = ['ff3', 'myModel']
    pd.concat(tableas, axis=0, keys=keys).to_csv(os.path.join(direc, 'comba.csv'))
    pd.concat(tablets, axis=0, keys=keys).to_csv(os.path.join(direc, 'combt.csv'))
# find anomalies
def get_all_factor_from_database():
    """Build a decile long-short factor for every indicator registered in
    the Database and pickle each one to ``dirFactor_database``."""
    info = Database().info
    for indicator in (name for group in info.values() for name in group):
        factor = single_sorting_factor(indicator, q=10)
        factor.name = indicator
        factor.to_pickle(os.path.join(dirFactor_database, indicator + '.pkl'))
        print(indicator)
# get_all_factor_from_database()

# anomaly candidate factors tested below (cf. Chen & Zhang 2010, Table III):
# idiosyncratic volatility, liquidity, momentum, reversal, skewness, beta,
# investment, value, profitability
indicators=['idio__idioVol_ff3_1M__D',
            'liquidity__amihud',
            'liquidity__turnover1',
            'momentum__R3M',
            'reversal__reversal',
            'skewness__skew_24M__D',
            'idio__idioVol_capm_1M__D',
            'inv__inv',
            'value__bm',
            'beta__D_1M',
            'op__op',
            'roe__roe'
            ]
# table III of Chen, L., and Zhang, L. (2010). A better three-factor model that explains more anomalies. Journal of Finance 65, 563–595.
def _riskAdjust(s, bench=None):
    """Newey-West t-statistic of the intercept ("alpha") from regressing the
    factor series *s* on *bench*, or on a constant when *bench* is None.

    Args:
        s (pd.Series): factor return series.
        bench (pd.DataFrame, optional): benchmark factor returns.

    Returns:
        t-value of the regression intercept.
    """
    # rename via a copy: the original ``s.name = 'y'`` mutated the caller's
    # Series in place, a side effect visible outside this function
    df = s.rename('y').to_frame()
    if bench is not None:
        df = pd.concat([df, bench], axis=1)
        formula = 'y ~ {}'.format(' + '.join(bench.columns.tolist()))
    else:
        formula = 'y ~ 1'
    nw = newey_west(formula, df)
    return nw['Intercept']['t']
def compare_different_models():
    """Compute the risk-adjusted alpha t-value of each anomaly factor under
    every benchmark model, dump the table to csv, and count per model how
    often it yields the smallest |t| (i.e. explains the anomaly best).

    Returns:
        dict: model name -> number of anomalies it explains best.
    """
    # the benchmarks do not depend on the factor: build them once
    # (the original rebuilt every model inside the loop, per factor)
    mymodel = select_a_model()
    bs = list(Benchmark().info.keys())
    names = ['pure', 'my'] + bs
    benchs = [None, mymodel] + [Benchmark().by_benchmark(r) for r in bs]
    ts = []
    for factor in indicators:
        s = pd.read_pickle(os.path.join(dirFactor_database, factor + '.pkl'))
        t = pd.Series([_riskAdjust(s, bench) for bench in benchs], index=names)
        ts.append(t)
        print(factor)
    df = pd.concat(ts, axis=1, keys=indicators)
    df.to_csv(os.path.join(dirChen, 'intercept_tvalue.csv'))
    # count how often each model produces the smallest absolute t-value;
    # originally this dict was computed and silently discarded
    dic = {}
    for col, s in df.items():
        m = s.abs().idxmin()
        dic[m] = dic.get(m, 0) + 1
    return dic
def get_bivariate_panel(v1, v2='size__size'):
    """Build a 5x5 bivariate-sorted panel of value-weighted excess returns.

    Stocks are independently sorted each month on lagged *v1* and lagged
    *v2* (size by default); the returned frame has a (t) index and a
    (g1, g2) column MultiIndex of portfolio returns.
    """
    sampleControl = False
    q = 5

    # fetch each sort variable either from the Database or from a pickle
    ss=[]
    for v in [v1,v2]:
        if v in Database(sample_control=sampleControl).all_indicators:
            s=Database(sample_control=sampleControl).by_indicators([v])
        else:
            s=_read(v).stack()
            s.name=v
        ss.append(s)

    # data lagged
    weight = Database(sample_control=sampleControl).by_indicators(['weight'])
    datalagged = pd.concat(ss+[weight], axis=1)
    # lag by one period within each stock so sorts use prior-month data
    datalagged = datalagged.groupby('sid').shift(1)

    # data t
    datat = Database(sample_control=sampleControl).by_indicators(['stockEretM'])
    comb = pd.concat([datalagged, datat], axis=1)
    comb = comb.dropna()

    # independent q-quantile sorts on each variable, per month
    comb['g1'] = comb.groupby('t', group_keys=False).apply(
        lambda df: assign_port_id(df[v1], q))

    comb['g2'] = comb.groupby('t', group_keys=False).apply(
        lambda df: assign_port_id(df[v2], q))

    # value-weighted mean excess return per (month, g1, g2) cell
    panel = comb.groupby(['t', 'g1', 'g2']).apply(
        lambda df: my_average(df, 'stockEretM', wname='weight'))\
        .unstack(level=['g1','g2'])
    print(v1)
    return panel
def _get_panel(indicator):
    """Build the indicator-by-size bivariate panel and pickle it."""
    target = os.path.join(dirPanels, '{}.pkl'.format(indicator))
    get_bivariate_panel(indicator).to_pickle(target)
def multi_get_panel():
    # build all bivariate panels in parallel (5 worker processes)
    multi_processing(_get_panel,indicators,pool_size=5)
def compare_models_based_on_bivariate_assets():
    """Time-series test every benchmark model on each indicator's 5x5
    bivariate portfolios; dump the regression results and the
    model-performance summary to csv."""
    # model construction does not depend on the indicator: hoist it out of
    # the loop (the original rebuilt every benchmark once per indicator)
    mymodel = select_a_model()
    bs = list(Benchmark().info.keys())
    benchNames = ['pure', 'my'] + bs
    benchs = [None, mymodel] + [Benchmark().by_benchmark(r) for r in bs]
    resultLst = []
    psLst = []
    for indicator in indicators:
        assets = pd.read_pickle(os.path.join(dirPanels, '{}.pkl'.format(indicator)))
        rs = []
        ps = []
        for bench in benchs:
            rs.append(ts_panel(assets, bench))
            if bench is not None:
                ps.append(model_performance(assets.copy(), bench))
        resultLst.append(pd.concat(rs, axis=0, keys=benchNames))
        psLst.append(pd.concat(ps, axis=1, keys=benchNames[1:]).T)
        print(indicator)
    pd.concat(resultLst, axis=0, keys=indicators).to_csv(r'e:\a\result.csv')
    pd.concat(psLst, axis=0, keys=indicators).to_csv(r'e:\a\performance.csv')
# if __name__ == '__main__':
# multi_get_panel()
| [
"13163385579@163.com"
] | 13163385579@163.com |
ba53c86084ab7132c23a4ca3328f60329d7f26db | 7737ecae1022b942c8c81d300319e86761448712 | /q5/groups_reducer.py | 2c77f6329571bd553c570bd59fee8b1b7fe05a0d | [] | no_license | yongjili/CS-5364-Information-Retrieval | 2c8d0f70c3fc37bc3d75ac07744bd83523b1bb95 | 2fbac90af08d98ea584f5e96d211498e0ade7894 | refs/heads/master | 2020-12-25T03:21:26.450190 | 2016-03-12T06:02:08 | 2016-03-12T06:02:08 | 52,094,801 | 0 | 0 | null | 2016-02-19T14:57:29 | 2016-02-19T14:57:29 | null | UTF-8 | Python | false | false | 399 | py | import sys
oldKey = None
posters = []
for line in sys.stdin:
data_mapped = line.strip().split("\t")
if len(data_mapped) != 2:
continue
thisKey, author_id = data_mapped
if oldKey and oldKey != thisKey:
print oldKey, "\t", posters
oldKey = thisKey
posters = []
oldKey = thisKey
posters.append(int(author_id))
if oldKey != None:
print oldKey, "\t", posters
| [
"yongji.li@ttu.edu"
] | yongji.li@ttu.edu |
e2c04d3168034ac14b89491c4c87aa3e7a2b81ba | b4f728e9e1a2d799df956605880fbad4e82e2420 | /execrise_4.py | fc703abd6ebea14a99f6306ac9b1668e12684ede | [] | no_license | shaik882/execrises | 6d9cbffc3a29d81125af4ea144467a7bfa01b425 | 28c6fd1991a7bd8c5ffdd1244780041ae7deff7f | refs/heads/master | 2023-05-24T02:42:11.012970 | 2021-06-09T12:23:04 | 2021-06-09T12:23:04 | 267,795,596 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | #Program to play Dice game
import random
class Dice:
def roll(self):
first = random.randint(1, 6)
second = random.randint(1, 6)
return first, second
# demo: roll the pair once and print the result
dice = Dice()
print(dice.roll())
"shaik882@gmail.com"
] | shaik882@gmail.com |
8a677bd1b870436d692001e743366f641277934d | 14b280c2d987649469cc740e0f8fd865849c310a | /django_polls/urls.py | 41a5e0bcc674cb9719b031babbd9ccdc53519057 | [] | no_license | Vishu9/django_polls | b1452df453f8f2f214357df246ebb2586eee4612 | d9dd742abe6b624d8a875597690f61a47c08e329 | refs/heads/main | 2023-02-21T09:08:17.116796 | 2020-12-17T09:02:14 | 2020-12-17T09:02:14 | 322,238,387 | 0 | 0 | null | 2021-01-23T08:02:53 | 2020-12-17T09:04:08 | Python | UTF-8 | Python | false | false | 224 | py |
from django.contrib import admin
from django.urls import path, include
import polls.views
from django.views import generic
# URL routes: delegate /polls/ to the polls app, /admin/ to the admin site
urlpatterns = [
    path('polls/', include('polls.urls')),
    path('admin/', admin.site.urls),
]
"mr.vishalww@yahoo.com"
] | mr.vishalww@yahoo.com |
5c6fc7d522c9bf98823daaf72e654584b662ff66 | c26a5638c0166885fbe499d65373abb42bebaabe | /src/pydane/core/__init__.py | 7297e2a80008ec9aa88d81c5f84d0218990c2be9 | [
"ISC"
] | permissive | avaiss/pydane | 128d74e6e738865ea89aa4c776111f23da786de2 | 3d9449ab8c32fa2f5b988de3778b68ee3b942240 | refs/heads/master | 2021-05-28T02:50:20.738632 | 2014-11-23T18:40:30 | 2014-11-23T22:10:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | # coding=utf-8
# Copyright (C) 2014, Alexandre Vaissière
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
| [
"avaiss@fmiw.org"
] | avaiss@fmiw.org |
9be4a47a1465c09c4e475b51bae8a2ea45f791fa | 067f393ca7943c2e6b6ba336fdf87a431a76dff8 | /tests/test_graphical_units/test_img_and_svg.py | 2e3775722e89b408161d8d2309238e781549bfbb | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ffintg3v7/manim | c7c5603891c9c075fcbcba899ef7026be9522fcd | 7621c6ab8e9169a86b36bf7f9b862aa42995ce86 | refs/heads/master | 2023-04-02T06:03:42.033382 | 2021-04-09T12:06:33 | 2021-04-09T12:06:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,344 | py | import sys
from pathlib import Path
import pytest
from manim import *
from ..helpers.graphical_units import set_test_scene
from ..helpers.path_utils import get_project_root
from ..utils.GraphicalUnitTester import GraphicalUnitTester
from ..utils.testing_utils import get_scenes_to_test
def get_test_resource(filename):
    """Return the absolute path (as str) of *filename* in img_svg_resources."""
    resources = get_project_root() / "tests/test_graphical_units/img_svg_resources"
    return str(resources / filename)
# Tests break down into two kinds: one where the SVG is simple enough to step through
# and ones where the SVG is realistically complex, and the output should be visually inspected.
# First are the simple tests.
class CubicPathTest(Scene):
    """Render the cubic-path demo SVG."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("cubic_demo.svg")))
        self.wait()
class CubicAndLinetoTest(Scene):
    """Render an SVG mixing cubic curves and line-to commands."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("cubic_and_lineto.svg")))
        self.wait()
class RhomboidTest(Scene):
    """Test the default fill and parsed stroke of a rhomboid."""

    def construct(self):
        base = SVGMobject(get_test_resource("rhomboid.svg")).scale(0.5)
        filled = base.copy().set_fill(opacity=1).shift(UP * 2)
        unfilled = base.copy().set_fill(opacity=0).shift(DOWN * 2)
        self.add(base, filled, unfilled)
        self.wait()
class InheritanceTest(Scene):
    """Ensure SVG attribute inheritance is followed."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("inheritance_test.svg")).scale(0.5))
        self.wait()
class MultiPartPathTest(Scene):
    """Render an SVG whose path element has multiple subpaths."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("multi_part_path.svg")))
        self.wait()
class QuadraticPathTest(Scene):
    """Render the quadratic-curve demo SVG."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("qcurve_demo.svg")))
        self.wait()
class SmoothCurvesTest(Scene):
    """Render an SVG using smooth curve (S/T) commands."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("smooth_curves.svg")))
        self.wait()
class WatchTheDecimals(Scene):
    """Render an SVG exercising decimal coordinate parsing."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("watch_the_decimals.svg")))
        self.wait()
class UseTagInheritanceTest(Scene):
    """Render an SVG exercising <use> tag inheritance."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("aabbb.svg")))
        self.wait()
# Second are the visual tests - these are probably too complex to verify step-by-step, so
# these are really more of a spot-check
class WeightSVGTest(Scene):
    """Visual spot-check: render weight.svg."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("weight.svg")))
        self.wait()
class BrachistochroneCurveTest(Scene):
    """Visual spot-check: render the brachistochrone curve SVG."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("curve.svg")))
        self.wait()
class DesmosGraph1Test(Scene):
    """Visual spot-check: render a Desmos graph export, scaled up."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("desmos-graph_1.svg")).scale(3))
        self.wait()
class PenroseTest(Scene):
    """Visual spot-check: render a Penrose tiling SVG."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("penrose.svg")))
        self.wait()
class ManimLogoTest(Scene):
    """Visual spot-check: render the manim logo over a white backdrop."""

    def construct(self):
        backdrop = Rectangle(color=WHITE, fill_opacity=1).scale(2)
        logo = SVGMobject(get_test_resource("manim-logo-sidebar.svg"))
        self.add(backdrop, logo)
        self.wait()
class UKFlagTest(Scene):
    """Visual spot-check: render the United Kingdom flag SVG."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("united-kingdom.svg")))
        self.wait()
class SingleUSStateTest(Scene):
    """Visual spot-check: render a single US state outline."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("single_state.svg")))
        self.wait()
class ContiguousUSMapTest(Scene):
    """Visual spot-check: render the contiguous US map, scaled up."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("states_map.svg")).scale(3))
        self.wait()
class PixelizedTextTest(Scene):
    """Visual spot-check: render pixelated text over a white backdrop."""

    def construct(self):
        backdrop = Rectangle(color=WHITE, fill_opacity=1).scale(2)
        text = SVGMobject(get_test_resource("pixelated_text.svg"))
        self.add(backdrop, text)
        self.wait()
class VideoIconTest(Scene):
    """Visual spot-check: render a video icon SVG."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("video_icon.svg")))
        self.wait()
class MultipleTransformTest(Scene):
    """Render an SVG combining several transform attributes."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("multiple_transforms.svg")))
        self.wait()
class MatrixTransformTest(Scene):
    """Render an SVG using a matrix() transform."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("matrix.svg")))
        self.wait()
class ScaleTransformTest(Scene):
    """Render an SVG using a scale() transform."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("scale.svg")))
        self.wait()
class TranslateTransformTest(Scene):
    """Render an SVG using a translate() transform."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("translate.svg")))
        self.wait()
class SkewXTransformTest(Scene):
    """Render an SVG using a skewX() transform."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("skewX.svg")))
        self.wait()
class SkewYTransformTest(Scene):
    """Render an SVG using a skewY() transform."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("skewY.svg")))
        self.wait()
class RotateTransformTest(Scene):
    """Render an SVG using a rotate() transform."""

    def construct(self):
        self.add(SVGMobject(get_test_resource("rotate.svg")))
        self.wait()
class ImageMobjectTest(Scene):
    """Render the same PNG at default, 1080-line and 540-line scaling."""

    def construct(self):
        file_path = get_test_resource("tree_img_640x351.png")
        default_res = ImageMobject(file_path).shift(4 * LEFT + UP)
        full_hd = ImageMobject(file_path, scale_to_resolution=1080).shift(
            4 * LEFT + 2 * DOWN
        )
        half_hd = ImageMobject(file_path, scale_to_resolution=540).shift(4 * RIGHT)
        self.add(default_res, full_hd, half_hd)
        self.wait(1)
class ImageInterpolationTest(Scene):
    """Render a tiny gradient image under each resampling algorithm."""

    def construct(self):
        img = ImageMobject(
            np.uint8([[63, 0, 0, 0], [0, 127, 0, 0], [0, 0, 191, 0], [0, 0, 0, 255]])
        )
        img.height = 2
        copies = []
        for algorithm in ("nearest", "lanczos", "linear", "cubic", "box"):
            im = img.copy()
            im.set_resampling_algorithm(RESAMPLING_ALGORITHMS[algorithm])
            copies.append(im)
        self.add(*copies)
        # spread the five images horizontally, 2 units apart; a plain loop
        # replaces the original list comprehension used purely for side effects
        for pos, mob in enumerate(self.mobjects):
            mob.shift(4 * LEFT + pos * 2 * RIGHT)
        self.wait()
# module name used to locate the stored control data for these tests
MODULE_NAME = "img_and_svg"
@pytest.mark.parametrize("scene_to_test", get_scenes_to_test(__name__), indirect=False)
def test_scene(scene_to_test, tmpdir, show_diff):
    # render each scene in this module and compare frames against control data
    GraphicalUnitTester(scene_to_test[1], MODULE_NAME, tmpdir).test(show_diff=show_diff)
| [
"noreply@github.com"
] | ffintg3v7.noreply@github.com |
87ffa029204a02b6b24eb241d0b349c255608b57 | ccfc8b4b6b7a48e387c3ecd56ca110eb9f174367 | /python/work/5.0-stable/project/videoclient/api/persons/photos/urls.py | 75d2aa555f21b805075d975db736c3fe7ef27c5c | [] | no_license | denis-pinaev/tmp_trash | c4d6c4a4cefaacc8af5e93d1175f0a56e3d8e656 | 7642c0ef0cc45b978e579023406abfbbb656896d | refs/heads/master | 2016-08-11T12:03:43.376455 | 2016-03-04T12:53:35 | 2016-03-04T12:53:35 | 52,145,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from django.conf.urls.defaults import patterns, include, url
import views
urlpatterns = patterns('',
url(r'^list/*$', views.list, {}, 'api_list_photos'),
url(r'^left/*$', views.left, {}, 'api_left_photos'),
) | [
"corcc@yandex.ru"
] | corcc@yandex.ru |
8b0bbf598493047a2ddf09ef14a559e0ca925884 | 58983ba0dd5b8f39c9b280bce3628b098e2020c3 | /Hunger/Clientes/Hunger/wsgi.py | 760e4d0c38c6880d409e69cef0b4c65c213e2a72 | [
"Apache-2.0"
] | permissive | mjunior2016/HungerCompass | cb6406eb7b035a5a09a423a2dd53bbf77a0836dd | b87338a7f4efa5afb6f9fe04fd5fca6783452c0a | refs/heads/master | 2020-04-26T06:32:32.809799 | 2019-03-14T00:01:56 | 2019-03-14T00:01:56 | 173,367,598 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for Hunger project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# default to the project settings module unless the environment overrides it
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Hunger.settings')
application = get_wsgi_application()
| [
"marcosjunior2016@hotmail.com"
] | marcosjunior2016@hotmail.com |
488691c6b9d0bcb63bd98066118597932c8db642 | 0b0db5c669ce1e4f1aaf0d44daa87790271df14a | /tutorial/settings.py | dc56a657a26d9e6f08dfc321a360719701ae02e0 | [] | no_license | lijinjiong/spider | 9ab10af2bc6578283b58e2268701ca207d659269 | 303d8a6439de0992b5b4d105b9f0c68484139fec | refs/heads/master | 2021-09-04T17:22:53.149948 | 2018-01-20T10:13:48 | 2018-01-20T10:13:48 | 114,831,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,198 | py | # -*- coding: utf-8 -*-
#feed_export_encoding
FEED_EXPORT_ENCODING = 'utf-8'
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# project identity and where Scrapy discovers spider classes
BOT_NAME = 'tutorial'

SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tutorial.middlewares.TutorialSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tutorial.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'tutorial.pipelines.TutorialPipeline': 300,  # single pipeline, priority 300
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"huangmei@lxbznkj.com"
] | huangmei@lxbznkj.com |
fbec5f8aff50f8a10897aca674a0c3f8dcbbb350 | e9d9a7582bd1c72469ab3b570c7c8d554726afb8 | /docker/bcctapp/migrations/0001_initial.py | 15861ab3b39f34f7b97ff103ded3e8c1b3181e76 | [] | no_license | safarsaitam/ctm-interships-2020-webbcctcore | 6a03419724d9e92da59773b62dcce22ed301abbf | 36bbe4c4288faecd7a00638e48e36769de789aa1 | refs/heads/master | 2023-02-26T20:57:15.986190 | 2020-08-02T10:57:23 | 2020-08-02T10:57:23 | 333,961,489 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,139 | py | # Generated by Django 2.2.5 on 2020-07-09 10:42
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ImagesPatient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.IntegerField(null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='medical_images4/')),
('mime_type', models.CharField(max_length=100, null=True)),
('file_type', models.CharField(max_length=100, null=True)),
('image_width', models.CharField(max_length=100, null=True)),
('image_height', models.CharField(max_length=100, null=True)),
('date_created', models.CharField(max_length=100, null=True)),
('date_updated', models.CharField(max_length=100, null=True)),
('days', models.IntegerField()),
('left_endpoint_x', models.FloatField(null=True)),
('left_endpoint_y', models.FloatField(null=True)),
('l_breast_contour_2_x', models.FloatField(null=True)),
('l_breast_contour_2_y', models.FloatField(null=True)),
('l_breast_contour_3_x', models.FloatField(null=True)),
('l_breast_contour_3_y', models.FloatField(null=True)),
('l_breast_contour_4_x', models.FloatField(null=True)),
('l_breast_contour_4_y', models.FloatField(null=True)),
('l_breast_contour_5_x', models.FloatField(null=True)),
('l_breast_contour_5_y', models.FloatField(null=True)),
('l_breast_contour_6_x', models.FloatField(null=True)),
('l_breast_contour_6_y', models.FloatField(null=True)),
('l_breast_contour_7_x', models.FloatField(null=True)),
('l_breast_contour_7_y', models.FloatField(null=True)),
('l_breast_contour_8_x', models.FloatField(null=True)),
('l_breast_contour_8_y', models.FloatField(null=True)),
('l_breast_contour_9_x', models.FloatField(null=True)),
('l_breast_contour_9_y', models.FloatField(null=True)),
('l_breast_contour_10_x', models.FloatField(null=True)),
('l_breast_contour_10_y', models.FloatField(null=True)),
('l_breast_contour_11_x', models.FloatField(null=True)),
('l_breast_contour_11_y', models.FloatField(null=True)),
('l_breast_contour_12_x', models.FloatField(null=True)),
('l_breast_contour_12_y', models.FloatField(null=True)),
('l_breast_contour_13_x', models.FloatField(null=True)),
('l_breast_contour_13_y', models.FloatField(null=True)),
('l_breast_contour_14_x', models.FloatField(null=True)),
('l_breast_contour_14_y', models.FloatField(null=True)),
('l_breast_contour_15_x', models.FloatField(null=True)),
('l_breast_contour_15_y', models.FloatField(null=True)),
('l_breast_contour_16_x', models.FloatField(null=True)),
('l_breast_contour_16_y', models.FloatField(null=True)),
('left_midpoint_x', models.FloatField(null=True)),
('left_midpoint_y', models.FloatField(null=True)),
('right_endpoint_x', models.FloatField(null=True)),
('right_endpoint_y', models.FloatField(null=True)),
('r_breast_contour_19_x', models.FloatField(null=True)),
('r_breast_contour_19_y', models.FloatField(null=True)),
('r_breast_contour_20_x', models.FloatField(null=True)),
('r_breast_contour_20_y', models.FloatField(null=True)),
('r_breast_contour_21_x', models.FloatField(null=True)),
('r_breast_contour_21_y', models.FloatField(null=True)),
('r_breast_contour_22_x', models.FloatField(null=True)),
('r_breast_contour_22_y', models.FloatField(null=True)),
('r_breast_contour_23_x', models.FloatField(null=True)),
('r_breast_contour_23_y', models.FloatField(null=True)),
('r_breast_contour_24_x', models.FloatField(null=True)),
('r_breast_contour_24_y', models.FloatField(null=True)),
('r_breast_contour_25_x', models.FloatField(null=True)),
('r_breast_contour_25_y', models.FloatField(null=True)),
('r_breast_contour_26_x', models.FloatField(null=True)),
('r_breast_contour_26_y', models.FloatField(null=True)),
('r_breast_contour_27_x', models.FloatField(null=True)),
('r_breast_contour_27_y', models.FloatField(null=True)),
('r_breast_contour_28_x', models.FloatField(null=True)),
('r_breast_contour_28_y', models.FloatField(null=True)),
('r_breast_contour_29_x', models.FloatField(null=True)),
('r_breast_contour_29_y', models.FloatField(null=True)),
('r_breast_contour_30_x', models.FloatField(null=True)),
('r_breast_contour_30_y', models.FloatField(null=True)),
('r_breast_contour_31_x', models.FloatField(null=True)),
('r_breast_contour_31_y', models.FloatField(null=True)),
('r_breast_contour_32_x', models.FloatField(null=True)),
('r_breast_contour_32_y', models.FloatField(null=True)),
('r_breast_contour_33_x', models.FloatField(null=True)),
('r_breast_contour_33_y', models.FloatField(null=True)),
('right_midpoint_x', models.FloatField(null=True)),
('right_midpoint_y', models.FloatField(null=True)),
('sternal_notch_x', models.FloatField(null=True)),
('sternal_notch_y', models.FloatField(null=True)),
('left_nipple_x', models.FloatField(null=True)),
('left_nipple_y', models.FloatField(null=True)),
('right_nipple_x', models.FloatField(null=True)),
('right_nipple_y', models.FloatField(null=True)),
('view_type', models.IntegerField(blank=True, choices=[(1, 'Anterior Posterior'), (2, 'Lateral Esquerda'), (3, 'Lateral Direita')], default=0, null=True)),
('img_type', models.IntegerField(blank=True, choices=[(1, 'Classificação Estéticca'), (2, 'Mamografia')], default=0, null=True)),
],
),
migrations.CreateModel(
name='InteractionsPatient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.IntegerField(null=True)),
('image_id', models.IntegerField(null=True)),
('author', models.IntegerField(null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='medical_images4/')),
('date_created', models.DateTimeField(auto_now_add=True)),
('interaction_type', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=255)),
('file', models.FileField(upload_to='photos/')),
('uploaded_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Teams',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('number', models.IntegerField()),
('users', models.CharField(max_length=10000000, validators=[django.core.validators.int_list_validator])),
('patients', models.CharField(max_length=10000000, validators=[django.core.validators.int_list_validator])),
],
),
migrations.CreateModel(
name='Patient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=100, null=True)),
('last_name', models.CharField(max_length=100, null=True)),
('age', models.IntegerField(null=True)),
('birthday', models.CharField(max_length=100)),
('surgery_date', models.CharField(max_length=100)),
('patient_height', models.IntegerField(null=True)),
('patient_weight', models.IntegerField(null=True)),
('bra', models.CharField(max_length=100, null=True)),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('n_images', models.IntegerField()),
('team', models.IntegerField()),
('share', models.CharField(max_length=100000, validators=[django.core.validators.int_list_validator])),
('surgery_type', models.IntegerField(choices=[(1, 'Conservative surgery - unilateral'), (2, 'Conservative surgery with bilateral reduction'), (3, 'Conservative surgery with LD or LICAP / TDAP'), (4, 'Mastectomy with unilateral reconstruction with implant'), (5, 'Mastectomy with unilateral reconstruction with autologous flap'), (6, 'Mastectomy with bilateral reconstruction with implants'), (7, 'Mastectomy with unilateral reconstruction with implant and contralateral symmetrization with implant (augmentation)'), (8, 'Mastectomy with unilateral reconstruction with implant and contralateral symmetrization with reduction'), (9, 'Mastectomy with unilateral reconstruction with autologous flap and contralateral symmetrization with reduction'), (10, 'Mastectomy with unilateral reconstruction with autologous flap and contralateral symmetrisation with implant (augmentation)')])),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='MedicalImages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, null=True, upload_to='medical_images/')),
('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"tiago.galves@hotmail.com"
] | tiago.galves@hotmail.com |
42ea6542998ab172e883faf783222a5f90e1c0ad | ebcb092d796366d36a1afe9c381cd9e4c31026f1 | /python_markup/handlers.py | b4d1acfc276ad3b816d1d590b2c12416311792c6 | [
"MIT"
] | permissive | MiracleWong/PythonBasic | d2e0e56c88781ebf9c6870f185ceaba6ffaa21ca | cb8ec59dc646842b41966ea4ea4b1ee66a342eee | refs/heads/master | 2021-06-06T22:26:08.780210 | 2020-01-08T14:48:54 | 2020-01-08T14:48:54 | 96,536,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,976 | py | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# filename: handlers.py
# 为文本块打上合适的 HTML 标记
class Handler:
    """
    Base class for markup handlers.

    Subclasses implement optional hook methods named ``<prefix><name>``
    (e.g. ``start_paragraph``); :meth:`callback` dispatches to them by
    name and silently does nothing when a hook is missing.
    """
    def callback(self, prefix, name, *args):
        # Look the hook up dynamically; a missing hook is simply skipped.
        hook = getattr(self, prefix + name, None)
        if not callable(hook):
            return None
        return hook(*args)
    def start(self, name):
        # Fire the opening hook for a block, e.g. start_paragraph().
        self.callback('start_', name)
    def end(self, name):
        # Fire the closing hook for a block, e.g. end_paragraph().
        self.callback('end_', name)
    def sub(self, name):
        # Build a re.sub() replacement callable bound to the sub_<name> hook.
        # When no hook handles the match, the matched text is kept verbatim.
        def substitution(match):
            replaced = self.callback('sub_', name, match)
            return match.group(0) if replaced is None else replaced
        return substitution
class HTMLRenderer(Handler):
    """
    HTML handler: emits the matching HTML tag for each text block.

    Fixed here: several ``print`` calls were missing their closing
    parenthesis and one used Python 2 statement syntax, both of which
    made this module a SyntaxError under Python 3.
    """
    def start_document(self):
        print('<html><head><title>ShiYanLou</title></head><body>')
    def end_document(self):
        print('</body></html>')
    def start_paragraph(self):
        print('<p style="color: #444;">')
    def end_paragraph(self):
        print('</p>')
    def start_heading(self):
        print('<h2 style="color: #68BE5D;">')
    def end_heading(self):
        print('</h2>')
    def start_list(self):
        print('<ul style="color: #363736;">')
    def end_list(self):
        print('</ul>')
    def start_listitem(self):
        print('<li>')
    def end_listitem(self):
        print('</li>')
    def start_title(self):
        print('<h1 style="color: #1ABC9C;">')
    def end_title(self):
        print('</h1>')
    def sub_emphasis(self, match):
        # *text* -> <em>text</em>
        return '<em>%s</em>' % match.group(1)
    def sub_url(self, match):
        # Bare URL -> clickable link opening in a new tab.
        return '<a target="_blank" style="text-decoration: none;color: #BC1A4B;" href="%s">%s</a>' % (match.group(1), match.group(1))
    def sub_mail(self, match):
        # E-mail address -> mailto: link.
        return '<a style="text-decoration: none;color: #BC1A4B;" href="mailto:%s">%s</a>' % (match.group(1), match.group(1))
    def feed(self, data):
        # Plain text passes through unchanged.
        print(data)
"cfwr1991@126.com"
] | cfwr1991@126.com |
ae9094f55f7e0e5d413608d96218844164267b62 | 97da83469728be9fa1dc89a2669098e8dc03440d | /petup.spec | 04587aca8ec01017deee92aeaaffb87c251d2b1b | [] | no_license | Victor-J-L/JOGO-PETEEL | d030f44cb9ef8bb1fcbad66125688992559ba721 | d4191a808f13b1305f2835e9bf4ec76e7909927b | refs/heads/master | 2023-02-02T13:41:12.405659 | 2020-12-21T19:21:06 | 2020-12-21T19:21:06 | 299,363,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | spec | # -*- mode: python ; coding: utf-8 -*-
# PyInstaller build specification for the 'petup' game.
# NOTE: Analysis, PYZ and EXE are injected into this file's namespace by
# PyInstaller when it executes the spec (`pyinstaller petup.spec`); this
# file is not importable as a normal Python module.
block_cipher = None
# Collect the script, its imports and data files.
a = Analysis(['petup.py'],
             pathex=['C:\\Users\\Victor\\Desktop\\Victor\\PETEEL\\Projeto Calouros\\Jogo PETEEL'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Bundle the pure-Python modules into a compressed archive.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# Produce a single-file console executable named 'petup'.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='petup',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          upx_exclude=[],
          runtime_tmpdir=None,
          console=True )
| [
"victorjolou@gmail.com"
] | victorjolou@gmail.com |
5395d6e1688efecb396d701104f7d238e776d00d | 27688bc0d9d8b6a8326218145214333af9f5b70a | /website/migrations/0008_auto_20161112_0428.py | 415ab85ff196aa7f1dbbc167eba4fe4a292d5369 | [] | no_license | ferminarellano/realestate-website | 408eb37a1470d6fe2263ed0fdee8e545b4f724dd | 63bd27b78f51417e0eefc1ba3a0e1b68ba5156ef | refs/heads/master | 2020-12-24T11:52:35.806432 | 2017-02-11T19:40:57 | 2017-02-11T19:40:57 | 73,109,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-12 04:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename the admin display labels of MenuItem."""
    dependencies = [
        ('website', '0007_auto_20161112_0426'),
    ]
    operations = [
        # Only model Meta options change; no database schema is altered.
        migrations.AlterModelOptions(
            name='menuitem',
            options={'verbose_name': 'Menu Item', 'verbose_name_plural': 'Menu items'},
        ),
    ]
| [
"ferminarellano.hn@gmail.com"
] | ferminarellano.hn@gmail.com |
6344174edb82b52826ffe9156911e57162cf52b4 | c251223c9829a51fac8ae4d651dba0068da68f43 | /language_converter/main.py | 9d08979ae219e1e14aa3fa15aab2a9150fa319d7 | [] | no_license | Ajax12345/Web-Apps | b1c10e73f2c403cc900a0eddccb1d95b5f71e8aa | 105dfef93aa975cb95fa0216095939d33c2eb19a | refs/heads/master | 2021-01-23T17:34:05.962959 | 2017-09-18T23:44:37 | 2017-09-18T23:44:37 | 102,767,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | from bottle import Bottle, template, request
app = Bottle()
users = [{"ajax1234":"zorro"}]
username = None
password = None
@app.route('/')
def index():
    """Render the landing page with its initial greeting."""
    # The previous unused `data` dict was removed; the template only
    # needs the keyword argument passed below.
    return template("simple.html", to_display = "HI, how are you?")
@app.route('/run_code', method = "POST")
def get_code():
    """Read code submitted via the POST form and echo it to stdout."""
    full_code = request.forms.get('code')
    # Python 3 print function (was Python 2 statement syntax, a
    # SyntaxError under Python 3).
    print(full_code)
if __name__ == '__main__':
app.run()
| [
"noreply@github.com"
] | Ajax12345.noreply@github.com |
de61742f48f0bcf3070bd9ef57eb81ec8a345e8d | 0764420cfb7da709909979747792c177bed24e8a | /FairBNB/kivymd/toast/kivytoast/kivytoast.py | ed654797ec4f080cd6ef03c0ee5ef473ef37582c | [
"MIT"
] | permissive | flruee/AdvancedSoftwareProject | bebea9fe1fca617d0b9f562a29a1fb919543d395 | 0bfe61da6b249f23eeea8670d263cd958e466b0f | refs/heads/main | 2023-05-11T05:01:12.940188 | 2021-05-30T18:19:16 | 2021-05-30T18:19:16 | 345,618,131 | 0 | 2 | null | 2021-05-30T16:47:03 | 2021-03-08T10:32:20 | Python | UTF-8 | Python | false | false | 3,635 | py | """
KivyToast
=========
.. rubric:: Implementation of toasts for desktop.
.. code-block:: python
from kivy.lang import Builder
from kivymd.app import MDApp
from kivymd.toast import toast
KV = '''
MDScreen:
MDToolbar:
title: 'Test Toast'
pos_hint: {'top': 1}
left_action_items: [['menu', lambda x: x]]
MDRaisedButton:
text: 'TEST KIVY TOAST'
pos_hint: {'center_x': .5, 'center_y': .5}
on_release: app.show_toast()
'''
class Test(MDApp):
def show_toast(self):
'''Displays a toast on the screen.'''
toast('Test Kivy Toast')
def build(self):
return Builder.load_string(KV)
Test().run()
"""
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import ListProperty, NumericProperty
from kivy.uix.label import Label
from kivymd.uix.dialog import BaseDialog
Builder.load_string(
"""
<Toast>:
size_hint: (None, None)
pos_hint: {"center_x": 0.5, "center_y": 0.1}
opacity: 0
auto_dismiss: True
overlay_color: [0, 0, 0, 0]
canvas:
Color:
rgba: root._md_bg_color
RoundedRectangle:
pos: self.pos
size: self.size
radius: root.radius
"""
)
class Toast(BaseDialog):
    # Desktop toast: a small auto-dismissing label shown near the bottom
    # of the window, styled by the KV rule loaded above.
    duration = NumericProperty(2.5)
    """
    The amount of time (in seconds) that the toast is visible on the screen.
    :attr:`duration` is an :class:`~kivy.properties.NumericProperty`
    and defaults to `2.5`.
    """
    # Background colour drawn by the rounded rectangle in the KV rule.
    _md_bg_color = ListProperty()
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # The label starts invisible; it is faded in together with the dialog.
        self.label_toast = Label(size_hint=(None, None), opacity=0)
        self.label_toast.bind(texture_size=self.label_check_texture_size)
        self.add_widget(self.label_toast)
    def label_check_texture_size(self, instance, texture_size):
        """Resize the toast to fit the label texture, wrapping long text."""
        texture_width, texture_height = texture_size
        if texture_width > Window.width:
            # Constrain the text width so the label wraps, then re-measure.
            instance.text_size = (Window.width - dp(10), None)
            instance.texture_update()
            texture_width, texture_height = instance.texture_size
        # 25px of padding around the text on each axis.
        self.size = (texture_width + 25, texture_height + 25)
    def toast(self, text_toast):
        """Set the toast text and show the dialog."""
        self.label_toast.text = text_toast
        self.open()
    def on_open(self):
        # Fade in, then schedule the fade-out after `duration` seconds.
        self.fade_in()
        Clock.schedule_once(self.fade_out, self.duration)
    def fade_in(self):
        """Animate label and dialog from transparent to opaque."""
        anim = Animation(opacity=1, duration=0.4)
        anim.start(self.label_toast)
        anim.start(self)
    def fade_out(self, *args):
        """Animate to transparent, then dismiss the dialog."""
        anim = Animation(opacity=0, duration=0.4)
        anim.bind(on_complete=lambda *x: self.dismiss())
        anim.start(self.label_toast)
        anim.start(self)
    def on_touch_down(self, touch):
        # A touch outside the toast dismisses it early (when allowed).
        if not self.collide_point(*touch.pos):
            if self.auto_dismiss:
                self.fade_out()
                return False
        super().on_touch_down(touch)
        return True
def toast(text="", background=None, duration=2.5):
    """Displays a toast.

    :attr text: message shown inside the toast
    :type text: str
    :attr background: color ``rgba`` in Kivy format; defaults to dark grey
    :type background: list
    :attr duration: the amount of time (in seconds) that the toast is visible on the screen
    :type duration: float
    """
    # Avoid a mutable default argument: build the default colour per call.
    if background is None:
        background = [0.2, 0.2, 0.2, 1]
    Toast(duration=duration, _md_bg_color=background).toast(text)
| [
"florian.rueegsegger@gmail.com"
] | florian.rueegsegger@gmail.com |
2c54341086eb673dbaca5e019ce478174e4d80db | 058eb45464c58cae808f8b9b9134d763e45907ba | /web_dev_blog/lib/python3.6/types.py | 977c004c862e4a3ddd008ff56ddee6daa9c4d0a4 | [] | no_license | ruisp666/web-dev | 4b590aac742bb885a055b50630c859820572e4c2 | a0b0aa301b8ecac4ff3c004e44965919e6122d4a | refs/heads/master | 2020-03-11T02:05:49.731692 | 2018-04-16T09:38:19 | 2018-04-16T09:38:19 | 129,703,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | /Users/sapereira/anaconda3/lib/python3.6/types.py | [
"manuelsapereira@gmail.com"
] | manuelsapereira@gmail.com |
c9b0e136884e410cd43957424af878cf30e371e9 | 78c455c0095a5e1cc5bb76b9c3e686012e72c8f3 | /prod/mesANumero.py | 98eac38da572e892a46382e7e89504889b338e20 | [] | no_license | JorgeRamos01/Proyecto-texto | 0ec04815d4442ea03d336f8071ef2b01c2696cce | a19f551f807652b30aba4cd5a412f8dae95263db | refs/heads/master | 2020-05-31T21:00:08.454819 | 2019-06-06T00:53:41 | 2019-06-06T00:53:41 | 190,488,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | import re # regular expressions
import datetime
def mNumero(string):
    """Rewrite 'dd/<Spanish month name>/yyyy' as 'dd/mm/yyyy'.

    The month lookup is case-insensitive; day and year pass through
    untouched.
    """
    month_numbers = {
        'enero': "01", 'febrero': "02", 'marzo': "03", 'abril': "04",
        'mayo': "05", 'junio': "06", 'julio': "07", 'agosto': "08",
        'septiembre': "09", 'octubre': "10", 'noviembre': "11",
        'diciembre': "12",
    }
    parts = string.split("/")
    return "/".join([parts[0], month_numbers[parts[1].lower()], parts[2]])
def str_to_date(text):
    """Parse 'dd/<Spanish month name>/yyyy' into a ``datetime.date``.

    The month lookup is case-insensitive.  Removed here: a dead local
    (``spt_str``) and a redundant ``str()`` around a value that is
    already a string.
    """
    month_numbers = {
        'enero': "01",
        'febrero': "02",
        'marzo': "03",
        'abril': "04",
        'mayo': "05",
        'junio': "06",
        'julio': "07",
        'agosto': "08",
        'septiembre': "09",
        'octubre': "10",
        'noviembre': "11",
        'diciembre': "12"
    }
    parts = text.split("/")
    day = int(parts[0])
    month = int(month_numbers[parts[1].lower()])
    year = int(parts[2])
    return datetime.date(year, month, day)
# Extract the sentence of rawdate
def extract_rawdate(text):
    """Return the first 'N de <word> de N' date phrase found in *text*."""
    matches = re.findall(r"\d+ de+ \w+ de \d+", text)
    return matches[0]
# Transform the raw date to an datetime object
def trans_rawdate(patt):
    """Convert a raw '15 de enero de 2020' phrase into a datetime.date.

    Replaces the connective 'de' with '/' and strips the spaces to get
    '15/enero/2020', then delegates parsing to str_to_date().
    """
    res_temp = str(patt).replace("de", "/")
    res_temp = res_temp.replace(" ", "") # remove blank spaces
return(str_to_date(res_temp)) | [
"noreply@github.com"
] | JorgeRamos01.noreply@github.com |
784b1a983bf87555a8d9fb0a88ed8431fafbbcab | ed568c336a42dfd69cadee8c93b5335e6d02c972 | /Profilepage/migrations/0009_auto_20201203_0619.py | 622a160f25df99d6cf347a0b47239657a102cc96 | [] | no_license | SaqibAli96/The-social-network | f34c1e2bdea08732cd8e27b3a32fea7e680e1e1c | 37d9306e33535695a972457e1fa3aff1aba45023 | refs/heads/master | 2023-08-16T22:28:32.023508 | 2021-10-09T13:06:38 | 2021-10-09T13:06:38 | 415,301,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | # Generated by Django 3.1.1 on 2020-12-03 00:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adjust image upload paths/defaults."""
    dependencies = [
        ('Profilepage', '0008_auto_20201203_0507'),
    ]
    operations = [
        # Post images now upload to the 'Post' directory.
        migrations.AlterField(
            model_name='post',
            name='image',
            field=models.ImageField(upload_to='Post'),
        ),
        # Profile pictures upload to 'Profiles' with a placeholder default.
        migrations.AlterField(
            model_name='profile',
            name='userImage',
            field=models.ImageField(default='default/defaultimg.jpg', upload_to='Profiles'),
        ),
    ]
| [
"saquibsaiyyed@gmail.com"
] | saquibsaiyyed@gmail.com |
2db600c0467c6612aaabd2e26e0f59336663b91e | df8fef14cddba4040f740f751d1e8c962ed4a756 | /src/__init__.py | 80be2eee0b6a745225fce453dce43874bece2eb9 | [] | no_license | Rodrigosis/meli-challenge | 1ab16f65a69d723bd089efd8edf6cfd877c83d6d | d391716a90a8fa5d058d8a471a11b914d5d6ec1b | refs/heads/master | 2023-08-20T19:26:43.244127 | 2021-10-25T21:35:49 | 2021-10-25T21:35:49 | 421,090,052 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | from fastapi import FastAPI
from src.application import controller
# Application entry point: the FastAPI instance served by the ASGI server.
app = FastAPI()
# Mount the HTTP routes defined in src/application/controller.py.
app.include_router(controller.router)
| [
"rodrigo.sis.s7@gmail.com"
] | rodrigo.sis.s7@gmail.com |
eb7a298a8cca7f502b7de2373f586629310292b4 | bda12343a5e89cf13fbc9ff7d28b15f48a874cb2 | /generatesheet.py | 6d42fa1abc36ffe809c734bd711a4d36dde7ac60 | [] | no_license | luiz-meireles/scripts | 7bda5eeec6e8d1e292b917673417dcd22271c207 | 3542aea3ebe86989e06291b5776e92649eba0f6c | refs/heads/master | 2021-03-10T02:40:06.435507 | 2020-03-10T21:07:17 | 2020-03-10T21:07:17 | 246,408,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | # -*- coding: utf-8 -*-
import openpyxl
import datetime
from random import randint
import argparse
sheet = openpyxl.Workbook()
active_sheet = sheet.active
active_sheet.title = "Generated fake users"
dr = ['sp', 'rj', 'mg']
turma = ['special-ops', 'team2', 'team3', 'team2']
uni = ['10 - maracatins', '15-geekie']
turno = [ 'Manhã', 'Tarde', 'Noite']
def main(start_id, end_id):
    """Append a header row plus one fake-user row per id in the inclusive
    range [start_id, end_id] to the module-level worksheet, then save the
    workbook with a timestamped file name."""
    # Header row (column titles expected by the import tool).
    active_sheet.append([
        u"identificador único",
        u"nome comoleto",
        u"email",
        u"ano",
        u"turma",
        u"turno",
        u"departamento regional",
        u"unidade",
        u"segmento em",
        u"perfil",
        u"status",
        u"idgeekie",
        u"idgeekieuni"
    ])
    # Pick a random element. NOTE(review): the parameter shadows the
    # builtin `list`; consider renaming it.
    def get_any(list):
        return list[randint(0, len(list) - 1)]
    # Embed the current timestamp so consecutive runs do not overwrite.
    output_file_name = u"{}- generated.xlsx".format(
        datetime.datetime.now()
    )
    for i in range(start_id, end_id + 1, 1):
        # One fake student per id, with randomized year/class/shift/unit.
        active_sheet.append([
            str(i),
            u"Nome{}".format(i),
            u"nome{}@teste.com.br".format(i),
            str(randint(1, 9)),
            get_any(turma),
            get_any(turno),
            get_any(dr),
            get_any(uni),
            u"Novo EM",
            u"estudante",
            u"",
            u"",
            u"",
        ])
    sheet.save(filename=output_file_name)
if __name__ == "__main__":
    # CLI: --start-id and --range-ids bound the inclusive id range
    # passed to main().
    parser = argparse.ArgumentParser(
        description="creates a fake sheet."
    )
    parser.add_argument("--start-id", required=True, dest="start_id")
    parser.add_argument("--range-ids", required=True, dest="range_ids")
    args = parser.parse_args()
main(int(args.start_id), int(args.range_ids)) | [
"luiz.meireles@hotmail.co.uk"
] | luiz.meireles@hotmail.co.uk |
cdc626de8576a1ae62bd9055860e43d370c3b224 | ce30a76ac6da9acab9e6b8de0b23a77989168e64 | /file2(Using MatplotLid).py | 881cb0e2054302b83e7f05e298f54d6a86712c41 | [] | no_license | arboiscodemedia/Dicom | c905db3c0ff786175bed79ea978a7fc6129edc20 | e9330ee8955cb04ee56160a9883575f131020f00 | refs/heads/main | 2023-05-09T03:57:31.623153 | 2021-05-30T08:52:43 | 2021-05-30T08:52:43 | 371,937,557 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | import pydicom as dicom
import matplotlib.pyplot as plt
path = "./img/D0006.dcm"
x=dicom.dcmread(path)
plt.imshow(x.pixel_array,cmap=plt.cm.gray)
plt.show() | [
"noreply@github.com"
] | arboiscodemedia.noreply@github.com |
cbc4b387cf33b7dfba311fc45d6eb3e553fb8662 | c1776e89d88b490c1925ea2c6153bfa43572c668 | /models/Usuario.py | be77b09528765e70d3b2f8cd7a7b40081951040e | [] | no_license | dgioielli/Devaria_BackEnd_Python | e536583ead18e93aae7b185becff6c7e061c7bd5 | 67d25b24034169cc17a9514f19b8ce0f54ecfd01 | refs/heads/main | 2023-06-04T21:28:03.368272 | 2021-06-21T19:04:47 | 2021-06-21T19:04:47 | 373,605,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | from sqlalchemy import Column, Integer, String, inspect
from sqlalchemy.orm import relationship
import config
import database.databese
from database.databese import Base, engine
class Usuario(Base):
    """SQLAlchemy model for application users (table 'usuario')."""
    __tablename__ = 'usuario'
    # Bind the table to the shared metadata of the database module.
    metadata = database.databese.metadata
    id = Column(Integer, primary_key=True, index=True)
    nome = Column(String(100))   # display name
    email = Column(String(100))
    # NOTE(review): plain String column named 'senha' (password) --
    # confirm hashing happens before values are stored.
    senha = Column(String(100))
    #tarefas = relationship("Tarefa")
# Create the table at import time when it does not exist yet.
if not inspect(engine).has_table('usuario', schema=config.MYSQL_DATABASE):
    Usuario.__table__.create(engine)
"douglasgioielli@gmail.com"
] | douglasgioielli@gmail.com |
ab3cb641971ac50eb6cee0ed50e637ae46dd5c54 | 165ab339ffc782b2cb4144373672dad0376d7670 | /agenda/users.py | c582781488d2daa068e8488b887badd33cd5cc31 | [
"MIT"
] | permissive | domeav/sonofages-agenda | 496789f387a7b806d36b35eead281fe3ac4d8e46 | e099802572ba01f6dbdd719a7c15ca5c928cc0fc | refs/heads/master | 2021-01-24T10:49:39.219355 | 2016-12-14T15:56:42 | 2016-12-14T15:56:42 | 70,100,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | from agenda.model import User, Event
from agenda.forms import UserForm
from flask import render_template, request, redirect, url_for
from agenda import app
@app.route('/user/<user_id>')
def user(user_id):
    """Render the profile page of a single user."""
    user = User.get(User.id == user_id)
    return render_template('user.html', user=user)
@app.route('/users/')
def users():
    """List all users ordered by username."""
    users = User.select().order_by(User.username)
    return render_template('users.html', users=users)
@app.route('/user_events/<user_id>')
def user_events(user_id):
    """List the events owned by a user, newest first."""
    # NOTE(review): `.order_by(Event.creation).desc()` calls .desc() on
    # the query, not on the field; the usual peewee form is
    # `.order_by(Event.creation.desc())` -- confirm against the peewee
    # version in use.
    events = Event.select()\
                  .where(Event.owner == user_id)\
                  .order_by(Event.creation).desc()
    return render_template('user_events.html', events=events)
@app.route('/user/edit/<user_id>')
@app.route('/user/edit/')
def edit_user(user_id=None):
    """Show the user form: pre-filled when editing, empty when creating."""
    user = None
    if user_id:
        user = User.get(User.id == user_id)
    return render_template('user_edit.html', form=UserForm(obj=user))
@app.route('/user/save/', methods=['POST'])
def save_user():
    """Validate the submitted user form and create or update the user.

    Redisplays the form on validation failure; otherwise persists the
    record and redirects to the user's profile page.
    """
    form = UserForm()
    if not form.validate_on_submit():
        return render_template('user_edit.html', form=form)
    # A hidden id distinguishes "edit existing" from "create new".
    if form.id.data:
        user = User.get(User.id == form.id.data)
    else:
        user = User()
    user.username = form.username.data
    user.name = form.name.data
    user.contact = form.contact.data
    user.presentation = form.presentation.data
    user.set_image(form.pic.data, form.pic.data.filename)
    user.save()
    return redirect(url_for('user', user_id=user.id))
| [
"dom.eav@gmail.com"
] | dom.eav@gmail.com |
e160d44f4d73aa4f2d358ff3ab686eb2b74095d6 | c6e9f5ea0fa9e799da65fe79e56520628a547dba | /autonomous_routes.py | 286fecfc776566ab71664ac01647690a073a4f8d | [] | no_license | AlecR/PaceBot | 061e784ae6c1b758af91f4d808cbec44184a2a07 | 5d310f199e6653710aafa63929b4a701ebb171e3 | refs/heads/master | 2020-04-09T04:01:52.702394 | 2019-02-23T06:11:32 | 2019-02-23T06:11:32 | 160,007,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,233 | py | import time
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import numpy as np
import Adafruit_PCA9685
from threading import Thread
from flask import Blueprint, jsonify, Response
#Get the picture (low resolution, so it should be quite fast)
#Here you can also specify other parameters (e.g.:rotate the image)
pwm = Adafruit_PCA9685.PCA9685()
camera = PiCamera()
## CONSTANTS ##
HEIGHT = 480
WIDTH = 640
SCALE_FACTOR = 0.25
SCALED_WIDTH = int(WIDTH * SCALE_FACTOR)
SCALED_HEIGHT = int(HEIGHT * SCALE_FACTOR)
SCALED_X_CENTER = int(SCALED_WIDTH / 2)
SCALED_Y_CENTER = int(SCALED_HEIGHT / 2)
HORIZ_LINE_WIDTH = (WIDTH * .75)
ESC_PIN = 1
SERVO_PIN = 0
SERVO_LEFT = 350
SERVO_RIGHT = 250
ESC_DRIVE = 350
ESC_STOPPED = 320
current_turn_value = 300
auto_mode = False
camera.resolution = (WIDTH, HEIGHT)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(WIDTH, HEIGHT))
time.sleep(0.1)
autonomous_routes = Blueprint('autonomous_routes', __name__)
def follow_line():
    """Autonomous driving loop: detect a bright line in each camera frame
    and steer toward it until `auto_mode` is switched off.

    Runs on a background thread; drives the ESC/servo via the module-level
    `pwm` controller and updates the global `current_turn_value`.
    """
    global current_turn_value
    while(True):
        for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
            # Clear the capture buffer so the next frame starts fresh.
            rawCapture.truncate(0)
            if auto_mode is False: return
            # Resize image
            image = frame.array
            image = cv2.resize(image, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_CUBIC)
            # Convert to gray and threshold
            gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            _, image_thresh = cv2.threshold(gray_image, 180, 255, 0)
            # Hough transform: find straight lines in the thresholded image.
            lines = cv2.HoughLines(image_thresh, 10, np.pi/180, 100)
            # draw lines on image and computer error
            if lines is not None and len(lines) > 0:
                for rho, theta in lines[0]:
                    # Convert (rho, theta) into two far-apart endpoints.
                    a = np.cos(theta)
                    b = np.sin(theta)
                    x0 = a*rho
                    y0 = b*rho
                    x1 = int(x0 + 1000*(-b))
                    y1 = int(y0 + 1000*(a))
                    x2 = int(x0 - 1000*(-b))
                    y2 = int(y0 - 1000*(a))
                    mid_x = (x1 + x2) / 2
                    # Red: detected line; yellow: image-centre reference.
                    cv2.line(image, (mid_x, y1), (mid_x, y2),(0,0,255),2)
                    cv2.line(image, (SCALED_X_CENTER, 0),(SCALED_X_CENTER, SCALED_HEIGHT),(0,255,255),2)
                    error = SCALED_X_CENTER - mid_x
                    # Keep driving and steer proportionally to the error.
                    pwm.set_pwm(ESC_PIN, 0, ESC_DRIVE)
                    calculate_turn_value(error)
                    pwm.set_pwm(SERVO_PIN, 0, current_turn_value)
            else:
                # No line detected: stop the motor.
                pwm.set_pwm(ESC_PIN, 0, ESC_STOPPED)
            cv2.imshow("PaceBot Camera", image)
            key = cv2.waitKey(25)
            if key == 27:
                break
def calculate_turn_value(error):
    """Nudge the global steering command toward the line (P-control step).

    `error` is the pixel offset between the image centre and the detected
    line.  The correction is accumulated into the global
    `current_turn_value` and clamped to the servo's
    [SERVO_RIGHT, SERVO_LEFT] pulse range.

    Removed here: an unused TARGET constant and needless `global`
    declarations for names that are only read.
    """
    global current_turn_value
    KP = 0.01  # proportional gain (pixels -> servo pulse ticks)
    # Steer opposite to the error sign; accumulate so steering is gradual.
    turn_value = (error * KP) * -1
    current_turn_value += int(turn_value)
    # Clamp to the servo's physical limits.
    current_turn_value = max(SERVO_RIGHT, min(SERVO_LEFT, current_turn_value))
@autonomous_routes.route('/video', methods=['GET'])
def stream_video():
    # NOTE(review): placeholder endpoint -- prints instead of streaming
    # the camera feed.
    print("test")
@autonomous_routes.route('/toggle-auto', methods=['POST'])
def toggle_auto_drive():
    """Toggle autonomous mode; spawn the line-follower thread on enable."""
    global auto_mode
    auto_mode = not auto_mode
    if auto_mode:
        # follow_line() returns on its own once auto_mode is cleared.
        auto_thread = Thread(target=follow_line)
        auto_thread.start()
    return jsonify(auto_mode=auto_mode)
| [
"alecr1997@gmail.com"
] | alecr1997@gmail.com |
2a030259e02cd94daf91e98a8e8e55470146a368 | 9c47fbb2761cc50b7b0be67decb20c377dd1d078 | /YandexContest/20191008/Task_C.py | 6f9bfb34d97d6708218af9fe11ce838dcab1fdf3 | [
"MIT"
] | permissive | IsFilimonov/Interviews | 782ec1f5d82373c20df0edaaeb56cfb0d493a9e7 | 3b9858f43ef6b7a2b5e565ef58406e4018edbf97 | refs/heads/main | 2022-12-12T13:16:25.750870 | 2022-11-30T11:31:38 | 2022-11-30T11:31:38 | 213,611,039 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | import sys
# Collapse consecutive duplicate lines: read n, then n values from stdin,
# and print each value only when it differs from its predecessor.
max_n, curr, A = 1000000, None, []  # NOTE(review): max_n is never used
n = sys.stdin.readline().strip()
for el in range(int(n)):
    val = sys.stdin.readline().strip()
    if curr != val:
        A.append(val)
        curr = val
for el in A:
    print(el)
| [
"Filimonov_IS@mail.ru"
] | Filimonov_IS@mail.ru |
e037fa777ecf55411160796ccaf7d172e5ca38d4 | 36b740d4e29396c757c9652909ccdeb42859eaec | /biological.py | 23279e0f9db2e17a342b060df69208f1aa909bb9 | [] | no_license | moharsen/biological | 992f3aa583d2b12b2d1c26aa3438630ddd18dad7 | dc1fb38aaa389821bd46ee159fe1d93b56c7b3e5 | refs/heads/master | 2020-12-30T09:26:18.546650 | 2015-05-18T19:07:53 | 2015-05-18T19:07:53 | 35,799,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | #This code trains a random forest model and produces probabilities on a test dataset
from sklearn.ensemble import RandomForestClassifier
from numpy import genfromtxt, savetxt
def main():
    """Train a random forest on Data/train.csv and write the class-1
    probabilities for Data/test.csv to Data/submission.csv."""
    #create the training & test sets, skipping the header row with [1:]
    dataset = genfromtxt(open('Data/train.csv','r'), delimiter=',', dtype='f8')[1:]
    # First column is the label; the remaining columns are features.
    target = [x[0] for x in dataset]
    train = [x[1:] for x in dataset]
    test = genfromtxt(open('Data/test.csv','r'), delimiter=',', dtype='f8')[1:]
    #create and train the random forest
    #multi-core CPUs can use: rf = RandomForestClassifier(n_estimators=100, n_jobs=2)
    rf = RandomForestClassifier(n_estimators=100)
    rf.fit(train, target)
    # x[1] is the probability of the positive class; rows are numbered
    # from 1 as required by the submission format.
    predicted_probs = [[index + 1, x[1]] for index, x in enumerate(rf.predict_proba(test))]
    savetxt('Data/submission.csv', predicted_probs, delimiter=',', fmt='%d,%f',
           header='MoleculeId,PredictedProbability', comments = '')
if __name__=="__main__":
main() | [
"mohar.sen@gmail.com"
] | mohar.sen@gmail.com |
ee03f9338782efa72a3588b9cd286b4159969d66 | cf319003bb76c9ff2fed8919f1726442d9fec532 | /plugins/blender/blender.client/batchlabs_blender/preferences.py | 8e7c22fe0666d443d457cd3039bd47f0e11871d7 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | Azure/azure-batch-rendering | a06a6fcba965fc9c52210324c0fa2ccfd3580137 | cbd33753afb47983429659c407ac657d1b981a7c | refs/heads/main | 2023-09-04T05:59:37.958417 | 2023-04-13T21:48:39 | 2023-04-13T21:48:39 | 123,842,545 | 10 | 6 | MIT | 2023-04-12T19:44:05 | 2018-03-05T00:24:04 | C# | UTF-8 | Python | false | false | 2,272 | py | import os
import bpy
class UserPreferences(bpy.types.AddonPreferences):
    """BatchLabs Blender plugin user preferences."""
    # Blender requires bl_idname to match the add-on's package name.
    bl_idname = __package__.split('.')[0]
    # Directory that receives the plugin's log file.
    log_dir = bpy.props.StringProperty(
        name="Log directory",
        description="Location of log file",
        subtype='DIR_PATH',
        default=os.path.expanduser('~'))
    # Values mirror the numeric levels of Python's logging module.
    log_level = bpy.props.EnumProperty(items=(('10', 'Debug', ''),
                                              ('20', 'Info', ''),
                                              ('30', 'Warning', ''),
                                              ('40', 'Error', ''),
                                              ('50', 'Critical', '')),
                                       name="Logging level",
                                       description="Level of logging detail",
                                       default="20")
    # Fully qualified Azure Batch account resource id.
    account = bpy.props.StringProperty(
        name="Batch Account",
        description="Fully qualified Batch account identifier."
                    " (/subscriptions/<sub-id>/resourceGroups/<resource-group>/"
                    "providers/Microsoft.Batch/batchAccounts/<account>)",
        default="")
    # Whether jobs run on an existing pool or a per-job auto-pool.
    pool_type = bpy.props.EnumProperty(items=(('0', 'Pre-existing pool', ''),
                                              ('1', 'Auto-pool', '')),
                                       name="Pool Type",
                                       description="Type of pool to use when submitting a job",
                                       default="0")
    def draw(self, context):
        """
        Draw the display for the settings in the User Preferences
        with next to the Addon entry.
        :Args:
            - context (bpy.types.Context): Blenders current runtime
              context.
        """
        layout = self.layout
        layout.label(text="Blender will need to be restarted for changes to "
                          "take effect.")
        layout.label(text="")
        layout.label(text="Log Settings")
        layout.prop(self, "log_dir")
        layout.prop(self, "log_level")
        layout.label(text="")
        layout.label(text="Account Settings")
        layout.prop(self, "account")
        layout.prop(self, "pool_type")
| [
"dave.fellows@microsoft.com"
] | dave.fellows@microsoft.com |
de9f1ac86615db22abe1fe4d0960be4355abd789 | 96a5a95e8b7847a12ed11179ee38b0d653d3297c | /nuage_neutron/db/migration/alembic_migrations/versions/ussuri/contract/c86ff3efc46b_remove_nuage_security_group_extension.py | 74ef359e8d7a5451c7def1d638c54f2d6c8004fd | [
"Apache-2.0"
] | permissive | nuagenetworks/nuage-openstack-neutron | 4d72a4e8b0124071b6e4b301a914fdc79df7e14d | ee4444da00c87e7aee32f4b490ac714ed2999efa | refs/heads/stable/wallaby | 2023-02-17T15:48:43.057335 | 2021-11-30T12:28:10 | 2022-01-17T14:28:34 | 40,271,788 | 11 | 41 | Apache-2.0 | 2023-04-09T09:50:19 | 2015-08-05T22:25:35 | null | UTF-8 | Python | false | false | 2,179 | py | # Copyright 2020 NOKIA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from alembic import op
import sqlalchemy as sa
"""Remove nuage security group extension
Revision ID: c86ff3efc46b
Revises: 45aaef218f29
Create Date: 2020-08-24 20:13:51.288485
"""
# revision identifiers, used by Alembic.
revision = 'c86ff3efc46b'
down_revision = '45aaef218f29'
depends_on = ('dde3c65f57d8')
def upgrade():
    """Fold the Nuage 'STATEFUL' security-group extension into Neutron.

    Marks every security group with a STATEFUL='0' extension row as
    stateful=False on the Neutron securitygroups table, then drops the
    now-redundant Nuage extension tables.
    """
    # Lightweight table handles (not ORM models) for the raw queries below.
    nuage_security_group = sa.Table(
        'nuage_security_group',
        sa.MetaData(),
        sa.Column('security_group_id', sa.String(255), nullable=False),
        sa.Column('parameter_name', sa.String(255), nullable=False),
        sa.Column('parameter_value', sa.String(255), nullable=False)
    )
    neutron_security_group = sa.Table(
        'securitygroups',
        sa.MetaData(),
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('stateful', sa.Boolean())
    )
    session = sa.orm.Session(bind=op.get_bind())
    with session.begin(subtransactions=True):
        # we only need to transfer the stateless ones since by default
        # security groups are stateful.
        stateless_sgs = (session.query(nuage_security_group)
                         .filter_by(parameter_name='STATEFUL',
                                    parameter_value='0')
                         .all())
        # i[0] is the security_group_id column of each extension row.
        session.execute(
            neutron_security_group.update().values(stateful=False).where(
                neutron_security_group.c.id.in_(
                    [i[0] for i in stateless_sgs])))
    op.drop_table('nuage_security_group')
    op.drop_table('nuage_security_group_parameter')
    # NOTE(review): commit after the `with session.begin(...)` block --
    # confirm this is required by the session configuration in use.
    session.commit()
| [
"glenn.van_de_water@nokia.com"
] | glenn.van_de_water@nokia.com |
f392f36c3dbd95b3b82de35f548790e6a3ee47da | 40c2a8ed3337cbb50ea8f646999c508b3cfbe8f0 | /polynomial regression/polynomial_regression.py | 1808e7adea1a7251251ec4e758ce0b5d05d469af | [] | no_license | imakshit/scikit-machine-learning | 8c41f4155db473267d3e37decc9b24350b39820d | 08d5f217eb4ad651f0db26a6978eb50a1b6d3405 | refs/heads/master | 2020-03-23T20:19:30.164515 | 2018-08-30T16:20:30 | 2018-08-30T16:20:30 | 142,035,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,769 | py | # Polynomial Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Fitting Linear Regression to the dataset
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 4)#degree set according to the curve
X_poly = poly_reg.fit_transform(X)
poly_reg.fit(X_poly, y)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y)
# Visualising the Linear Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, lin_reg.predict(X), color = 'blue')
plt.title('Truth or Bluff (Linear Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Polynomial Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, lin_reg_2.predict(poly_reg.fit_transform(X)), color = 'blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Polynomial Regression results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color = 'blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Predicting a new result with Linear Regression
# scikit-learn's predict() requires a 2-D input of shape
# (n_samples, n_features); a bare scalar raises a ValueError in modern
# versions, so the single position level 6.5 is wrapped as [[6.5]].
lin_reg.predict([[6.5]])
# Predicting a new result with Polynomial Regression
lin_reg_2.predict(poly_reg.fit_transform(6.5)) | [
"akshit.gupta98@gmail.com"
] | akshit.gupta98@gmail.com |
40d0e62c598bbbdb29fc8d67ff31ebae12cdab49 | 12472c36ad07ec89bfc27403e697d14b6351329b | /picshare/settings.py | 6c3f5153a27a8d5d0f7f3b9f74e409c54bd68b9f | [
"MIT"
] | permissive | Vohsty/-Gallery | f80122117cae25832bede52214d8f9ce6f1476d6 | 4f6ac9d67084756aa16a359db1747cfc03849e6b | refs/heads/master | 2022-12-14T09:45:27.117188 | 2019-06-25T13:10:38 | 2019-06-25T13:10:38 | 191,901,504 | 0 | 0 | MIT | 2022-12-08T01:05:15 | 2019-06-14T07:59:56 | Python | UTF-8 | Python | false | false | 4,533 | py | """
Django settings for picshare project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this hard-coded assignment overrides the env-based
# SECRET_KEY = config('SECRET_KEY') set earlier in this module, so the
# committed key is what actually runs -- confirm and remove the override.
SECRET_KEY = 'oh=u)=ey@47l!%%_0=&yd9g_%=$m2m$xs%4j0v(251+k^a!5tf'

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): likewise forces DEBUG on and empties ALLOWED_HOSTS,
# clobbering the decouple-configured values assigned above.
DEBUG = True

ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'bootstrap3',
'gallery.apps.GalleryConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'picshare.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'picshare.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# 'NAME': 'studio',
# 'USER': 'steve',
# 'PASSWORD':'niigatah',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Configure Django App for Heroku.
django_heroku.settings(locals()) | [
"stevekimanthi87@gmail.com"
] | stevekimanthi87@gmail.com |
11372e1174c14bf0f2fcd7bcb02fba3c76370519 | 8ce87aa7b8230a3fd474501c35e23c564f2780d0 | /organizacion/migrations/0003_auto_20150725_0630.py | f233fa0171a7b7febfa5efddf0ad37f6e59aded2 | [] | no_license | ErickMurillo/canicacao | 46e7a485257ab95902fb427d4cb0b5e72fd14ab5 | d4a79260c87d1ae1cdd8ecb8bc4be82e9ddb0cc7 | refs/heads/master | 2020-12-29T02:24:36.519281 | 2018-03-16T15:38:26 | 2018-03-16T15:38:26 | 35,285,596 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration for the 'organizacion' app:
    # adds the 'fecha' field to comercializacion_org and relaxes the
    # 'gerente' field on organizacion.  Avoid hand-editing applied
    # migrations; create a new one instead.

    dependencies = [
        ('organizacion', '0002_auto_20150723_2221'),
    ]

    operations = [
        # New IntegerField; default=1 backfills existing rows and
        # preserve_default=False drops the default from the schema afterwards.
        migrations.AddField(
            model_name='comercializacion_org',
            name='fecha',
            field=models.IntegerField(default=1, verbose_name=b'A\xc3\xb1o de recolecci\xc3\xb3n de informaci\xc3\xb3n'),
            preserve_default=False,
        ),
        # 'gerente' becomes optional (null=True, blank=True) with a new
        # verbose name ("Representante legal").
        migrations.AlterField(
            model_name='organizacion',
            name='gerente',
            field=models.CharField(max_length=200, null=True, verbose_name=b'Representante legal', blank=True),
            preserve_default=True,
        ),
    ]
| [
"erickmurillo22@gmail.com"
] | erickmurillo22@gmail.com |
52f45db0bcbdeb11736ea472d8c7783d302d09be | 33964a2f9b2af381674d55d7a1746070d786c6dc | /try_catch.py | 8a76fb7f12b5d7f915c547847436b361c0bf2106 | [] | no_license | oldmonkandlinux/python-basics | abbe8b2d82c7d1d86d3932ef6d737a55df9f93be | 049807c439a412fb6c55815fdda43a3c2641632c | refs/heads/master | 2022-04-25T01:27:20.849787 | 2020-04-27T14:56:42 | 2020-04-27T14:56:42 | 259,357,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | try:
age = int(input("age?: "))
income = 2000
risk = income/age
print(age)
except ValueError:
print("enter a numerical value")
except ZeroDivisionError:
print("division by zero is not possible")
| [
"noreply@github.com"
] | oldmonkandlinux.noreply@github.com |
b95a5f602761f73c08c5b39bdc930cf05acf73e4 | 66f7d157f51897d08c45700f0a60ef6e4ddcaa34 | /tests/test_general.py | 644ea50c8da4c69fe5cc911ecfeb0fe19299052f | [
"MIT"
] | permissive | FKLC/AnyAPI | 1d3eee97e507088ee754d80b6658cce7eca465d5 | be6f23ad2d4affb8574da0082d1a9375fe11f9ed | refs/heads/master | 2021-11-24T08:49:09.481422 | 2019-05-25T15:14:36 | 2019-05-25T15:14:36 | 166,031,038 | 131 | 9 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | from anyapi import AnyAPI
import pytest
def test_params():
    """Session-level kwargs (HTTP basic auth here) must reach every request."""
    api = AnyAPI("http://httpbin.org", auth=("user", "password"))
    response = api("basic-auth").user.password.GET()
    assert response.json()["authenticated"]
def test_passing_url():
    """An explicit ``url=`` argument must override the base URL for one call."""
    api = AnyAPI("http://httpbin.org")
    payload = api.GET(url="http://httpbin.org/anything").json()
    assert payload["url"] == "https://httpbin.org/anything"
| [
"m.fatihklc0@gmail.com"
] | m.fatihklc0@gmail.com |
c54780fdaada792761b06a65b636b22338aef471 | b18d63d01c4442d746d5b4bd626d439ec75d273c | /arithmetic_operators.py | f9fb8cefa4a957d8b0d1d4b487712c27165c5115 | [] | no_license | crishonsou/hackerrank_solutions_submissions | 2f080f15eb0557ec633be265f065b991c2b5c456 | ccd5525cf6e96c119df13945ff28d3473a8c8c1c | refs/heads/main | 2022-12-26T03:07:24.759426 | 2020-10-06T15:13:17 | 2020-10-06T15:13:17 | 301,766,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | a = int(input())
b = int(input())
print(a // b)
print(a / b)
| [
"noreply@github.com"
] | crishonsou.noreply@github.com |
311b829b832a4f61e3ca97e3fc5c0827eeb0aa11 | 10ecb21c9cd858d5bb62e195bcda4ae8b2e187b2 | /PYTORCH3.py | 1a4097f2cc94380cf3bc257b6f555c287d3e67ea | [] | no_license | bishnucit/Pytorch_learning | 40e4f3e95993887c8ae0f4e60e8897ea9b4274d1 | c8dea0266b2c45c6bfa70deba592b14ebc42bfed | refs/heads/master | 2020-03-31T22:19:40.513140 | 2019-05-08T06:43:45 | 2019-05-08T06:43:45 | 152,614,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,070 | py | """
Autograd - Automatic differentiation
The autograd package provides automatic differentiation for all operations on Tensors.
It is a define by run framework, the backdrop is defined by how the code runs, every
single iteration can be different.
Tensor -
torch.Tensor is the central class of the package.
if its attribute is set as .requires_grad as True, it starts to track all operations on it.
After finishing computation when .backward() is called, it will automatically compute the gradients which will
be accumulated in .grad attribute. To stop tracking history, .detach() can be called
To prevent tracking history, save memory, torch.no_grad(): can be used to wrap the code.
Another important class for Autograd is Function.
Function -
Tensor and Function are interconnected and build up an acyclic graph,
that encodes a complete history of computation. Each tensor has a .grad_fn
attribute that references a Function that has created the Tensor (except for
Tensors created by the user - their grad_fn is None).
import torch
Create a tensor and set requires_grad=True to track computation with it
x = torch.ones(2, 2, requires_grad=True)
print(x)
Out:
tensor([[1., 1.],
[1., 1.]], requires_grad=True)
Do an operation of tensor:
y = x + 2
print(y)
Out:
tensor([[3., 3.],
[3., 3.]], grad_fn=<AddBackward0>)
y was created as a result of an operation, so it has a grad_fn.
print(y.grad_fn)
Out:
<AddBackward0 object at 0x7f0ea616bac8>
Do more operations on y
z = y * y * 3
out = z.mean()
print(z, out)
Out:
tensor([[27., 27.],
[27., 27.]], grad_fn=<MulBackward0>) tensor(27., grad_fn=<MeanBackward1>)
.requires_grad_( ... ) changes an existing Tensor’s requires_grad flag in-place. The input flag defaults to False if not given.
a = torch.randn(2, 2)
a = ((a * 3) / (a - 1))
print(a.requires_grad)
a.requires_grad_(True)
print(a.requires_grad)
b = (a * a).sum()
print(b.grad_fn)
Out:
False
True
<SumBackward0 object at 0x7f0e86396e48>
GRADIENTS
Let’s backprop now Because out contains a single scalar, out.backward() is equivalent to out.backward(torch.tensor(1)).
out.backward()
print gradients d(out)/dx
print(x.grad)
Out:
tensor([[4.5000, 4.5000],
[4.5000, 4.5000]])
You should have got a matrix of 4.5. Let’s call the out Tensor “o”. We have that o=14∑izi, zi=3(xi+2)2 and zi∣∣xi=1=27. Therefore, ∂o∂xi=32(xi+2), hence ∂o∂xi∣∣xi=1=92=4.5.
You can do many crazy things with autograd!
x = torch.randn(3, requires_grad=True)
y = x * 2
while y.data.norm() < 1000:
y = y * 2
print(y)
Out:
tensor([-1178.9551, 1202.9015, 293.6342], grad_fn=<MulBackward0>)
gradients = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
y.backward(gradients)
print(x.grad)
Out:
tensor([ 102.4000, 1024.0000, 0.1024])
You can also stop autograd from tracking history on Tensors with .requires_grad=True by wrapping the code block in with torch.no_grad():
print(x.requires_grad)
print((x ** 2).requires_grad)
with torch.no_grad():
print((x ** 2).requires_grad)
Out:
True
True
False
"""
| [
"noreply@github.com"
] | bishnucit.noreply@github.com |
5165f9108e3a884a2b24dfed5437081cc63e5773 | 4275f8342b88d80d98167d87746ce145a9106877 | /backend/app/admin/api/posts_views.py | 000ddf07f0a8f32bb68cf95f29fd944c6d9a3963 | [] | no_license | cheerfulleg/fastapi-project | e076c1677cfd708e16fcbeb8cee5b95a5fa750a2 | 5af8ea5a9938f493cf0386e13be9b4ac04887edb | refs/heads/master | 2023-07-15T20:25:13.486077 | 2021-08-18T07:57:09 | 2021-08-18T07:57:09 | 393,935,356 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,686 | py | from fastapi import APIRouter, HTTPException
from fastapi.params import Path
from fastapi_pagination import Page
from fastapi_pagination.ext.tortoise import paginate
from starlette import status
from starlette.responses import JSONResponse
from backend.app.posts.models import Post
from backend.app.posts.schemas import Post_Pydantic, PostInWithProfileId_Pydantic
from backend.app.users.models import Profile
posts_router = APIRouter()
@posts_router.post("", status_code=201, response_model=Post_Pydantic)
async def create_post(post: PostInWithProfileId_Pydantic):
"""
**Admin permissions required**
Create post
- **title**: post title, 120 characters
- **body**: post body, text field
- **profile_id**: profile reference
"""
profile_obj = await Profile.get(id=post.dict().get("profile_id"))
if not profile_obj:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
post_obj = await Post.create(**post.dict(exclude_unset=True))
return await Post_Pydantic.from_tortoise_orm(post_obj)
@posts_router.get("", response_model=Page[Post_Pydantic])
async def get_posts_list():
"""
**Admin permissions required**
Get list of existing posts
"""
# posts = await Post_Pydantic.from_queryset(Post.all())
return await paginate(Post)
@posts_router.get("/{post_id}", response_model=Post_Pydantic)
async def get_post_by_id(post_id: int = Path(..., gt=0)):
"""
**Admin permissions required**
Get post details
"""
return await Post_Pydantic.from_queryset_single(Post.get(id=post_id))
@posts_router.put("/{post_id}", response_model=Post_Pydantic)
async def update_post_by_id(post: PostInWithProfileId_Pydantic, post_id: int = Path(..., gt=0)):
"""
**Admin permissions required**
Update post
- **title**: post title, 120 characters
- **body**: post body, text field
- **profile_id**: profile reference
"""
profile_obj = await Profile.get(id=post.dict().get("profile_id"))
if not profile_obj:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
await Post.filter(id=post_id).update(**post.dict(exclude_unset=True))
return await Post_Pydantic.from_queryset_single(Post.get(id=post_id))
@posts_router.delete("/{post_id}")
async def delete_post_by_id(post_id: int = Path(..., gt=0)):
"""
**Admin permissions required**
Delete post
"""
deleted_count = await Post.filter(id=post_id).delete()
if not deleted_count:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
return JSONResponse(
status_code=status.HTTP_200_OK,
content={"message": "Post deleted successfully"},
)
| [
"ivan.yolgin@exrtawest.com"
] | ivan.yolgin@exrtawest.com |
6f8a5d826a59a6fa889903106b5230476ae6b9cb | 9c2a0050174009929a98f235072243b315a81b72 | /test/part2_test.py | 25515946af81fe70b27285106be5ae0a0058770e | [] | no_license | moesamahdi/google-coding-challenge | deb483771024560efaf5da307cc27d06a6fc4ec9 | 49e65420c026de4cd9c934bd1654bc4a63e6f377 | refs/heads/main | 2023-06-15T17:04:19.399338 | 2021-07-13T16:05:07 | 2021-07-13T16:05:07 | 382,007,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,541 | py | from src.video_player import VideoPlayer
def test_create_playlist(capfd):
    """Creating a playlist prints exactly one success line with the name."""
    player = VideoPlayer()
    player.create_playlist("my_PLAYlist")
    captured, _ = capfd.readouterr()
    output = captured.splitlines()
    assert len(output) == 1
    assert "Successfully created new playlist: my_PLAYlist" in output[0]
def test_create_existing_playlist(capfd):
    # Playlist names are case-insensitive: creating "my_COOL_PLAYLIST" after
    # "my_cool_playlist" must be rejected as a duplicate.
    player = VideoPlayer()
    player.create_playlist("my_cool_playlist")
    player.create_playlist("my_COOL_PLAYLIST")
    out, err = capfd.readouterr()
    lines = out.splitlines()
    assert len(lines) == 2
    assert "Successfully created new playlist: my_cool_playlist" in lines[0]
    assert ("Cannot create playlist: A playlist with the same name already "
            "exists") in lines[1]
def test_add_to_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_COOL_playlist")
player.add_to_playlist("my_cool_PLAYLIST", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 2
assert "Successfully created new playlist: my_COOL_playlist" in lines[0]
assert "Added video to my_cool_PLAYLIST: Amazing Cats" in lines[1]
def test_add_to_playlist_already_added(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 3
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[1]
assert "Cannot add video to my_cool_playlist: Video already added" in lines[2]
def test_add_to_playlist_nonexistent_video(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.add_to_playlist("my_cool_playlist", "some_other_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 3
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[1]
assert "Cannot add video to my_cool_playlist: Video does not exist" in lines[2]
def test_add_to_playlist_nonexistent_playlist(capfd):
player = VideoPlayer()
player.add_to_playlist("another_playlist", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot add video to another_playlist: Playlist does not exist" in lines[0]
def test_add_to_playlist_nonexistent_playlist_nonexistent_video(capfd):
player = VideoPlayer()
player.add_to_playlist("another_playlist", "does_not_exist_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot add video to another_playlist: Playlist does not exist" in lines[0]
def test_show_all_playlists_no_playlists_exist(capfd):
player = VideoPlayer()
player.show_all_playlists()
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "No playlists exist yet" in lines[0]
def test_show_all_playlists(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playLIST")
player.create_playlist("anotheR_playlist")
player.show_all_playlists()
out, err = capfd.readouterr()
lines = out.splitlines()
print(lines)
assert len(lines) == 5
assert "Showing all playlists:" in lines[2]
assert "anotheR_playlist" in lines[3]
assert "my_cool_playLIST" in lines[4]
def test_show_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.show_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.show_playlist("my_COOL_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 6
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Showing playlist: my_cool_playlist" in lines[1]
assert "No videos here yet" in lines[2]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[3]
assert "Showing playlist: my_COOL_playlist" in lines[4]
assert "Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[5]
def test_remove_from_playlist_then_re_add(capfd):
player = VideoPlayer()
player.create_playlist("MY_playlist")
player.add_to_playlist("my_playlist", "amazing_cats_video_id")
player.add_to_playlist("my_playlist", "life_at_google_video_id")
player.remove_from_playlist("my_playlist", "amazing_cats_video_id")
player.add_to_playlist("my_playlist", "amazing_cats_video_id")
player.show_playlist("my_playLIST")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 8
assert "Showing playlist: my_playLIST" in lines[5]
assert "Life at Google (life_at_google_video_id) [#google #career]" in lines[6]
assert "Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[7]
def test_show_playlist_nonexistent_playlist(capfd):
player = VideoPlayer()
player.show_playlist("another_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot show playlist another_playlist: Playlist does not exist" in lines[0]
def test_remove_from_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.remove_from_playlist("my_COOL_playlist", "amazing_cats_video_id")
player.remove_from_playlist("my_cool_playlist", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 4
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[1]
assert "Removed video from my_COOL_playlist: Amazing Cats" in lines[2]
assert "Cannot remove video from my_cool_playlist: Video is not in playlist" in lines[3]
def test_remove_from_playlist_video_is_not_in_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.remove_from_playlist("my_cool_playlist", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 2
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Cannot remove video from my_cool_playlist: Video is not in playlist" in lines[1]
def test_remove_from_playlist_nonexistent_video(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.remove_from_playlist("my_cool_playlist", "some_other_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 3
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[1]
assert "Cannot remove video from my_cool_playlist: Video does not exist" in lines[2]
def test_remove_from_playlist_nonexistent_playlist(capfd):
player = VideoPlayer()
player.remove_from_playlist("my_cool_playlist", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot remove video from my_cool_playlist: Playlist does not exist" in lines[0]
def test_clear_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.add_to_playlist("my_cool_playlist", "amazing_cats_video_id")
player.show_playlist("my_cool_playlist")
player.clear_playlist("my_COOL_playlist")
player.show_playlist("my_cool_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 7
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Added video to my_cool_playlist: Amazing Cats" in lines[1]
assert "Showing playlist: my_cool_playlist" in lines[2]
assert "Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[3]
assert "Successfully removed all videos from my_COOL_playlist" in lines[4]
assert "Showing playlist: my_cool_playlist" in lines[5]
assert "No videos here yet" in lines[6]
def test_clear_playlist_nonexistent(capfd):
player = VideoPlayer()
player.clear_playlist("my_cool_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot clear playlist my_cool_playlist: Playlist does not exist" in lines[0]
def test_delete_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_cool_playlist")
player.delete_playlist("my_cool_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 2
assert "Successfully created new playlist: my_cool_playlist" in lines[0]
assert "Deleted playlist: my_cool_playlist" in lines[1]
def test_delete_playlist_nonexistent(capfd):
player = VideoPlayer()
player.delete_playlist("my_cool_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot delete playlist my_cool_playlist: Playlist does not exist" in lines[0]
| [
"moesamahdi@gmail.com"
] | moesamahdi@gmail.com |
3831c53381eb7a0315f52b6acf3282e6641d8f64 | 33deada36787c198fac075461d39dd7b7969505d | /textToPic.py | b88d8795a8cf038abf32ca64e5b1fbc933d7ca1a | [] | no_license | axwei0/new_scientist_fanyi | 42503ffeeaac7bc70e2fff54c4c34edce6d5efb8 | b3f73a22347d136987853ff44796c3048b71472c | refs/heads/master | 2021-04-05T23:25:25.020162 | 2016-07-27T15:24:56 | 2016-07-27T15:24:56 | 62,406,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,384 | py | #-*- coding:utf-8 -*-
import pygame
#/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc为树莓派的汉字字体库路径,请自行替换
def tToP(text):
    """Render the text file ``text`` as a 440px-wide JPEG image.

    Reads the file line by line, wraps each line so it fits a 440px-wide
    surface (the target width for a Weibo image), renders the wrapped
    lines with a 20pt CJK font, and saves the result as ``<text>.jpg``.

    NOTE(review): ``aLine.decode('utf-8')`` implies this is Python 2 code
    operating on UTF-8 byte strings -- confirm the target interpreter.
    """
    pygame.init()
    # /usr/share/fonts/truetype/wqy/wqy-zenhei.ttc is the CJK font path on a
    # Raspberry Pi -- replace with a local font path on other systems.
    font = pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc", 20)
    done = 0
    lines = []
    line = ''
    i = 0
    tempWord = ''
    # Fix: use a context manager so the file handle is closed even if font
    # rendering raises (the original only closed it on the success path).
    with open(text, 'r') as file:
        while not done:
            aLine = file.readline()
            aLine = aLine.replace('\r\n', '')
            if (aLine != ''):
                for charactor in aLine.decode('utf-8'):
                    # Target image width is 440px: wrap once the current line
                    # plus the pending word would overflow.
                    if font.size(line + tempWord)[0] > 440:
                        lines.append(line)
                        line = ''
                        # Carry the overflowing (English) word to the next
                        # line instead of splitting it mid-word.
                        tempWord = tempWord + charactor
                    else:
                        tempWord = tempWord + charactor
                        # A space ends an English word, so the pending word
                        # can be committed to the current line.
                        if charactor == ' ':
                            line = line + tempWord
                            tempWord = ''
                        # CJK text has no spaces, so a "word" can grow without
                        # bound; 419 leaves room for one more ~20px glyph
                        # before the 440px limit, so flush it as a full line.
                        if font.size(tempWord)[0] > 419 and font.size(tempWord)[0] <= 440:
                            lines.append(tempWord)
                            tempWord = ''
                lines.append(line + tempWord)
                line = ''
                tempWord = ''
            else:
                # readline() returned '' -- end of file.  NOTE(review): a bare
                # '\r\n' line also becomes '' after the replace above and stops
                # reading early; confirm inputs never contain blank lines.
                done = 1
    line_height = font.size(line)[1]
    img_height = line_height * (len(lines) + 1)
    rtext = pygame.Surface((440, img_height))
    rtext.fill([255, 255, 255])
    for line in lines:
        rtext1 = font.render(line, True, (0, 0, 0), (255, 255, 255))
        rtext.blit(rtext1, (0, i * line_height))
        i = i + 1
    pygame.image.save(rtext, text + ".jpg")
| [
"55755211@qq.com"
] | 55755211@qq.com |
b8a5ebd8681495fd6c38f0e14d85a0f3171860dd | dd80a584130ef1a0333429ba76c1cee0eb40df73 | /external/chromium_org/printing/DEPS | bc43b418c77aafd04e87cead8cd587db70d587dc | [
"MIT",
"BSD-3-Clause"
] | permissive | karunmatharu/Android-4.4-Pay-by-Data | 466f4e169ede13c5835424c78e8c30ce58f885c1 | fcb778e92d4aad525ef7a995660580f948d40bc9 | refs/heads/master | 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 | MIT | 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null | UTF-8 | Python | false | false | 262 | include_rules = [
"+jni",
"+skia/ext",
"+third_party/icu/source/common/unicode",
"+third_party/icu/source/i18n/unicode",
"+third_party/skia",
"+ui/aura",
"+ui/base/resource",
"+ui/base/text",
"+ui/gfx",
"+ui/shell_dialogs",
"+win8/util",
]
| [
"karun.matharu@gmail.com"
] | karun.matharu@gmail.com | |
48af397da3481385e6851363be0df3496f4de015 | c5b6a2df2f4a23215b1727a4c6f893ad855d9cb9 | /config/knob_config_parser.py | aa053f8f47920bd751bdc671d9bbd3c4004647c4 | [] | no_license | vitaliibalakin/c_orbit | 1ae6df78e218b8ed9a6368b28be392bf18664300 | 6e9e96c5288a0b1339e0cc3c639bea0e4151c129 | refs/heads/master | 2022-05-05T10:53:47.258041 | 2022-04-22T14:36:33 | 2022-04-22T14:36:33 | 127,502,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | import re
def load_config_knob(conf_name):
    """Parse a knob configuration file and return its channel settings.

    The file must contain exactly one section of the form::

        [chans_list]
        <chan_name> key1=host:rest key2=123 ...
        [end]

    For every channel line, ``key=value`` pairs whose value looks like a
    channel address (``word:rest``) are stored as strings, and purely
    numeric values are stored as ints (the int pass runs second and wins
    on any key matched by both patterns, as before).

    Parameters
    ----------
    conf_name : str
        Path to the configuration file.

    Returns
    -------
    dict or None
        ``{'chans_conf': {<chan_name>: {<key>: <value>, ...}, ...}}`` on
        success; ``None`` (after printing a diagnostic) when the file does
        not contain exactly one ``[chans_list]`` section.
    """
    # Fix: the original opened the file and never closed it; read everything
    # up front inside a context manager (the parser indexes lines by number).
    with open(conf_name, "r") as conf_file:
        configuration = conf_file.readlines()
    control_sum = 0  # number of '[chans_list]' sections encountered

    def load_chans(i_b, data):
        # Parse channel lines starting at index i_b until an '[end]' marker.
        # Returns (index_of_end_line, settings_dict).
        chans_sett = {}
        while True:
            result = re.match(r'(\w+)', data[i_b])
            if result:
                chan_name = result.group()
                chans_sett[chan_name] = {}
                # 'key=host:rest' pairs are kept as strings ...
                chans_sett[chan_name].update({elem.split('=')[0]: elem.split('=')[1]
                                              for elem in re.findall(r'\s(\S+=\w+:\S+)', data[i_b])})
                # ... while purely numeric values are converted to int.
                chans_sett[chan_name].update({elem.split('=')[0]: int(elem.split('=')[1])
                                              for elem in re.findall(r'\s(\S+=\d+)', data[i_b])})
            i_b += 1
            # The final '[end]' may or may not carry a trailing newline.
            if data[i_b] == '[end]\n' or data[i_b] == '[end]':
                return i_b, chans_sett

    i = 0
    while i < len(configuration):
        if configuration[i] == '[chans_list]\n':
            control_sum += 1
            i_next, chans_config_sett = load_chans(i + 1, configuration)
            i = i_next
        i += 1
    if control_sum == 1:
        return {'chans_conf': chans_config_sett}
    else:
        print('wrong control_sum: orbitd config file is incomplete')
| [
"balakinvitalyv@gmail.com"
] | balakinvitalyv@gmail.com |
146d68aab8bfeadf80d5bc566c1ce8ed75bf4187 | 57ad43025032c965252b2f09cabf0e534a7b8a5d | /disketoApp/disketoApp/urls.py | 17b928589a6fb02fd2d807cba6c6dc7d770284d8 | [] | no_license | stngarcia/django-disketo | 1ce803262e8ac1c5c99f1a173fadcf6ee0591521 | e4ddf360b080630b234b0a691c1e179d85e0c5b9 | refs/heads/master | 2020-05-17T09:16:26.680206 | 2019-04-26T13:26:21 | 2019-04-26T13:26:21 | 183,628,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | """disketoApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# URL table for the project: only the Django admin site is exposed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| [
"stngarcia8@gmail.com"
] | stngarcia8@gmail.com |
0d8a223b3f1590a1b1e4491f34cf5321e061913b | 07eb17b45ce5414282a2464c69f50197968c312d | /stusched/app/urls.py | ffdfa5ca2f2ce34e32c9ac872ee1c74578091181 | [] | no_license | cmontemuino/dbschools | e15d4d03a3d2f0e1ee1fa47b8ce9748b7f09cdbc | d3ee1fdc5c36274e5d5f7834ca1110b941d097b9 | refs/heads/master | 2021-01-16T21:16:56.427183 | 2015-08-02T17:09:43 | 2015-08-02T17:09:43 | 6,158,940 | 0 | 0 | null | 2015-10-15T12:45:34 | 2012-10-10T14:49:16 | HTML | UTF-8 | Python | false | false | 783 | py | """stusched URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from . import views
# Route the app root to the index view and /status to the status view.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^status$', views.status, name='status'),
]
| [
"daveb@davebsoft.com"
] | daveb@davebsoft.com |
aaf20c2fe8ce1671ee96f32aad3cbdfa2ec5fc4a | 5f5c6809e9e68127262c843602185f3d6d6d556b | /thejoker/prior.py | ce3c89878d8784fd7d3f2c94b9ce086aeb86412f | [
"MIT"
] | permissive | minaskar/thejoker | e195bd361d4eadf051fb29380d110d214ea65a1b | b7ba1d094ce3d4d61c1db80da37981327f280d34 | refs/heads/master | 2023-03-16T02:55:04.644778 | 2020-06-15T19:39:29 | 2020-06-15T19:39:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,046 | py | # Third-party
import astropy.units as u
import numpy as np
from theano.gof import MissingInputError
# Project
from .logging import logger
from .samples import JokerSamples
from .prior_helpers import (get_nonlinear_equiv_units,
get_linear_equiv_units,
validate_poly_trend,
get_v0_offsets_equiv_units,
validate_sigma_v)
from .utils import random_state_context
__all__ = ['JokerPrior']
def _validate_model(model):
    """Return a usable ``pymc3.Model``, resolving ``None`` from context.

    When ``model`` is ``None``, the active pymc3 model context is used if
    one exists; otherwise a fresh ``pymc3.Model`` is created.  Any other
    non-``Model`` input raises ``TypeError``.
    """
    import pymc3 as pm

    if model is None:
        try:
            # Are we inside a `with pm.Model():` block?
            model = pm.modelcontext(None)
        except TypeError:
            # No active context: fall back to a brand-new model instance.
            model = pm.Model()

    if isinstance(model, pm.Model):
        return model

    raise TypeError("Input model must be a pymc3.Model instance, not "
                    "a {}".format(type(model)))
class JokerPrior:
    """Container for the prior probability distributions used by The Joker.

    Wraps a ``pymc3.Model`` plus a dictionary of pymc3 variables (with units
    attached via ``exoplanet.units``) for the nonlinear parameters, the
    linear parameters, and any constant velocity-offset parameters.
    """

    def __init__(self, pars=None, poly_trend=1, v0_offsets=None, model=None):
        """
        This class controls the prior probability distributions for the
        parameters used in The Joker.

        This initializer is meant to be flexible, allowing you to specify the
        prior distributions on the linear and nonlinear parameters used in The
        Joker. However, for many use cases, you may want to just use the
        default prior: To initialize this object using the default prior, see
        the alternate initializer `JokerPrior.default()`.

        Parameters
        ----------
        pars : dict, list (optional)
            Either a list of pymc3 variables, or a dictionary of variables with
            keys set to the variable names. If any of these variables are
            defined as deterministic transforms from other variables, see the
            next parameter below.
        poly_trend : int (optional)
            Specifies the number of coefficients in an additional polynomial
            velocity trend, meant to capture long-term trends in the data. The
            default here is ``poly_trend=1``, meaning one term: the (constant)
            systemic velocity. For example, ``poly_trend=3`` will sample over
            parameters of a long-term quadratic velocity trend.
        v0_offsets : list (optional)
            A list of additional Gaussian parameters that set systematic offsets
            of subsets of the data. TODO: link to tutorial here
        model : `pymc3.Model`
            This is either required, or this function must be called within a
            pymc3 model context.

        Raises
        ------
        ValueError
            If a required parameter is missing from ``pars``, has no units, or
            has units not convertible to the expected ones.
        TypeError
            If ``v0_offsets`` is not iterable or ``model`` is not a
            ``pymc3.Model``.
        """
        import theano.tensor as tt
        import pymc3 as pm
        import exoplanet.units as xu

        self.model = _validate_model(model)

        # Parse and clean up the input pars
        if pars is None:
            pars = dict()
            # BUG FIX: use the *validated* model. The raw ``model`` argument
            # may legitimately be None here (when we are called inside a
            # pymc3 model context), in which case ``model.named_vars`` would
            # raise AttributeError.
            pars.update(self.model.named_vars)
        elif isinstance(pars, tt.TensorVariable):  # a single variable
            # Note: this has to go before the next clause because
            # TensorVariable instances are iterable...
            pars = {pars.name: pars}
        else:
            try:
                pars = dict(pars)  # try to coerce to a dictionary
            except Exception:
                # if that fails, assume it is an iterable, like a list or tuple
                try:
                    pars = {p.name: p for p in pars}
                except Exception:
                    raise ValueError("Invalid input parameters: The input "
                                     "`pars` must either be a dictionary, "
                                     "list, or a single pymc3 variable, not a "
                                     "'{}'.".format(type(pars)))

        # Set the number of polynomial trend parameters
        self.poly_trend, self._v_trend_names = validate_poly_trend(poly_trend)

        # Calibration offsets of velocity zero-point
        if v0_offsets is None:
            v0_offsets = []
        try:
            v0_offsets = list(v0_offsets)
        except Exception:
            raise TypeError("Constant velocity offsets must be an iterable "
                            "of pymc3 variables that define the priors on "
                            "each offset term.")
        self.v0_offsets = v0_offsets
        pars.update({p.name: p for p in self.v0_offsets})

        # Store the names of the default parameters, used for validating input:
        # Note: these are *not* the units assumed internally by the code, but
        # are only used to validate that the units for each parameter are
        # equivalent to these
        self._nonlinear_equiv_units = get_nonlinear_equiv_units()
        self._linear_equiv_units = get_linear_equiv_units(self.poly_trend)
        self._v0_offsets_equiv_units = get_v0_offsets_equiv_units(self.n_offsets)
        self._all_par_unit_equiv = {**self._nonlinear_equiv_units,
                                    **self._linear_equiv_units,
                                    **self._v0_offsets_equiv_units}

        # At this point, pars must be a dictionary: validate that all
        # parameters are specified and that they all have units
        for name in self.par_names:
            if name not in pars:
                raise ValueError(f"Missing prior for parameter '{name}': "
                                 "you must specify a prior distribution for "
                                 "all parameters.")
            if not hasattr(pars[name], xu.UNIT_ATTR_NAME):
                raise ValueError(f"Parameter '{name}' does not have associated "
                                 "units: Use exoplanet.units to specify units "
                                 "for your pymc3 variables. See the "
                                 "documentation for examples: thejoker.rtfd.io")
            equiv_unit = self._all_par_unit_equiv[name]
            if not getattr(pars[name],
                           xu.UNIT_ATTR_NAME).is_equivalent(equiv_unit):
                raise ValueError(f"Parameter '{name}' has an invalid unit: "
                                 f"The units for this parameter must be "
                                 f"transformable to '{equiv_unit}'")

        # Enforce that the priors on all linear parameters are Normal (or a
        # subclass of Normal)
        for name in (list(self._linear_equiv_units.keys())
                     + list(self._v0_offsets_equiv_units.keys())):
            if not isinstance(pars[name].distribution, pm.Normal):
                raise ValueError("Priors on the linear parameters (K, v0, "
                                 "etc.) must be independent Normal "
                                 "distributions, not '{}'"
                                 .format(type(pars[name].distribution)))

        self.pars = pars

    @classmethod
    def default(cls, P_min=None, P_max=None, sigma_K0=None, P0=1*u.year,
                sigma_v=None, s=None, poly_trend=1, v0_offsets=None,
                model=None, pars=None):
        r"""
        An alternative initializer to set up the default prior for The Joker.

        The default prior is:

        .. math::

            p(P) \propto \frac{1}{P} \quad ; \quad P \in (P_{\rm min}, P_{\rm max})\\
            p(e) = B(a_e, b_e)\\
            p(\omega) = \mathcal{U}(0, 2\pi)\\
            p(M_0) = \mathcal{U}(0, 2\pi)\\
            p(s) = 0\\
            p(K) = \mathcal{N}(K \,|\, \mu_K, \sigma_K)\\
            \sigma_K = \sigma_{K, 0} \, \left(\frac{P}{P_0}\right)^{-1/3} \, \left(1 - e^2\right)^{-1/2}

        and the priors on any polynomial trend parameters are assumed to be
        independent, univariate Normals.

        This prior has sensible choices for typical binary star or exoplanet
        use cases, but if you need more control over the prior distributions
        you might need to use the standard initializer (i.e.
        ``JokerPrior(...)``) and specify all parameter distributions manually.
        See `the documentation <http://thejoker.readthedocs.io>`_ for tutorials
        that demonstrate this functionality.

        Parameters
        ----------
        P_min : `~astropy.units.Quantity` [time]
            Minimum period for the default period prior.
        P_max : `~astropy.units.Quantity` [time]
            Maximum period for the default period prior.
        sigma_K0 : `~astropy.units.Quantity` [speed]
            The scale factor, :math:`\sigma_{K, 0}` in the equation above that
            sets the scale of the semi-amplitude prior at the reference period,
            ``P0``.
        P0 : `~astropy.units.Quantity` [time]
            The reference period, :math:`P_0`, used in the prior on velocity
            semi-amplitude (see equation above).
        sigma_v : `~astropy.units.Quantity` (or iterable of)
            The standard deviations of the velocity trend priors.
        s : `~astropy.units.Quantity` [speed]
            The jitter value, assuming it is constant.
        poly_trend : int (optional)
            Specifies the number of coefficients in an additional polynomial
            velocity trend, meant to capture long-term trends in the data. The
            default here is ``poly_trend=1``, meaning one term: the (constant)
            systemic velocity. For example, ``poly_trend=3`` will sample over
            parameters of a long-term quadratic velocity trend.
        v0_offsets : list (optional)
            A list of additional Gaussian parameters that set systematic offsets
            of subsets of the data. TODO: link to tutorial here
        model : `pymc3.Model` (optional)
            If not specified, this will create a model instance and store it on
            the prior object.
        pars : dict, list (optional)
            Either a list of pymc3 variables, or a dictionary of variables with
            keys set to the variable names. If any of these variables are
            defined as deterministic transforms from other variables, see the
            next parameter below.
        """
        model = _validate_model(model)

        # Build the nonlinear (P, e, omega, M0, s) priors first, then the
        # linear (K, v0, ...) priors: the K prior depends on P and e already
        # being defined on the model.
        nl_pars = default_nonlinear_prior(P_min, P_max, s=s,
                                          model=model, pars=pars)
        l_pars = default_linear_prior(sigma_K0=sigma_K0, P0=P0, sigma_v=sigma_v,
                                      poly_trend=poly_trend, model=model,
                                      pars=pars)

        pars = {**nl_pars, **l_pars}
        obj = cls(pars=pars, model=model, poly_trend=poly_trend,
                  v0_offsets=v0_offsets)

        return obj

    @property
    def par_names(self):
        """Ordered list of all parameter names: nonlinear, linear, offsets."""
        return (list(self._nonlinear_equiv_units.keys())
                + list(self._linear_equiv_units.keys())
                + list(self._v0_offsets_equiv_units))

    @property
    def par_units(self):
        """Dict mapping each parameter name to its attached unit (or u.one)."""
        import exoplanet.units as xu
        # BUG FIX: iterate the dict *values* (pymc3 variables). Iterating a
        # dict directly yields the string keys, which have no ``.name``
        # attribute, so the original code raised AttributeError.
        return {p.name: getattr(p, xu.UNIT_ATTR_NAME, u.one)
                for p in self.pars.values()}

    @property
    def n_offsets(self):
        """Number of constant velocity-offset parameters."""
        return len(self.v0_offsets)

    def __repr__(self):
        return f'<JokerPrior [{", ".join(self.par_names)}]>'

    def __str__(self):
        return ", ".join(self.par_names)

    def sample(self, size=1, generate_linear=False, return_logprobs=False,
               random_state=None, dtype=None, **kwargs):
        """
        Generate random samples from the prior.

        .. note::

            Right now, generating samples with the prior values is slow (i.e.
            with ``return_logprobs=True``) because of pymc3 issues (see
            discussion here:
            https://discourse.pymc.io/t/draw-values-speed-scaling-with-transformed-variables/4076).
            This will hopefully be resolved in the future...

        Parameters
        ----------
        size : int (optional)
            The number of samples to generate.
        generate_linear : bool (optional)
            Also generate samples in the linear parameters.
        return_logprobs : bool (optional)
            Generate the log-prior probability at the position of each sample.
        random_state : (optional)
            Seed or random state used while drawing values, applied via
            ``random_state_context``.
        dtype : numpy dtype (optional)
            dtype of the returned raw samples; defaults to ``np.float64``.
        **kwargs
            Additional keyword arguments are passed to the
            `~thejoker.JokerSamples` initializer.

        Returns
        -------
        samples : `thejoker.JokerSamples`
            The random samples.
        """
        from theano.gof.fg import MissingInputError
        from pymc3.distributions import draw_values
        import exoplanet.units as xu

        if dtype is None:
            dtype = np.float64

        # Restrict to the parameters we actually need to draw: always the
        # nonlinear ones, plus the linear/offset ones when requested.
        sub_pars = {k: p for k, p in self.pars.items()
                    if k in self._nonlinear_equiv_units
                    or ((k in self._linear_equiv_units
                         or k in self._v0_offsets_equiv_units)
                        and generate_linear)}

        if generate_linear:
            par_names = self.par_names
        else:
            par_names = list(self._nonlinear_equiv_units.keys())

        pars_list = list(sub_pars.values())

        # MAJOR HACK RELATED TO UPSTREAM ISSUES WITH pymc3: temporarily
        # override each distribution's shape so draw_values() returns ``size``
        # samples at once; the original shapes are restored below.
        init_shapes = dict()
        for par in pars_list:
            if hasattr(par, 'distribution'):
                init_shapes[par.name] = par.distribution.shape
                par.distribution.shape = (size, )

        with random_state_context(random_state):
            samples_values = draw_values(pars_list)
        raw_samples = {p.name: samples.astype(dtype)
                       for p, samples in zip(pars_list, samples_values)}

        if return_logprobs:
            # Evaluate each distribution's logp at the drawn values; variables
            # that are deterministic transforms or depend on other variables
            # are skipped with a warning.
            logp = []
            for par in pars_list:
                try:
                    _logp = par.distribution.logp(raw_samples[par.name]).eval()
                except AttributeError:
                    logger.warning("Cannot auto-compute log-prior value for "
                                   f"parameter {par} because it is defined "
                                   "as a transformation from another "
                                   "variable.")
                    continue
                except MissingInputError:
                    logger.warning("Cannot auto-compute log-prior value for "
                                   f"parameter {par} because it depends on "
                                   "other variables.")
                    continue
                logp.append(_logp)
            log_prior = np.sum(logp, axis=0)

        # CONTINUED MAJOR HACK RELATED TO UPSTREAM ISSUES WITH pymc3:
        # restore the original distribution shapes.
        for par in pars_list:
            if hasattr(par, 'distribution'):
                par.distribution.shape = init_shapes[par.name]

        # Apply units if they are specified:
        prior_samples = JokerSamples(poly_trend=self.poly_trend,
                                     n_offsets=self.n_offsets,
                                     **kwargs)
        for name in par_names:
            p = sub_pars[name]
            unit = getattr(p, xu.UNIT_ATTR_NAME, u.one)
            if p.name not in prior_samples._valid_units.keys():
                continue
            prior_samples[p.name] = np.atleast_1d(raw_samples[p.name]) * unit

        if return_logprobs:
            prior_samples['ln_prior'] = log_prior

        # TODO: right now, elsewhere, we assume the log_prior is a single value
        # for each sample (i.e. the total prior value). In principle, we could
        # store all of the individual log-prior values (for each parameter),
        # like here:
        # log_prior = {k: np.atleast_1d(v)
        #              for k, v in log_prior.items()}
        # log_prior = Table(log_prior)[par_names]

        return prior_samples
@u.quantity_input(P_min=u.day, P_max=u.day)
def default_nonlinear_prior(P_min=None, P_max=None, s=None,
                            model=None, pars=None):
    r"""
    Retrieve pymc3 variables that specify the default prior on the nonlinear
    parameters of The Joker. See docstring of `JokerPrior.default()` for more
    information.

    The nonlinear parameters an default prior forms are:

    * ``P``, period: :math:`p(P) \propto 1/P`, over the domain
      :math:`(P_{\rm min}, P_{\rm max})`
    * ``e``, eccentricity: the short-period form from Kipping (2013)
    * ``M0``, phase: uniform over the domain :math:`(0, 2\pi)`
    * ``omega``, argument of pericenter: uniform over the domain
      :math:`(0, 2\pi)`
    * ``s``, additional extra variance added in quadrature to data
      uncertainties: delta-function at 0

    Parameters
    ----------
    P_min : `~astropy.units.Quantity` [time]
    P_max : `~astropy.units.Quantity` [time]
    s : `~pm.model.TensorVariable`, ~astropy.units.Quantity` [speed]
    model : `pymc3.Model`
        This is either required, or this function must be called within a pymc3
        model context.

    Returns
    -------
    dict
        Mapping of parameter name to pymc3 variable (with units attached via
        ``exoplanet.units``), including any user-supplied entries from ``pars``.
    """
    import theano.tensor as tt
    import pymc3 as pm
    from exoplanet.distributions import Angle
    import exoplanet.units as xu
    from .distributions import UniformLog, Kipping13Global
    # Resolve to the active model (or raise if neither argument nor context).
    model = pm.modelcontext(model)
    if pars is None:
        pars = dict()
    if s is None:
        s = 0 * u.m/u.s
    if isinstance(s, pm.model.TensorVariable):
        # s provided as a pymc3 variable: only adopt it if the user did not
        # already pass an 's' entry in pars.
        pars['s'] = pars.get('s', s)
    else:
        # s provided as a Quantity: it must carry velocity units.
        if not hasattr(s, 'unit') or not s.unit.is_equivalent(u.km/u.s):
            raise u.UnitsError("Invalid unit for s: must be equivalent to km/s")
    # dictionary of parameters to return
    out_pars = dict()
    with model:
        # Set up the default priors for parameters with defaults
        # Note: we have to do it this way (as opposed to with .get(..., default)
        # because this can only get executed if the param is not already
        # defined, otherwise variables are defined twice in the model
        if 'e' not in pars:
            out_pars['e'] = xu.with_unit(Kipping13Global('e'),
                                         u.one)
        # If omega or M0 is *not* specified by the user, default to U(0, 2π)
        if 'omega' not in pars:
            out_pars['omega'] = xu.with_unit(Angle('omega'), u.rad)
        if 'M0' not in pars:
            out_pars['M0'] = xu.with_unit(Angle('M0'), u.rad)
        if 's' not in pars:
            # Fixed (delta-function) jitter, wrapped as a Deterministic so it
            # participates in the model like the other parameters.
            out_pars['s'] = xu.with_unit(pm.Deterministic('s',
                                                          tt.constant(s.value)),
                                         s.unit)
        if 'P' not in pars:
            if P_min is None or P_max is None:
                raise ValueError("If you are using the default period prior, "
                                 "you must pass in both P_min and P_max to set "
                                 "the period prior domain.")
            # p(P) ∝ 1/P on (P_min, P_max); bounds expressed in P_min's unit.
            out_pars['P'] = xu.with_unit(UniformLog('P',
                                                    P_min.value,
                                                    P_max.to_value(P_min.unit)),
                                         P_min.unit)
    # User-supplied variables take precedence over the defaults built above.
    for k in pars.keys():
        out_pars[k] = pars[k]
    return out_pars
@u.quantity_input(sigma_K0=u.km/u.s, P0=u.day)
def default_linear_prior(sigma_K0=None, P0=None, sigma_v=None,
                         poly_trend=1, model=None, pars=None):
    r"""
    Retrieve pymc3 variables that specify the default prior on the linear
    parameters of The Joker. See docstring of `JokerPrior.default()` for more
    information.

    The linear parameters an default prior forms are:

    * ``K``, velocity semi-amplitude: Normal distribution, but with a variance
      that scales with period and eccentricity.
    * ``v0``, ``v1``, etc. polynomial velocity trend parameters: Independent
      Normal distributions.

    Parameters
    ----------
    sigma_K0 : `~astropy.units.Quantity` [speed]
    P0 : `~astropy.units.Quantity` [time]
    sigma_v : iterable of `~astropy.units.Quantity`
    model : `pymc3.Model`
        This is either required, or this function must be called within a pymc3
        model context.

    Returns
    -------
    dict
        Mapping of parameter name to pymc3 variable (with units attached via
        ``exoplanet.units``), including any user-supplied entries from ``pars``.
    """
    import pymc3 as pm
    import exoplanet.units as xu
    from .distributions import FixedCompanionMass
    # Resolve to the active model (or raise if neither argument nor context).
    model = pm.modelcontext(model)
    if pars is None:
        pars = dict()
    # dictionary of parameters to return
    out_pars = dict()
    # set up poly. trend names:
    poly_trend, v_names = validate_poly_trend(poly_trend)
    # get period/ecc from dict of nonlinear parameters
    # (the K prior below is conditioned on P and e)
    P = model.named_vars.get('P', None)
    e = model.named_vars.get('e', None)
    if P is None or e is None:
        raise ValueError("Period P and eccentricity e must both be defined as "
                         "nonlinear parameters on the model.")
    # NOTE(review): sigma_v is only validated when 'v0' is absent from pars.
    # If the user supplies 'v0' but not the higher-order trend terms, the
    # sigma_v[name] lookups in the loop below use the *unvalidated* input --
    # presumably a latent edge case; confirm against validate_sigma_v.
    if v_names and 'v0' not in pars:
        sigma_v = validate_sigma_v(sigma_v, poly_trend, v_names)
    with model:
        if 'K' not in pars:
            if sigma_K0 is None or P0 is None:
                raise ValueError("If using the default prior form on K, you "
                                 "must pass in a variance scale (sigma_K0) "
                                 "and a reference period (P0)")
            # Default prior on semi-amplitude: scales with period and
            # eccentricity such that it is flat with companion mass
            v_unit = sigma_K0.unit
            out_pars['K'] = xu.with_unit(FixedCompanionMass('K', P=P, e=e,
                                                            sigma_K0=sigma_K0,
                                                            P0=P0),
                                         v_unit)
        else:
            v_unit = getattr(pars['K'], xu.UNIT_ATTR_NAME, u.one)
        for i, name in enumerate(v_names):
            if name not in pars:
                # Default priors are independent gaussians
                # FIXME: make mean, mu_v, customizable
                out_pars[name] = xu.with_unit(
                    pm.Normal(name, 0.,
                              sigma_v[name].value),
                    sigma_v[name].unit)
    # User-supplied variables take precedence over the defaults built above.
    for k in pars.keys():
        out_pars[k] = pars[k]
    return out_pars
| [
"adrian.prw@gmail.com"
] | adrian.prw@gmail.com |
c7371a79f24d086e2067124263d120c561a84446 | 35aecbd65de889efbfa5e206191a314fec0992d8 | /app.py | bd448577a9deeb4850bfb0c09b7a6b2353e87a74 | [] | no_license | Iliavas/SampleFlaskShit | 6552c79d8e6bebb2117f450105a177353819dc5c | ae57f1dd881f898a6ba8f091ada17fed23ec9e19 | refs/heads/master | 2023-03-23T11:39:47.015409 | 2021-03-18T18:25:44 | 2021-03-18T18:25:44 | 349,177,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from flask import Flask
app = Flask(__name__)


@app.route("/")
def hello():
    """Handle requests to the root URL by returning a plain greeting."""
    greeting = "hello"
    return greeting
"il.vsl0110@gmail.com"
] | il.vsl0110@gmail.com |
2e62706dd09fce4b9dfe0a3774ce8c2b2c34c139 | e109dca6755098ee2911add173126e91adf69eb6 | /Right number.py | bf8511098a28e4232c13638a0d3c331b183295dd | [] | no_license | Kalaikko/Python | e61fdf85fb7e94477b705fd480f4b0c7342e27e5 | b99dee23848bdcf5b7630c411645eb65863933fe | refs/heads/main | 2023-01-27T19:55:15.721610 | 2020-12-08T16:41:43 | 2020-12-08T16:41:43 | 319,698,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | money = int(input("Enter the amount"))
if (money >= 100 and money <=500) or (money >= 1000 and money <=5000):
print("The money is between 100 and 500 or 1000 and 5000")
| [
"noreply@github.com"
] | Kalaikko.noreply@github.com |
10e316d5e550fbb88ca5a8c32a147b95fa7067cd | 43764004e691a7a77faf78dabdd54dab45f10cdb | /source/palindrome.py | c82b53b28a2188d25b8476e0bc9694dd50328841 | [
"MIT"
] | permissive | BurhanH/code-signal-python | 4e62265246859ab8a17bee4dae73e1271bdbb1ff | 1937b65bfad23248126879020c5a8104c4d1f7c1 | refs/heads/master | 2022-11-01T15:06:59.617824 | 2020-06-15T18:56:34 | 2020-06-15T18:56:34 | 271,887,683 | 1 | 2 | MIT | 2020-06-15T18:54:05 | 2020-06-12T20:48:43 | Python | UTF-8 | Python | false | false | 281 | py | # https://app.codesignal.com/arcade/intro/level-1/s5PbmwxfECC52PWyQ
# Given the string, check if it is a palindrome.
def check_palindrome(input_string: str = '') -> bool:
if 1 <= len(input_string) <= 100000:
return input_string == input_string[::-1]
return False
| [
"noreply@github.com"
] | BurhanH.noreply@github.com |
584809ed53ad5619053d2185651806cf8714ed04 | 2195bec4cc44f5eb552f46fe62135d9f22e6dc03 | /apps/trade/migrations/0008_auto_20190122_1826.py | 25d6418999665e72e1ecc7a24ee97f90647b4dac | [] | no_license | DzrJob/gulishop | 5c802d1bba0ad6ec23aa4c29a8ac6abcc085497b | 5620f09cd6d2a99e7643d5ec0b6bc9e1203be6fe | refs/heads/master | 2020-04-16T17:58:17.404170 | 2019-02-07T07:17:59 | 2019-02-07T07:17:59 | 165,797,566 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-01-22 18:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adjusts verbose_name/max_length on
    three OrderInfo fields (address, signer_mobile, signer_name).

    Do not edit the operations by hand; regenerate with ``makemigrations``.
    """

    dependencies = [
        ('trade', '0007_auto_20190122_1812'),
    ]
    operations = [
        migrations.AlterField(
            model_name='orderinfo',
            name='address',
            field=models.CharField(max_length=200, verbose_name='收货地址'),
        ),
        migrations.AlterField(
            model_name='orderinfo',
            name='signer_mobile',
            field=models.CharField(max_length=11, verbose_name='联系电话'),
        ),
        migrations.AlterField(
            model_name='orderinfo',
            name='signer_name',
            field=models.CharField(max_length=30, verbose_name='签收人'),
        ),
    ]
| [
"dzr_job@163.com"
] | dzr_job@163.com |
59859690d990364b30c66a3d73d15396cbde7967 | 374b00d7a3108dcd853d80727703a1bbbae070ac | /garde.py | e83f28a6fac0b081b4dd5e02354b3f6558b9f606 | [] | no_license | EmeryBV/POA | 24e74eaf43094a9c365a77538a1d5ec97bf6e09f | 3d74efc1a6f0d1f86fb5fa144946e7364fdc235b | refs/heads/master | 2023-01-22T14:07:43.594841 | 2020-12-10T21:39:08 | 2020-12-10T21:39:08 | 315,971,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,891 | py | from random import *
from collections import OrderedDict # /
class garde:
    """A guard ("garde") agent that patrols a grid environment (``env``).

    The environment is expected to expose ``env.grille`` (a 2-D array of
    cells with ``mur``/``perso``/``espion``/``lumiere`` attributes),
    ``env.fenetre`` (the display window) and ``env.listEspion`` (spies) --
    inferred from usage here; confirm against the env implementation.
    Directions are encoded as 0=up (y-1), 1=down (y+1), 2=left (x-1),
    3=right (x+1).
    """
    def __init__(self, coordX=0, coordY=0, ):
        # Grid position of the guard.
        self.x = coordX
        self.y = coordY
        # Identifier used by the display window; assigned externally.
        self.id = None
        self.orientation = 0  # facing direction (not used elsewhere in this class)
        # Recently visited cells as (x, y) tuples, capped at 15 entries.
        self.lastchemin = []
    def prevenir(self, gardeProche):
        # Warn a nearby guard (placeholder: only prints a message).
        print("garde prévenu")
    def explore(self, env):
        """Take one random-walk step, biased by recent path and darkness.

        Repeatedly draws a direction from a weighted pool until a free,
        in-bounds, non-wall cell is found; directions that fail are removed
        from the pool. If the pool empties, the guard stays in place.
        """
        ok = False
        # Weighted direction pool (4 copies each), then reweighted by
        # modifRandomDirectionChoice using the recent path and cell lighting.
        chooseDirection = self.modifRandomDirectionChoice([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3], env)
        while not ok:
            try:
                direction = chooseDirection[randint(0, len(chooseDirection) - 1)]
            except:
                direction = chooseDirection[0]
            if direction == 0:
                if self.y > 0 and not env.grille[self.x][self.y - 1].mur and env.grille[self.x][
                    self.y - 1].perso == False and env.grille[self.x][self.y - 1].espion == False:
                    self.move(self.x, self.y - 1, env)
                    ok = True
                else:
                    # Blocked: drop this direction from the pool.
                    chooseDirection[:] = (i for i in chooseDirection if i != 0)
            elif direction == 1:
                if self.y < len(env.grille[self.x]) - 1 and not env.grille[self.x][self.y + 1].mur and \
                        env.grille[self.x][self.y + 1].perso == False and env.grille[self.x][
                    self.y + 1].espion == False:
                    self.move(self.x, self.y + 1, env)
                    ok = True
                else:
                    chooseDirection[:] = (i for i in chooseDirection if i != 1)
            elif direction == 2:
                if self.x > 0 and not env.grille[self.x - 1][self.y].mur and env.grille[self.x - 1][
                    self.y].perso == False and env.grille[self.x - 1][self.y].espion == False:
                    self.move(self.x - 1, self.y, env)
                    ok = True
                else:
                    chooseDirection[:] = (i for i in chooseDirection if i != 2)
            elif direction == 3:
                if self.x < len(env.grille) - 1 and not env.grille[self.x + 1][self.y].mur and env.grille[self.x + 1][
                    self.y].perso == False and env.grille[self.x + 1][self.y].espion == False:
                    self.move(self.x + 1, self.y, env)
                    ok = True
                else:
                    chooseDirection[:] = (i for i in chooseDirection if i != 3)
            if len(chooseDirection) == 0:
                ok = True  # no move possible: stay in place
    def getPosition(self, grille):
        # Return the current (x, y) position; ``grille`` is unused.
        return self.x, self.y
    def move(self, x, y, env):
        """Move the guard to cell (x, y), updating grid flags and display."""
        self.actualiseLastChemin(self.x, self.y)
        env.grille[self.x][self.y].perso = False;
        env.grille[x][y].perso = self;
        self.x = x
        self.y = y
        env.fenetre.moveGarde(self.id, x, y)
    def actualiseLastChemin(self, x, y):
        # Record (x, y) in the recent-path history, keeping at most 15 cells.
        self.lastchemin.append((x, y))
        if len(self.lastchemin) > 15:
            self.lastchemin.pop(0)
    def modifRandomDirectionChoice(self, tab, env):
        """Reweight the direction pool ``tab`` using the recent path.

        Directions leading back to recently visited neighbours are first
        removed then re-added (once or twice, depending on how old the
        visit is), shifting the odds away from/toward them; finally the
        darkest neighbouring cells get extra weight.
        """
        for i in self.lastchemin:
            # NOTE(review): this membership test compares the *sum* of
            # coordinates, so it can also match diagonal/remote cells whose
            # coordinate sum differs by 1 -- presumably intended to detect
            # adjacent cells; confirm.
            if (abs((i[0] + i[1]) - (self.x + self.y)) == 1):
                if (i[0] > self.x):
                    tab = list(filter((3).__ne__, tab))
                    tab.append(3)
                    if self.lastchemin.index(i) < 5: tab.append(3)
                elif (i[0] < self.x):
                    tab = list(filter((2).__ne__, tab))
                    tab.append(2)
                    if self.lastchemin.index(i) < 5: tab.append(2)
                elif (i[1] > self.y):
                    tab = list(filter((1).__ne__, tab))
                    tab.append(1)
                    if self.lastchemin.index(i) < 5: tab.append(1)
                elif (i[1] < self.y):
                    tab = list(filter((0).__ne__, tab))
                    tab.append(0)
                    if self.lastchemin.index(i) < 5: tab.append(0)
        tab = self.modifDirectionByShadow(tab, env)
        return tab
    def modifDirectionByShadow(self, tab, env):
        """Append extra weight for the darkest neighbouring directions."""
        # order: (0, lum), (1, lum), (2, lum), (3, lum)
        lum = [(0, env.grille[self.x][self.y - 1].lumiere),
               (1, env.grille[self.x][self.y + 1].lumiere),
               (2, env.grille[self.x - 1][self.y].lumiere),
               (3, env.grille[self.x + 1][self.y].lumiere)]
        # Sort by light level; add the darkest direction twice, second once.
        lum.sort(key=lambda tup: tup[1])
        tab.append(lum[0][0]);
        tab.append(lum[0][0]);
        tab.append(lum[1][0]);
        return tab;
    def exploreOpti(self, env):
        """Take one "smart" step: chase a spotted spy, else pick the best
        free direction via chooseBestDir."""
        directionAccessible = []
        repere = self.repereEspion(env)
        if repere != False:
            # A spy was located: step toward it.
            self.move(repere[0], repere[1], env)
        else:
            # Collect the free, in-bounds, non-wall neighbouring directions.
            if self.y > 0 and not env.grille[self.x][self.y - 1].mur and env.grille[self.x][self.y - 1].perso == False:
                directionAccessible.append(0)
            if self.y < len(env.grille[self.x]) - 1 and not env.grille[self.x][self.y + 1].mur and env.grille[self.x][
                self.y + 1].perso == False:
                directionAccessible.append(1)
            if self.x > 0 and not env.grille[self.x - 1][self.y].mur and env.grille[self.x - 1][self.y].perso == False:
                directionAccessible.append(2)
            if self.x < len(env.grille) - 1 and not env.grille[self.x + 1][self.y].mur and env.grille[self.x + 1][
                self.y].perso == False:
                directionAccessible.append(3)
            if len(directionAccessible) != 0:
                dir = self.chooseBestDir(directionAccessible, env)
                if dir == 0:
                    self.move(self.x, self.y - 1, env)
                elif dir == 1:
                    self.move(self.x, self.y + 1, env)
                elif dir == 2:
                    self.move(self.x - 1, self.y, env)
                elif dir == 3:
                    self.move(self.x + 1, self.y, env)
    def chooseBestDir(self, directionAccessible, env):
        """Pick a direction: prefer unvisited neighbours (darkest first),
        otherwise the least-recently-visited neighbour."""
        cheminNonVisiter = directionAccessible
        voisin = OrderedDict()
        maxTab = len(self.lastchemin)
        for i in self.lastchemin:
            # Manhattan-adjacent recently visited cell?
            if abs(abs(i[0] - self.x) + abs(self.y - i[1])) == 1:
                if (i[0] > self.x and 3 in directionAccessible):
                    # Score = how long ago the neighbour was visited.
                    voisin[3] = maxTab - self.lastchemin.index(i)
                    cheminNonVisiter = list(filter((3).__ne__, cheminNonVisiter))
                elif (i[0] < self.x and 2 in directionAccessible):
                    voisin[2] = maxTab - self.lastchemin.index(i)
                    cheminNonVisiter = list(filter((2).__ne__, cheminNonVisiter))
                elif (i[1] > self.y and 1 in directionAccessible):
                    voisin[1] = maxTab - self.lastchemin.index(i)
                    cheminNonVisiter = list(filter((1).__ne__, cheminNonVisiter))
                elif (i[1] < self.y and 0 in directionAccessible):
                    voisin[0] = maxTab - self.lastchemin.index(i)
                    cheminNonVisiter = list(filter((0).__ne__, cheminNonVisiter))
        if len(cheminNonVisiter) > 0:
            # Some accessible direction was never visited recently: among
            # those, take the darkest one.
            return self.modifDirectionByShadow2(list(set(directionAccessible) & set(cheminNonVisiter)), env)
        # All accessible neighbours were visited recently: rank by recency.
        voisin = sorted(voisin.items(), key=lambda t: t[1])
        voisinEloigne = []
        for i in voisin[::-1]:
            if i[1] >= 5:
                voisinEloigne.append((i[0], i[1]))
        if len(voisinEloigne) <= 0:
            # No "old enough" neighbour: take the least recently visited one.
            return voisin[len(voisin) - 1][0]
        else:
            # Several old-enough neighbours: break the tie by darkness.
            return self.modifDirectionByShadow2(voisinEloigne, env, True)
    def modifDirectionByShadow2(self, tab, env, voisin=False):
        """Return the darkest direction from ``tab``.

        When ``voisin`` is False, ``tab`` is a list of direction codes; ties
        on the minimum light level are broken randomly. When ``voisin`` is
        True, ``tab`` is a list of (direction, score) pairs.
        """
        # order: (0, lum), (1, lum), (2, lum), (3, lum)
        lum = [(0, env.grille[self.x][self.y - 1].lumiere),
               (1, env.grille[self.x][self.y + 1].lumiere),
               (2, env.grille[self.x - 1][self.y].lumiere),
               (3, env.grille[self.x + 1][self.y].lumiere)]
        if voisin == False:
            for i in range(len(tab)):
                tab[i] = (tab[i], lum[tab[i]][1])
            tab = sorted(tab, key=lambda t: t[1])
            # print(tab)
            tabPlusObscure = []
            for i in tab:
                if i[1] == tab[0][1]:
                    tabPlusObscure.append(i)
            if len(tabPlusObscure) > 1:
                # Several equally dark candidates: pick one at random.
                return tabPlusObscure[randint(0, len(tabPlusObscure) - 1)][0]
            else:
                return tab[0][0]
        if voisin:
            # NOTE(review): here each entry is scored with the (direction,
            # light) *tuple* lum[dir], not the light value alone -- presumably
            # a latent inconsistency with the voisin == False branch; confirm.
            newTab = []
            for i in range(len(tab)):
                newTab.append((tab[i][0], lum[tab[i][0]]))
            newTab = sorted(newTab, key=lambda t: t[1])
            return newTab[0][0]
    def repereEspion(self, env):
        """Search for the first spy within Manhattan distance 7.

        Performs a breadth-first expansion restricted to steps that head
        toward the spy (at most 7 steps). If the spy is reached and is
        sufficiently lit (remaining budget + cell light >= 8), the spy is
        notified via ``ifReperer``; the first step of the path toward the spy
        is returned as (x, y). Returns False if no spy is reachable.
        """
        if (len(env.listEspion) > 0 and abs(env.listEspion[0].x - self.x) +
                abs(self.y - env.listEspion[0].y) <= 7):
            soluce = (env.listEspion[0].x, env.listEspion[0].y)
            # Signed offsets toward the spy along each axis.
            dirOrd = env.listEspion[0].y - self.y;
            dirAbs = env.listEspion[0].x - self.x;
            # Frontier of (x, y, remaining-steps) states, starting budget 7.
            listCasePossible = [(self.x, self.y, 7)]
            # (state, parent) pairs used to reconstruct the path afterwards.
            closedList = [((self.x, self.y, 7), [(self.x, self.y, 7)])]
            while len(listCasePossible) > 0:
                coord = [listCasePossible.pop(0)]
                if (soluce[0] == coord[0][0] and soluce[1] == coord[0][1]):
                    if coord[0][2] + env.grille[soluce[0]][soluce[1]].lumiere >= 8:
                        env.listEspion[0].ifReperer(self)
                    # Walk the parent chain back to the start to find the
                    # first step of the path.
                    parents = closedList[-1]
                    while parents[1][0] != (self.x, self.y, 7):
                        parents = list(filter(lambda a: a[0] == parents[1][0], closedList))[0]
                    return parents[0][0], parents[0][1]
                if (coord[0][2] > 0):
                    # Expand one step vertically/horizontally toward the spy.
                    if (abs(dirOrd) > 0 and self.verifCaseLibre(coord[0][0], coord[0][1] + (dirOrd // abs(dirOrd)), env,
                                                                True)):
                        listCasePossible.append((coord[0][0], coord[0][1] + (dirOrd // abs(dirOrd)), coord[0][2] - 1))
                        closedList.append((listCasePossible[-1], coord))
                    if (abs(dirAbs) > 0 and self.verifCaseLibre(coord[0][0] + (dirAbs // abs(dirAbs)), coord[0][1], env,
                                                                True)):
                        listCasePossible.append((coord[0][0] + (dirAbs // abs(dirAbs)), coord[0][1], coord[0][2] - 1))
                        closedList.append((listCasePossible[-1], coord))
        return False
    def verifCaseLibre(self, newx, newy, env, espion=False):
        """Return True if cell (newx, newy) is walkable (no wall, no guard,
        and -- unless ``espion`` is True -- no spy); False otherwise.

        Out-of-range indices are caught by the except clause and treated as
        blocked (note: negative indices wrap in Python and are *not* caught).
        """
        try:
            if ((env.grille[newx][newy].mur == False) and env.grille[newx][newy].perso == False and (
                    espion or env.grille[newx][newy].espion == False)):
                return True
            else:
                return False
        except:
            print('erreur survenu')
            return False
| [
"emery.vecchio@gmail.com"
] | emery.vecchio@gmail.com |
f563f972b5867bb3c676e04ae2dc19e4979f4d25 | a197253c2e422c0cb95873fa2cea06fc73e304b6 | /browser/danboorudb.py | 11e4fa8ad1aed06ee14ffb9427f3e935e2f6666a | [
"MIT"
] | permissive | greg-campbell/Danbooru2018 | 599dfd51a7326d6fe8061759667cf752103eb440 | f1aa99ff67d87c7e1d4c548cf0fb80e7220b9a98 | refs/heads/master | 2020-12-17T23:11:53.204320 | 2020-01-19T00:14:26 | 2020-01-19T00:14:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,578 | py | import sqlite3
class DanbooruDB:
def __init__(self):
self.conn = sqlite3.connect("danbooru2018_kbr.db")
self.conn.isolation_level = None
self.cur = self.conn.cursor()
self.catDict={'a':1,'c':4,'d':0,'m':5,'s':3}
def getImageIdsForTag(self,tag_name):
# self.cur.execute('''select image_id from imageTags
# where tag_id= (select tag_id from tags where name=?)
# order by image_id''',tag_name)
self.cur.execute('''select image_id from images
where user_delete = 0 and image_id in
(select image_id from imageTags where tag_id in
(select tag_id from tags where name=?)
) order by image_id ''', tag_name)
res = self.cur.fetchall()
return [i[0] for i in res] # list of tuples to list of ids
def getExtForImage(self, image_id):
self.cur.execute('select file_ext from images where image_id=?', (image_id,))
res = self.cur.fetchall()
return res[0]
def get_tags(self):
self.cur.execute('select name from tags order by name limit 1000')
rows = self.cur.fetchall()
return rows
def get_tags2(self, filter):
if filter == '':
return self.get_tags()
self.cur.execute('select name from tags where name like ? order by name', (filter,))
return self.cur.fetchall()
def get_tags3(self, filter, cat):
# TODO: 'not' category
if cat == '':
return self.get_tags2(filter)
params = (filter, self.catDict[cat.lower()])
self.cur.execute('select name from tags where name like ? and category = ? order by name', params)
return self.cur.fetchall()
def getImageIdsForTag2(self,tag_name,rating):
if rating=='':
return self.getImageIdsForTag(tag_name)
params = (rating.lower(),tag_name[0])
# annoying, this variant is faster than join
self.cur.execute('''select image_id from images
where user_delete = 0 and rating=? and image_id in
(select image_id from imageTags where tag_id in
(select tag_id from tags where name=?)
) order by image_id ''', params)
#params = (tag_name[0],rating.lower())
# self.cur.execute('''select I.image_id from images I
# join imageTags IT on I.image_id=IT.image_id
# join tags T on IT.tag_id = T.tag_id
# where T.name=? and I.rating=?
# order by I.image_id''',
# params)
res = self.cur.fetchall()
return [i[0] for i in res] # list of tuples to list of ids
def markAsDelete(self,image_id):
self.cur.execute('update images set user_delete=1 where image_id=?',(image_id,))
def getTagsForImage(self,image_id):
self.cur.execute('''select name from tags where tag_id in
(select tag_id from imageTags where image_id=?)''',(image_id,))
res = self.cur.fetchall()
return [i[0] for i in res] # list of tuples to list of tags
def getTagsForImage2(self,image_id):
self.cur.execute('''select category,name from tags where tag_id in
(select tag_id from imageTags where image_id=?)
order by category,name''',(image_id,))
res = self.cur.fetchall()
return res
def getRatingForImage(self, image_id):
self.cur.execute('select rating from images where image_id=?',(image_id,))
res = self.cur.fetchall()
if res[0][0] == 's':
return 'Safe'
if res[0][0] == 'q':
return 'Questionable'
if res[0][0] == 'e':
return 'Explicit'
return '?'
def tagSubClause(self, tFilter):
res = ''
val = ''
if (tFilter[2] != ''):
tag2 = 'select image_id from imageTags where tag_id in (select tag_id from tags where name like ?)'
if (tFilter[0] == 'OR'):
op = ' UNION '
if (tFilter[0] == 'AND' or tFilter[0] == ''):
op = ' INTERSECT '
if (tFilter[1] == 'NOT'):
op = ' EXCEPT '
res = op + tag2
val = tFilter[2]
return res, val
def getImagesForTags2(self, filter1, filter2, filter3, rating):
params = []
start = 'select image_id from images where user_delete=0 '
if rating != '':
params.append(rating.lower())
start += 'and rating=? '
start += 'and image_id in '
tag1 = 'select image_id from imageTags where tag_id in (select tag_id from tags where name like ?)'
params.append(filter1[2])
tag2, param2 = self.tagSubClause(filter2)
tag3, param3 = self.tagSubClause(filter3)
full = start + '(' + tag1 + tag2 + tag3 + ') order by image_id'
if (param2 != ''):
params.append(param2)
if (param3 != ''):
params.append(param3)
print(full)
print(tuple(params))
self.cur.execute(full,params)
res = self.cur.fetchall()
return [i[0] for i in res]
| [
"lifeattickville@gmail.com"
] | lifeattickville@gmail.com |
c7c6abe6ddd69173a76601375d5aa36b3acc06e4 | 0627cc5c3adb47fd4e780b31a76d17839ad384ec | /tensorflow_probability/python/layers/__init__.py | 55a5079eaf94190c21b271793559f7ec7f4b90b3 | [
"Apache-2.0"
] | permissive | ml-lab/probability | 7e57377ae15bcbb9a7878e23d53f4505823b9117 | 09c1e495c929f5bc461a4edbc7710ab81b5b4933 | refs/heads/master | 2021-09-09T04:40:10.045594 | 2018-03-13T23:26:59 | 2018-03-13T23:27:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,304 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow Probability layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.layers.conv_variational import convolution1d_flipout
from tensorflow_probability.python.layers.conv_variational import convolution1d_reparameterization
from tensorflow_probability.python.layers.conv_variational import Convolution1DFlipout
from tensorflow_probability.python.layers.conv_variational import Convolution1DReparameterization
from tensorflow_probability.python.layers.conv_variational import convolution2d_flipout
from tensorflow_probability.python.layers.conv_variational import convolution2d_reparameterization
from tensorflow_probability.python.layers.conv_variational import Convolution2DFlipout
from tensorflow_probability.python.layers.conv_variational import Convolution2DReparameterization
from tensorflow_probability.python.layers.conv_variational import convolution3d_flipout
from tensorflow_probability.python.layers.conv_variational import convolution3d_reparameterization
from tensorflow_probability.python.layers.conv_variational import Convolution3DFlipout
from tensorflow_probability.python.layers.conv_variational import Convolution3DReparameterization
from tensorflow_probability.python.layers.dense_variational import dense_flipout
from tensorflow_probability.python.layers.dense_variational import dense_local_reparameterization
from tensorflow_probability.python.layers.dense_variational import dense_reparameterization
from tensorflow_probability.python.layers.dense_variational import DenseFlipout
from tensorflow_probability.python.layers.dense_variational import DenseLocalReparameterization
from tensorflow_probability.python.layers.dense_variational import DenseReparameterization
from tensorflow_probability.python.layers.util import default_loc_scale_fn
from tensorflow_probability.python.layers.util import default_mean_field_normal_fn
__all__ = [
'Convolution1DFlipout',
'Convolution1DReparameterization',
'Convolution2DFlipout',
'Convolution2DReparameterization',
'Convolution3DFlipout',
'Convolution3DReparameterization',
'DenseFlipout',
'DenseLocalReparameterization',
'DenseReparameterization',
'convolution1d_flipout',
'convolution1d_reparameterization',
'convolution2d_flipout',
'convolution2d_reparameterization',
'convolution3d_flipout',
'convolution3d_reparameterization',
'default_loc_scale_fn',
'default_mean_field_normal_fn',
'dense_flipout',
'dense_local_reparameterization',
'dense_reparameterization',
]
| [
"copybara-piper@google.com"
] | copybara-piper@google.com |
a771a00c3fd049c6dc8482812b8ea1fb06246838 | 1c72aa6d53c886d8fb8ae41a3e9b9c6c4dd9dc6f | /Semester 1/Project submissions/Lewis Clarke/Lewis_Clarke_Python_Coding-2016-04-18/Python Coding/Week 6/position_in_alphabet.py | be2781345f12dca69e6c90d6d0f722351786bf62 | [] | no_license | codebubb/python_course | 74761ce3189d67e3aff964c056aeab27d4e94d4a | 4a6ed4a64e6a726d886add8364c65956d5053fc2 | refs/heads/master | 2021-01-11T03:06:50.519208 | 2016-07-29T10:47:12 | 2016-10-17T10:42:29 | 71,114,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | key = 'abcdefghijklmnopqrstuvwxyz'
def alpha():
word = raw_input('Enter a letter to find its numerical value: ')
if word not in key:
word = raw_input('You did not enter a letter.\nEnter a letter to find its numerical value: ')
for i in word:
n = 1
x = (key.index(i) + n)
print x
alpha()
| [
"jpbubb82@gmail.com"
] | jpbubb82@gmail.com |
0be15dd30872102b4fad90030e23e3675a7cd227 | 2cf6d74f0b80ce25d38714cc58c8990321541ade | /classcastvirtualenv/classcast/project/classcast/urls.py | 8646af4f172c6e94aca0213ac0cf677227b54e92 | [] | no_license | ch33zer/ClassCast | 17ad1454a33970bfb7e2d2027f0dbadf5611aac3 | 42d496870ef891c9f7e065034012c66f0594cf45 | refs/heads/master | 2016-09-06T13:04:27.153527 | 2014-01-11T09:03:47 | 2014-01-11T09:03:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | from django.conf.urls import patterns, include, url
from django.contrib.auth.views import login, logout
from api import EmailSuffixResource, SchoolResource
from forms import UserCreateForm, SchoolCreateForm
from tastypie.api import Api
import views
def betterurl(regex, view):
    """Wrap django's url(), automatically naming the route after its view."""
    return url(regex, view, name=view.__name__)
v1_api = Api(api_name='v1')
v1_api.register(SchoolResource())
v1_api.register(EmailSuffixResource())
schoolpatterns = patterns('',
betterurl(r'^$',views.schoolview)
)
classpatterns = patterns('',
betterurl(r'^$',views.classview),
betterurl(r'^upload/?$',views.addcontentview),
betterurl(r'^stream/?$',views.streamview)
)
contentpatterns = patterns('',
betterurl(r'^$',views.contentview)
)
userpatterns = patterns('',
betterurl(r'^$',views.userview)
)
forms = [
("userform",UserCreateForm),
("schoolform",SchoolCreateForm)
]
conditions = {
"schoolform":views.is_new_school
}
urlpatterns = patterns('',
betterurl(r'^$', views.indexview),
url(r'^accounts/register/?$',views.RegistrationWizard.as_view(forms,condition_dict=conditions),name='registerview'),
url(r'^accounts/login/?$', login, name='loginview'),
url(r'^accounts/logout/?$', logout, {"next_page":"/"}, name='logoutview'),
url(r'^school/(?P<schoolslug>[\w-]+)/',include(schoolpatterns)),
url(r'^class/create/?$',views.CreateClassView.as_view(),name='createclassview'),
url(r'^class/(?P<classslug>[\w-]+)/',include(classpatterns)),
url(r'^user/(?P<userid>\d+)/',include(userpatterns)),
url(r'^content/(?P<contentslug>[\w-]+)/',include(contentpatterns)),
url(r'^api/',include(v1_api.urls)),
url(r'^addclass/?$',views.addclass),
url(r'^streams/on_publish/?$',views.on_publish),
url(r'^streams/on_publish_done/?$',views.on_publish_done),
url(r'^streams/on_play/?$',views.on_play),
url(r'^streams/on_play_done/?$',views.on_play_done)
)
| [
"ch33zer@gmail.com"
] | ch33zer@gmail.com |
181d0ad555d3981f3dd7bddcc29112593d660846 | 09cf1ab9eddc64613398822c7f45244e8a34a39a | /special_getitem.py | 8b1240b71e4803a1f48c7db00b2a37b89b5054e1 | [] | no_license | guibiaoguo/pydemo | 0a221af838727256910cc057d165cc5b6aa08d5e | 65dec2705b5fb03f3987ed6ec12a17bdb82f2c92 | refs/heads/master | 2023-08-08T09:39:00.792238 | 2021-09-04T18:38:44 | 2021-09-04T18:38:44 | 325,356,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | class Fib(object):
"""docstring for Fib"""
def __init__(self):
super(Fib, self).__init__()
self.a, self.b = 0, 1
def __iter__(self):
return self
def __next__(self):
self.a, self.b = self.b, self.a + self.b
if self.a > 1000000:
raise StopIteration()
return self.a
def __getitem__(self, n):
if isinstance(n, int):
a, b = 1, 1
for x in range(n):
a, b = b, a + b
return a
if isinstance(n, slice):
start = n.start
stop = n.stop
if start is None:
start = 0
a, b = 1, 1
L = []
for x in range(stop):
if x >= start:
L.append(a)
a, b = b, a + b
return L
print(Fib()[5])
f = Fib()
print(f[0])
print(f[1])
print(f[100])
print(f[0:5])
| [
"guoa1234@163.com"
] | guoa1234@163.com |
08366dcc624471b8269aabe510ee9f4625242ad9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03012/s966640357.py | f6c0b5afb072f834dc2ec2f8bf626c997ed95245 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | N = int(input())
W = input().split()
W = [int(w) for w in W]
S = sum(W)
mini = pow(10, 10)
for i in range(N):
mi = abs(S - 2 * sum(W[0:i]))
if mi < mini:
mini = mi
print(mini) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
11f264fffe256d55ce47d92831273b0547a51867 | 56a0b432e9dd7cbd4f8111473954264d4e3a6cbf | /OwnCNN/CNN.py | 77041339641be5c86cb5f46d27dea5e6625667f5 | [] | no_license | badwulf51/FinalYearProject | 21107aec6f1db325e44722dbc89c94e79626f142 | b061f288f93a09102a0b9eab35189935183899d9 | refs/heads/master | 2022-12-14T07:39:30.092293 | 2019-06-14T20:27:37 | 2019-06-14T20:27:37 | 209,832,781 | 1 | 0 | null | 2022-12-06T23:49:34 | 2019-09-20T16:16:36 | Jupyter Notebook | UTF-8 | Python | false | false | 441 | py | from Conv2d import *
from Maxpooling import *
from flatten import *
from Dense import *
class CNN():
    """Simple sequential CNN: three conv + max-pool stages followed by
    dropout, flatten and two dense layers.

    NOTE(review): the wildcard imports hide the real layer names -- the
    original mixed `maxPooling`, `MaxPooling` and the undefined `MAx3`,
    which cannot all be right.  `maxPooling` (the spelling used first)
    is used consistently below; confirm against the Maxpooling module's
    actual export, as well as the `Dropout` and `Flatten` names.
    """

    def __init__(self):  # bug fix: `self` was missing, so CNN() raised TypeError
        # Layers are kept as attributes so the constructed network is
        # reachable after __init__ (the original discarded them as locals).
        self.conv1 = conv2d()
        self.max1 = maxPooling(self.conv1)
        self.conv2 = conv2d(self.max1)
        self.max2 = maxPooling(self.conv2)
        self.conv3 = conv2d(self.max2)
        self.max3 = maxPooling(self.conv3)
        # bug fix: original referenced undefined `MAx3` (typo for Max3).
        self.dropout1 = Dropout(self.max3)
        self.flat1 = Flatten(self.dropout1)
        self.dense1 = Dense(self.flat1)
        self.dense2 = Dense(self.dense1)
| [
"aronomalley@yahoo.com"
] | aronomalley@yahoo.com |
07e62911914a1a3d2c70875282c1d69c59ffcd1c | e3998a42f3c8b25044d761745724d59d25796eed | /pycaptcha/ext/commands.py | d4093a934525c405370de499da2be54239c07d0f | [] | no_license | WesleyPestana/recaptcha-form | 45d19876c771238d7ea2533c385e4aa866220bfd | 80532df60aeaa3c4d5fb9ccf2fa6125dd861c266 | refs/heads/master | 2023-05-25T13:01:49.698061 | 2020-04-12T15:39:48 | 2020-04-12T15:39:48 | 254,709,076 | 1 | 0 | null | 2023-05-01T21:38:25 | 2020-04-10T18:38:40 | HTML | UTF-8 | Python | false | false | 230 | py | from pycaptcha.ext.database import db
def create_db():
    """Create all tables registered on the SQLAlchemy metadata."""
    db.create_all()
def drop_db():
    """Drop all tables registered on the SQLAlchemy metadata."""
    db.drop_all()
def init_app(app):
    """Register the database management commands on the Flask CLI."""
    for cmd in (create_db, drop_db):
        app.cli.add_command(app.cli.command()(cmd))
| [
"wesley.pestana@hotmail.com"
] | wesley.pestana@hotmail.com |
2dedeb65d601fe8c0439aca46ccce1f8236b7b3f | 5a22cd3e60550562977b3a74a7f3651573fe1376 | /migrations/versions/b57df5cf4b1_create_offer_table.py | 82856b87ad7859f607f406c3cffc09bb276f26c3 | [] | no_license | GopyDev/neighborme | 92eeaab21479a1f5f1ed7f564dc5ef416f71cce3 | de1841b2cab6ffe1bdabf44ca61ebb50ac94dfaa | refs/heads/master | 2021-01-17T21:11:14.599907 | 2016-07-18T17:16:38 | 2016-07-18T17:16:38 | 63,621,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | """create Offer table
Revision ID: b57df5cf4b1
Revises: 15d14ef884cf
Create Date: 2016-04-07 12:59:12.524337
"""
# revision identifiers, used by Alembic.
revision = 'b57df5cf4b1'
down_revision = '15d14ef884cf'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the offers table (one row per offer made on a request)."""
    op.create_table(
        'offers',
        sa.Column('id', sa.Integer, primary_key=True),
        # Each offer belongs to exactly one request.
        sa.Column('request_id', sa.Integer, sa.ForeignKey('requests.id')),
        sa.Column('offer', sa.Unicode(512)),
        sa.Column('offer_status', sa.String(255)),
        sa.Column('lmessage_at', sa.DateTime))
def downgrade():
    """Undo upgrade() by dropping the offers table."""
    op.drop_table('offers')
| [
"wgoldm@hotmail.com"
] | wgoldm@hotmail.com |
dccbf0aa8ca5790f9398e781f70be9b622d73121 | 620b58e17d4851e43bd1270cabc8c26f43629a7b | /lib/candy_editor/core/project.py | 69425c8ff5a3552a6ad7ce5b1f6313225e568819 | [
"MIT"
] | permissive | lihaochen910/Candy | 78b9862cf06748b365b6fb35ac23f0e7a00ab558 | d12cb964768459c22f30c22531d3e1734901e814 | refs/heads/master | 2022-11-25T19:12:34.533828 | 2021-11-07T16:11:07 | 2021-11-07T16:11:07 | 141,284,960 | 1 | 1 | NOASSERTION | 2022-11-22T09:20:08 | 2018-07-17T12:12:02 | Lua | UTF-8 | Python | false | false | 13,353 | py | import logging
import sys
import os
import os.path
import re
import shutil
import hashlib
import time
from . import signals
from . import jsonHelper
##----------------------------------------------------------------##
from .cache import CacheManager
from .asset import AssetLibrary
##----------------------------------------------------------------##
_CANDY_ENV_DIR = 'env'
_CANDY_GAME_DIR = 'game'
_CANDY_HOST_DIR = 'host'
_CANDY_BINARY_DIR = 'bin'
_CANDY_ASSET_DIR = _CANDY_GAME_DIR + '/asset'
# _CANDY_SCRIPT_LIB_DIR = _CANDY_GAME_DIR + '/lib'
_CANDY_SCRIPT_LIB_DIR = _CANDY_GAME_DIR
_CANDY_HOST_EXTENSION_DIR = _CANDY_HOST_DIR + '/extension'
_CANDY_ENV_PACKAGE_DIR = _CANDY_ENV_DIR + '/packages'
_CANDY_ENV_DATA_DIR = _CANDY_ENV_DIR + '/data'
_CANDY_ENV_LIB_DIR = _CANDY_ENV_DIR + '/lib'
_CANDY_ENV_CONFIG_DIR = _CANDY_ENV_DIR + '/config'
_CANDY_INFO_FILE = 'project.json'
_CANDY_CONFIG_FILE = 'config.json'
####----------------------------------------------------------------##
_default_config = {
"excluded_packages" : []
}
##----------------------------------------------------------------##
def _fixPath ( path ):
path = path.replace ( '\\', '/' ) #for windows
if path.startswith ('./'): path = path[2:]
return path
##----------------------------------------------------------------##
def _makePath ( base, path ):
if path:
return base + '/' + path
else:
return base
##----------------------------------------------------------------##
def _affirmPath ( path ):
if os.path.exists ( path ): return
try:
os.mkdir ( path )
except Exception as e:
pass
##----------------------------------------------------------------##
def _hashPath ( path ):
name, ext = os.path.splitext ( os.path.basename ( path ) )
m = hashlib.md5 ()
m.update ( path.encode ('utf-8') )
return m.hexdigest ()
##----------------------------------------------------------------##
class ProjectException(Exception):
    """Raised for project-level setup errors (invalid path, project
    already initialized, cache folder creation failure)."""
    pass
##----------------------------------------------------------------##
class Project ( object ):
_singleton = None
@staticmethod
def get ():
return Project._singleton
@staticmethod
def findProject ( path = None ):
#TODO: return project info dict instead of path?
path = os.path.abspath ( path or '' )
opath = None
while path and not ( path in ( '', '/','\\' ) ):
if os.path.exists ( path + '/' + _CANDY_ENV_CONFIG_DIR ) \
and os.path.exists ( path + '/' + _CANDY_INFO_FILE ) :
#get info
info = jsonHelper.tryLoadJSON ( path + '/' + _CANDY_INFO_FILE )
info['path'] = path
return info
#go up level
opath = path
path = os.path.dirname ( path )
if path == opath: break
return None
def __init__ ( self ):
assert not Project._singleton
Project._singleton = self
self.path = None
self.cacheManager = CacheManager ()
self.assetLibrary = AssetLibrary ()
self.info = {
'name' : 'Name',
'author' : 'author',
'version' : '1.0.0'
}
self.config = {}
def isLoaded ( self ):
return self.path != None
def _initPath ( self, path ):
self.path = path
self.binaryPath = path + '/' + _CANDY_BINARY_DIR
self.gamePath = path + '/' + _CANDY_GAME_DIR
self.envPath = path + '/' + _CANDY_ENV_DIR
self.envPackagePath = path + '/' + _CANDY_ENV_PACKAGE_DIR
self.envDataPath = path + '/' + _CANDY_ENV_DATA_DIR
self.envConfigPath = path + '/' + _CANDY_ENV_CONFIG_DIR
self.envLibPath = path + '/' + _CANDY_ENV_LIB_DIR
self.assetPath = path + '/' + _CANDY_ASSET_DIR
self.scriptLibPath = path + '/' + _CANDY_SCRIPT_LIB_DIR
self.hostPath = path + '/' + _CANDY_HOST_DIR
self.hostExtensionPath = path + '/' + _CANDY_HOST_EXTENSION_DIR
def _affirmDirectories ( self ):
#mkdir - lv1
_affirmPath ( self.binaryPath )
_affirmPath ( self.envPath )
_affirmPath ( self.envPackagePath )
_affirmPath ( self.envDataPath )
_affirmPath ( self.envLibPath )
_affirmPath ( self.envConfigPath )
_affirmPath ( self.gamePath )
_affirmPath ( self.assetPath )
_affirmPath ( self.scriptLibPath )
_affirmPath ( self.hostPath )
_affirmPath ( self.hostExtensionPath )
def init ( self, path, name ):
info = Project.findProject ( path )
if info:
raise ProjectException ( 'Candy project already initialized:' + info[ 'path' ] )
#
path = os.path.realpath ( path )
if not os.path.isdir ( path ):
raise ProjectException ('%s is not a valid path' % path )
self._initPath ( path )
#
# logging.info ( 'copy template contents' )
# from MainModulePath import getMainModulePath
# def ignore ( src, names ):
# return ['.DS_Store']
# shutil.copytree ( getMainModulePath ('template/host'), self.getPath ('host'), ignore )
# shutil.copytree ( getMainModulePath ('template/game'), self.getPath ('game'), ignore )
# shutil.copy ( getMainModulePath ('template/.gitignore'), self.getPath () )
self._affirmDirectories ()
try:
self.cacheManager.init ( _CANDY_ENV_CONFIG_DIR, self.envConfigPath )
except OSError as e:
raise ProjectException ('error creating cache folder:%s' % e)
self.assetLibrary.load ( _CANDY_ASSET_DIR, self.assetPath, self.path, self.envConfigPath )
signals.emitNow ( 'project.init', self )
logging.info ( 'project initialized: %s' % path )
self.info[ 'name' ] = name
self.saveConfig ()
self.save ()
return True
def load ( self, path ):
path = os.path.realpath (path )
self._initPath ( path )
self._affirmDirectories ()
# os.chdir ( path )
sys.path.insert ( 0, self.envLibPath )
sys.path.insert ( 0, self.getBinaryPath ( 'python' ) ) #for python extension modules
self.info = jsonHelper.tryLoadJSON ( self.getBasePath ( _CANDY_INFO_FILE ) )
self.config = jsonHelper.tryLoadJSON ( self.getConfigPath ( _CANDY_CONFIG_FILE ) )
if not self.config:
self.config = {}
jsonHelper.trySaveJSON ( self.config, self.getConfigPath ( _CANDY_CONFIG_FILE ) )
if not self.info:
self.info = {
'name' : 'name',
'version' : [0,0,1],
}
jsonHelper.trySaveJSON ( self.info, self.getBasePath ( _CANDY_INFO_FILE ) )
self.cacheManager.load ( _CANDY_ENV_CONFIG_DIR, self.envConfigPath )
self.assetLibrary.load ( _CANDY_ASSET_DIR, self.assetPath, self.path, self.envConfigPath )
#will trigger other module
signals.emitNow ( 'project.preload', self )
signals.emitNow ( 'project.load', self )
logging.info ( 'project loaded: %s' % path )
return True
def loadAssetLibrary ( self ):
#load cache & assetlib
self.assetLibrary.loadAssetTable ()
def deploy ( self, **option ):
base = self.getPath ( option.get ( 'path', 'output' ) )
context = DeployContext ( base )
context.cleanPath ()
hostResPath = self.getHostPath ('resource')
gameLibPath = self.getGamePath ('lib')
logging.info ( 'deploy current project' )
context.copyFilesInDir ( hostResPath )
context.copyFile ( gameLibPath, 'lib' )
signals.emitNow ( 'project.pre_deploy', context )
#deploy asset library
objectFiles = []
for node in self.assetLibrary.assetTable.values ():
mgr = node.getManager ()
if not mgr: continue
mgr.deployAsset ( node, context )
#copy scripts
#copy static resources
signals.emitNow ( 'project.deploy', context )
self.assetLibrary.saveAssetTable (
path = base + '/asset/asset_index',
deploy_context = context
)
context.flushTask ()
signals.emitNow ( 'project.post_deploy', context )
print ( 'Deploy building done!' )
signals.emitNow ( 'project.done_deploy', context )
def save ( self ):
logging.info ( 'saving current project' )
signals.emitNow ( 'project.presave', self )
#save project info & config
jsonHelper.trySaveJSON ( self.info, self.getBasePath ( _CANDY_INFO_FILE ), 'project info' )
#save asset & cache
self.assetLibrary.save ()
self.cacheManager.clearFreeCacheFiles ()
self.cacheManager.save ()
signals.emitNow ( 'project.save', self ) #post save
logging.info ( 'project saved' )
return True
def saveConfig ( self ):
jsonHelper.trySaveJSON ( self.config, self.getConfigPath ( _CANDY_CONFIG_FILE ), 'project config')
def getRelativePath ( self, path ):
return _fixPath ( os.path.relpath ( path, self.path ) )
def getPath ( self, path=None ):
return self.getBasePath ( path )
def getBasePath ( self, path=None ):
return _makePath ( self.path, path )
def getEnvPath ( self, path=None ):
return _makePath ( self.envPath, path )
def getEnvDataPath ( self, path=None ):
return _makePath ( self.envDataPath, path )
def getEnvLibPath ( self, path=None ):
return _makePath ( self.envLibPath, path )
def getHostPath ( self, path=None ):
return _makePath ( self.hostPath, path )
def getPackagePath ( self, path=None ):
return _makePath ( self.envPackagePath, path )
def getConfigPath ( self, path=None ):
return _makePath ( self.envConfigPath, path )
def getBinaryPath ( self, path=None ):
return _makePath ( self.binaryPath, path )
def getGamePath ( self, path=None ):
return _makePath ( self.gamePath, path )
def getAssetPath ( self, path=None ):
return _makePath ( self.assetPath, path )
def getAssetNodeRelativePath ( self, path ):
return _fixPath ( os.path.relpath ( path, self.assetPath ) )
def getScriptLibPath ( self, path=None ):
return _makePath ( self.scriptLibPath, path )
def isProjectFile ( self, path ):
path = os.path.abspath ( path )
relpath = os.path.relpath ( path, self.path )
return not ( relpath.startswith ( '..' ) or relpath.startswith ( '/' ) )
def getConfigDict ( self ):
return self.config
def getConfig ( self, key, default = None ):
return self.config.get ( key, default )
def setConfig ( self, key, value ):
self.config[ key ] = value
self.saveConfig ()
def getAssetLibrary ( self ):
return self.assetLibrary
def getCacheManager ( self ):
return self.cacheManager
def generateID ( self ):
userID = 1
index = self.globalIndex
self.globalIndex += 1
return '%d:%d'% ( userID, index )
Project ()
##----------------------------------------------------------------##
class DeployContext ():
_ignoreFilePattern = [
'\.git',
'\.assetmeta',
'^\..*',
]
def __init__ ( self, path ):
self.taskQueue = []
self.path = path
self.assetPath = path + '/asset'
self.fileMapping = {}
self.meta = {}
self.startTime = time.time ()
def cleanPath ( self ):
logging.info ( 'removing output path: %s' % self.path )
# if os.path.isdir ( self.path ):
# shutil.rmtree ( self.path )
_affirmPath ( self.path )
_affirmPath ( self.assetPath )
def ignorePattern ( self ):
return DeployContext._ignoreFilePattern
def getAssetPath ( self, path = None ):
return _makePath ( self.assetPath, path )
def getPath ( self, path = None ):
return _makePath ( self.path, path )
# def getAbsPath ( self, path = None):
def addTask ( self, stage, func, *args ):
task = ( func, args )
self.taskQueue.append ( task )
def _copyFile ( self, src, dst ):
if os.path.isdir ( src ): #dir
if not os.path.exists ( dst ): os.mkdir ( dst )
if os.path.isdir ( dst ):
for f in os.listdir ( src ):
if self.checkFileIgnorable ( f ): continue
self._copyFile ( src + '/' + f, dst + '/' + f )
else: #file
if not os.path.exists ( dst )\
or ( os.path.getmtime ( src ) > os.path.getmtime ( dst ) ):
shutil.copy ( src, dst )
def isNewFile ( self, absPath ):
return int ( os.path.getmtime ( absPath ) ) >= int (self.startTime)
def copyFile ( self, srcPath, dstPath = None, **option ):
if not dstPath:
dstPath = os.path.basename ( srcPath )
absDstPath = self.getPath ( dstPath )
self._copyFile ( srcPath, absDstPath )
def copyFilesInDir ( self, srcDir, dstDir = None ):
if not os.path.isdir ( srcDir ):
raise Exception ( 'Directory expected' )
for fname in os.listdir ( srcDir ):
if self.checkFileIgnorable ( fname ): continue
fpath = srcDir + '/' + fname
self.copyFile ( fpath )
def hasFile ( self, srcPath ):
return srcPath in self.fileMapping
def addFile ( self, srcPath, dstPath = None, **option ):
newPath = self.fileMapping.get ( srcPath, None )
if newPath:
if dstPath and dstPath != newPath:
logging.warn ( 'attempt to deploy a file with different names' )
if not option.get ( 'force', False ): return newPath
#mapping
if not dstPath:
dstPath = 'asset/' + _hashPath ( srcPath )
self.fileMapping[ srcPath ] = dstPath
#copy
self.copyFile ( srcPath, dstPath )
return dstPath
def getFile ( self, srcPath ):
return self.fileMapping.get ( srcPath, None )
def getAbsFile ( self, srcPath ):
return self.getPath ( self.getFile ( srcPath ) )
def replaceInFile ( self, srcFile, strFrom, strTo ):
try:
fp = open ( srcFile, 'r' )
data = fp.read ()
fp.close ()
data = data.replace ( strFrom, strTo )
fp = open ( srcFile, 'w' )
fp.write ( data )
fp.close ()
except Exception as e:
logging.exception ( e )
def flushTask ( self ):
q = self.taskQueue
self.taskQueue = []
for t in q:
func, args = t
func ( *args )
def checkFileIgnorable (self, name):
for pattern in DeployContext._ignoreFilePattern:
if re.match (pattern, name):
return True
return False
| [
"lihaochen910@hotmail.com"
] | lihaochen910@hotmail.com |
e3ff521d76ece0d845ff31b48a19153d060fb44a | 9701081e2d5fd6b3844c85ad8a99a75d01083479 | /AutoPano/Phase2/Code/Test.py | 858a84164ef0991afd0ff4b4c40a68551217e3b2 | [
"MIT"
] | permissive | jsistla/ComputerVision-CMSC733 | b1f6f219a97e2f456cb87c593cfae6d0b6d34632 | f5fa21a0ada8ab8ea08a6c558f6df9676570a2df | refs/heads/master | 2020-08-13T03:24:45.882391 | 2019-05-26T06:38:12 | 2019-05-26T06:38:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,419 | py | #!/usr/bin/env python
"""
CMSC733 Spring 2019: Classical and Deep Learning Approaches for
Geometric Computer Vision
Homework 0: Alohomora: Phase 2 Starter Code
Author(s):
Abhishek Kathpal
M.Eng. Robotics
University of Maryland, College Park
"""
# Dependencies:
# opencv, do (pip install opencv-python)
# skimage, do (apt install python-skimage)
import tensorflow as tf
import cv2
import os
import sys
import glob
import random
from skimage import data, exposure, img_as_float
import matplotlib.pyplot as plt
from Network.Network import Supervised_HomographyModel
from Misc.MiscUtils import *
import numpy as np
import time
import argparse
import shutil
from StringIO import StringIO
import string
import math as m
from tqdm import tqdm
from Misc.TFSpatialTransformer import *
# Don't generate pyc codes
sys.dont_write_bytecode = True
def image_data(BasePath):
    """Pick a random image from BasePath and synthesize one homography
    training sample from it.

    Returns (imgData, hData, src, dst, img_orig): imgData is the
    128x128x2 stack of the original and warped patches, hData the 4x2
    corner-displacement (H4pt) label, src/dst the patch corners before
    and after perturbation.

    NOTE(review): if the image is too small for patchSize + margins the
    `if` body never runs, so the return references unbound names and
    raises -- confirm all inputs are large enough, or add a retry.
    """
    RandIdx = random.randint(1, 1000)
    # RandIdx = 1
    RandImageName = BasePath + str(RandIdx) + '.jpg'
    patchSize = 128  # side length of the square network-input patch
    r = 16           # maximum per-corner perturbation, in pixels
    img_orig = plt.imread(RandImageName)
    if(len(img_orig.shape)==3):
        # colour image: collapse to a single grayscale channel
        img = cv2.cvtColor(img_orig,cv2.COLOR_RGB2GRAY)
    else:
        img = img_orig
    # zero-mean, /255 normalisation
    img=(img-np.mean(img))/255
    if(img.shape[1]-r-patchSize)>r+1 and (img.shape[0]-r-patchSize)>r+1:
        # random top-left patch corner, kept r pixels inside the borders
        x = np.random.randint(r, img.shape[1]-r-patchSize)
        y = np.random.randint(r, img.shape[0]-r-patchSize)
        # print(x)
        p1 = (x,y)
        p2 = (patchSize+x, y)
        p3 = (patchSize+x, patchSize+y)
        p4 = (x, patchSize+y)
        src = [p1, p2, p3, p4]
        src = np.array(src)
        dst = []
        for pt in src:
            # jitter each corner independently by up to +/- r pixels
            dst.append((pt[0]+np.random.randint(-r, r), pt[1]+np.random.randint(-r, r)))
        H = cv2.getPerspectiveTransform(np.float32(src), np.float32(dst))
        H_inv = np.linalg.inv(H)
        # warp the whole image by H^-1, then crop the same window:
        # patch2 is patch1 viewed through the inverse homography
        warpImg = cv2.warpPerspective(img, H_inv, (img.shape[1],img.shape[0]))
        patch1 = img[y:y + patchSize, x:x + patchSize]
        patch2 = warpImg[y:y + patchSize, x:x + patchSize]
        imgData = np.dstack((patch1, patch2))
        # label: per-corner displacement dst - src
        hData = np.subtract(np.array(dst), np.array(src))
    return imgData,hData,np.array(src),np.array(dst),img_orig
def TestOperation(ImgPH, ImageSize, ModelPath, BasePath):
    """Run the trained supervised homography network on one random
    sample and visualise the result.

    Inputs:
    ImgPH is the input image placeholder (1 x 128 x 128 x 2)
    ImageSize is the network input size
    ModelPath - checkpoint file to restore the trained model from
    BasePath - directory of validation images to sample from
    Outputs:
    Displays and writes Final_Output.png with the source, ground-truth
    and predicted patch outlines drawn on the original image.
    """
    imgData,hData,src,dst,img = image_data(BasePath)
    H4pt = Supervised_HomographyModel(ImgPH, ImageSize, 1)
    Saver = tf.train.Saver()
    with tf.Session() as sess:
        Saver.restore(sess, ModelPath)
        print('Number of parameters in this model are %d ' % np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
        imgData=np.array(imgData).reshape(1,128,128,2)
        FeedDict = {ImgPH: imgData}
        Predicted = sess.run(H4pt,FeedDict)
    # predicted corners = source corners + predicted 4-point offsets
    src_new=src+Predicted.reshape(4,2)
    # residual error vs ground truth (computed but unused)
    H4pt_new=dst-src_new
    # outlines: source (thin), ground-truth dst, predicted src_new;
    # colour interpretation depends on RGB (plt.imread) vs BGR (cv2)
    cv2.polylines(img,np.int32([src]),True,(0,255,0), 3)
    cv2.polylines(img,np.int32([dst]),True,(255,0,0), 5)
    cv2.polylines(img,np.int32([src_new]),True,(0,0,255), 5)
    plt.figure()
    plt.imshow(img)
    plt.show()
    cv2.imwrite('Final_Output'+'.png',img)
def main():
    """Parse command-line arguments and run TestOperation on one random
    validation image.

    --ModelPath: checkpoint to restore (default ../Checkpoints/49a0model.ckpt)
    --BasePath:  directory of validation images (default ../Data/Val/)
    """
    # Parse Command Line arguments
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--ModelPath', dest='ModelPath', default='../Checkpoints/49a0model.ckpt', help='Path to load latest model from, Default:ModelPath')
    Parser.add_argument('--BasePath', dest='BasePath', default='../Data/Val/', help='Path to load images from, Default:BasePath')
    Args = Parser.parse_args()
    ModelPath = Args.ModelPath
    BasePath = Args.BasePath
    ImageSize = [1,128,128,2]
    # Define PlaceHolder variables for Input and Predicted output
    ImgPH=tf.placeholder(tf.float32, shape=(1, 128, 128, 2))
    TestOperation(ImgPH, ImageSize, ModelPath, BasePath)
if __name__ == '__main__':
main()
| [
"akathpal@umd.edu"
] | akathpal@umd.edu |
a915f7f5aac8290e569c4536650cbcb728edb52d | 5d1c178763908decbcd2d63481e3b240e4ab3763 | /build/iiwa_moveit/catkin_generated/pkg.installspace.context.pc.py | 9c57060b9bae0749f8b4fbdf4862ee4a39946335 | [] | no_license | ichbindunst/ros3djs_tutorial | 0f27127e00574411babc8127db5f57c542045db8 | f840e93445ffbc7228956ce5db1fbe706224dc59 | refs/heads/master | 2023-03-31T19:51:29.409983 | 2021-04-01T17:54:43 | 2021-04-01T17:54:43 | 353,783,567 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "iiwa_moveit"
PROJECT_SPACE_DIR = "/home/yao/ros3djs_tutorial/install"
PROJECT_VERSION = "1.4.0"
| [
"leo.li.3821@gmail.com"
] | leo.li.3821@gmail.com |
dae0c7efd07d618c0200e208199e6b93461f2de2 | 8afae72db835f4b886d1d2e7368528d98355bb07 | /Knn.py | 3d964965438bc187d7dcfb97a8487c8a1ce063fb | [] | no_license | asd14277/Android | 62d2c8981c476176b279c31ab699be98c0f76827 | a463d627bd44cc5c6afcf84485dbf1081f2acd3f | refs/heads/master | 2022-02-09T17:01:59.610468 | 2019-08-08T06:06:55 | 2019-08-08T06:06:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59,523 | py | import pandas as pd
from sklearn import neighbors
#import numpy as np
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
if Version(sklearn_version) < '0.18':
from sklearn.cross_validation import train_test_split
else:
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn import metrics
#from matplotlib.colors import ListedColormap
import time
start = time.clock()
#np.random.seed(0)#设置随机数种子
data = pd.read_csv(open('E:\\Smalis\\dataset\\3-gram.csv'))
#attack_test = pd.read_csv('E:\\Smalis\\dataset\\3-gram-test.csv')
#4
#X=data[['Name', 'Class', 'Lenth', 'Sequence_0', 'Ratio_0', 'Sequence_1', 'Ratio_1', 'Sequence_2', 'Ratio_2', 'Sequence_3', 'Ratio_3', 'Sequence_4', 'Ratio_4', 'Sequence_5', 'Ratio_5', 'Sequence_6', 'Ratio_6', 'Sequence_7', 'Ratio_7', 'Sequence_8', 'Ratio_8', 'Sequence_9', 'Ratio_9', 'Sequence_10', 'Ratio_10', 'Sequence_11', 'Ratio_11', 'Sequence_12', 'Ratio_12', 'Sequence_13', 'Ratio_13', 'Sequence_14', 'Ratio_14', 'Sequence_15', 'Ratio_15', 'Sequence_16', 'Ratio_16', 'Sequence_17', 'Ratio_17', 'Sequence_18', 'Ratio_18', 'Sequence_19', 'Ratio_19', 'Sequence_20', 'Ratio_20', 'Sequence_21', 'Ratio_21', 'Sequence_22', 'Ratio_22', 'Sequence_23', 'Ratio_23', 'Sequence_24', 'Ratio_24', 'Sequence_25', 'Ratio_25', 'Sequence_26', 'Ratio_26', 'Sequence_27', 'Ratio_27', 'Sequence_28', 'Ratio_28', 'Sequence_29', 'Ratio_29', 'Sequence_30', 'Ratio_30', 'Sequence_31', 'Ratio_31', 'Sequence_32', 'Ratio_32', 'Sequence_33', 'Ratio_33', 'Sequence_34', 'Ratio_34', 'Sequence_35', 'Ratio_35', 'Sequence_36', 'Ratio_36', 'Sequence_37', 'Ratio_37', 'Sequence_38', 'Ratio_38', 'Sequence_39', 'Ratio_39', 'Sequence_40', 'Ratio_40', 'Sequence_41', 'Ratio_41', 'Sequence_42', 'Ratio_42', 'Sequence_43', 'Ratio_43', 'Sequence_44', 'Ratio_44', 'Sequence_45', 'Ratio_45', 'Sequence_46', 'Ratio_46', 'Sequence_47', 'Ratio_47', 'Sequence_48', 'Ratio_48', 'Sequence_49', 'Ratio_49', 'Sequence_50', 'Ratio_50', 'Sequence_51', 'Ratio_51', 'Sequence_52', 'Ratio_52', 'Sequence_53', 'Ratio_53', 'Sequence_54', 'Ratio_54', 'Sequence_55', 'Ratio_55', 'Sequence_56', 'Ratio_56', 'Sequence_57', 'Ratio_57', 'Sequence_58', 'Ratio_58', 'Sequence_59', 'Ratio_59', 'Sequence_60', 'Ratio_60', 'Sequence_61', 'Ratio_61', 'Sequence_62', 'Ratio_62', 'Sequence_63', 'Ratio_63', 'Sequence_64', 'Ratio_64', 'Sequence_65', 'Ratio_65', 'Sequence_66', 'Ratio_66', 'Sequence_67', 'Ratio_67', 'Sequence_68', 'Ratio_68', 'Sequence_69', 'Ratio_69', 'Sequence_70', 'Ratio_70', 'Sequence_71', 'Ratio_71', 'Sequence_72', 'Ratio_72', 
'Sequence_73', 'Ratio_73', 'Sequence_74', 'Ratio_74', 'Sequence_75', 'Ratio_75', 'Sequence_76', 'Ratio_76', 'Sequence_77', 'Ratio_77', 'Sequence_78', 'Ratio_78', 'Sequence_79', 'Ratio_79', 'Sequence_80', 'Ratio_80', 'Sequence_81', 'Ratio_81', 'Sequence_82', 'Ratio_82', 'Sequence_83', 'Ratio_83', 'Sequence_84', 'Ratio_84', 'Sequence_85', 'Ratio_85', 'Sequence_86', 'Ratio_86', 'Sequence_87', 'Ratio_87', 'Sequence_88', 'Ratio_88', 'Sequence_89', 'Ratio_89', 'Sequence_90', 'Ratio_90', 'Sequence_91', 'Ratio_91', 'Sequence_92', 'Ratio_92', 'Sequence_93', 'Ratio_93', 'Sequence_94', 'Ratio_94', 'Sequence_95', 'Ratio_95', 'Sequence_96', 'Ratio_96', 'Sequence_97', 'Ratio_97', 'Sequence_98', 'Ratio_98', 'Sequence_99', 'Ratio_99', 'Sequence_100', 'Ratio_100', 'Sequence_101', 'Ratio_101', 'Sequence_102', 'Ratio_102', 'Sequence_103', 'Ratio_103', 'Sequence_104', 'Ratio_104', 'Sequence_105', 'Ratio_105', 'Sequence_106', 'Ratio_106', 'Sequence_107', 'Ratio_107', 'Sequence_108', 'Ratio_108', 'Sequence_109', 'Ratio_109', 'Sequence_110', 'Ratio_110', 'Sequence_111', 'Ratio_111', 'Sequence_112', 'Ratio_112', 'Sequence_113', 'Ratio_113', 'Sequence_114', 'Ratio_114', 'Sequence_115', 'Ratio_115', 'Sequence_116', 'Ratio_116', 'Sequence_117', 'Ratio_117', 'Sequence_118', 'Ratio_118', 'Sequence_119', 'Ratio_119', 'Sequence_120', 'Ratio_120', 'Sequence_121', 'Ratio_121', 'Sequence_122', 'Ratio_122', 'Sequence_123', 'Ratio_123', 'Sequence_124', 'Ratio_124', 'Sequence_125', 'Ratio_125', 'Sequence_126', 'Ratio_126', 'Sequence_127', 'Ratio_127', 'Sequence_128', 'Ratio_128', 'Sequence_129', 'Ratio_129', 'Sequence_130', 'Ratio_130', 'Sequence_131', 'Ratio_131', 'Sequence_132', 'Ratio_132', 'Sequence_133', 'Ratio_133', 'Sequence_134', 'Ratio_134', 'Sequence_135', 'Ratio_135', 'Sequence_136', 'Ratio_136', 'Sequence_137', 'Ratio_137', 'Sequence_138', 'Ratio_138', 'Sequence_139', 'Ratio_139', 'Sequence_140', 'Ratio_140', 'Sequence_141', 'Ratio_141', 'Sequence_142', 'Ratio_142', 'Sequence_143', 
'Ratio_143', 'Sequence_144', 'Ratio_144', 'Sequence_145', 'Ratio_145', 'Sequence_146', 'Ratio_146', 'Sequence_147', 'Ratio_147', 'Sequence_148', 'Ratio_148', 'Sequence_149', 'Ratio_149', 'Sequence_150', 'Ratio_150', 'Sequence_151', 'Ratio_151', 'Sequence_152', 'Ratio_152', 'Sequence_153', 'Ratio_153', 'Sequence_154', 'Ratio_154', 'Sequence_155', 'Ratio_155', 'Sequence_156', 'Ratio_156', 'Sequence_157', 'Ratio_157', 'Sequence_158', 'Ratio_158', 'Sequence_159', 'Ratio_159', 'Sequence_160', 'Ratio_160', 'Sequence_161', 'Ratio_161', 'Sequence_162', 'Ratio_162', 'Sequence_163', 'Ratio_163', 'Sequence_164', 'Ratio_164', 'Sequence_165', 'Ratio_165', 'Sequence_166', 'Ratio_166', 'Sequence_167', 'Ratio_167', 'Sequence_168', 'Ratio_168', 'Sequence_169', 'Ratio_169', 'Sequence_170', 'Ratio_170', 'Sequence_171', 'Ratio_171', 'Sequence_172', 'Ratio_172', 'Sequence_173', 'Ratio_173', 'Sequence_174', 'Ratio_174', 'Sequence_175', 'Ratio_175', 'Sequence_176', 'Ratio_176', 'Sequence_177', 'Ratio_177', 'Sequence_178', 'Ratio_178', 'Sequence_179', 'Ratio_179', 'Sequence_180', 'Ratio_180', 'Sequence_181', 'Ratio_181', 'Sequence_182', 'Ratio_182', 'Sequence_183', 'Ratio_183', 'Sequence_184', 'Ratio_184', 'Sequence_185', 'Ratio_185', 'Sequence_186', 'Ratio_186', 'Sequence_187', 'Ratio_187', 'Sequence_188', 'Ratio_188', 'Sequence_189', 'Ratio_189', 'Sequence_190', 'Ratio_190', 'Sequence_191', 'Ratio_191', 'Sequence_192', 'Ratio_192', 'Sequence_193', 'Ratio_193', 'Sequence_194', 'Ratio_194', 'Sequence_195', 'Ratio_195', 'Sequence_196', 'Ratio_196', 'Sequence_197', 'Ratio_197', 'Sequence_198', 'Ratio_198', 'Sequence_199', 'Ratio_199', 'Sequence_200', 'Ratio_200', 'Sequence_201', 'Ratio_201', 'Sequence_202', 'Ratio_202', 'Sequence_203', 'Ratio_203', 'Sequence_204', 'Ratio_204', 'Sequence_205', 'Ratio_205', 'Sequence_206', 'Ratio_206', 'Sequence_207', 'Ratio_207', 'Sequence_208', 'Ratio_208', 'Sequence_209', 'Ratio_209', 'Sequence_210', 'Ratio_210', 'Sequence_211', 'Ratio_211', 
'Sequence_212', 'Ratio_212', 'Sequence_213', 'Ratio_213', 'Sequence_214', 'Ratio_214', 'Sequence_215', 'Ratio_215', 'Sequence_216', 'Ratio_216', 'Sequence_217', 'Ratio_217', 'Sequence_218', 'Ratio_218', 'Sequence_219', 'Ratio_219', 'Sequence_220', 'Ratio_220', 'Sequence_221', 'Ratio_221', 'Sequence_222', 'Ratio_222', 'Sequence_223', 'Ratio_223', 'Sequence_224', 'Ratio_224', 'Sequence_225', 'Ratio_225', 'Sequence_226', 'Ratio_226', 'Sequence_227', 'Ratio_227', 'Sequence_228', 'Ratio_228', 'Sequence_229', 'Ratio_229', 'Sequence_230', 'Ratio_230', 'Sequence_231', 'Ratio_231', 'Sequence_232', 'Ratio_232', 'Sequence_233', 'Ratio_233', 'Sequence_234', 'Ratio_234', 'Sequence_235', 'Ratio_235', 'Sequence_236', 'Ratio_236', 'Sequence_237', 'Ratio_237', 'Sequence_238', 'Ratio_238', 'Sequence_239', 'Ratio_239', 'Sequence_240', 'Ratio_240', 'Sequence_241', 'Ratio_241', 'Sequence_242', 'Ratio_242', 'Sequence_243', 'Ratio_243', 'Sequence_244', 'Ratio_244', 'Sequence_245', 'Ratio_245', 'Sequence_246', 'Ratio_246', 'Sequence_247', 'Ratio_247', 'Sequence_248', 'Ratio_248', 'Sequence_249', 'Ratio_249', 'Sequence_250', 'Ratio_250', 'Sequence_251', 'Ratio_251', 'Sequence_252', 'Ratio_252', 'Sequence_253', 'Ratio_253', 'Sequence_254', 'Ratio_254', 'Sequence_255', 'Ratio_255', 'Sequence_256', 'Ratio_256', 'Sequence_257', 'Ratio_257', 'Sequence_258', 'Ratio_258', 'Sequence_259', 'Ratio_259', 'Sequence_260', 'Ratio_260', 'Sequence_261', 'Ratio_261', 'Sequence_262', 'Ratio_262', 'Sequence_263', 'Ratio_263', 'Sequence_264', 'Ratio_264', 'Sequence_265', 'Ratio_265', 'Sequence_266', 'Ratio_266', 'Sequence_267', 'Ratio_267', 'Sequence_268', 'Ratio_268', 'Sequence_269', 'Ratio_269', 'Sequence_270', 'Ratio_270', 'Sequence_271', 'Ratio_271', 'Sequence_272', 'Ratio_272', 'Sequence_273', 'Ratio_273', 'Sequence_274', 'Ratio_274', 'Sequence_275', 'Ratio_275', 'Sequence_276', 'Ratio_276', 'Sequence_277', 'Ratio_277', 'Sequence_278', 'Ratio_278', 'Sequence_279', 'Ratio_279', 'Sequence_280', 
'Ratio_280', 'Sequence_281', 'Ratio_281', 'Sequence_282', 'Ratio_282', 'Sequence_283', 'Ratio_283', 'Sequence_284', 'Ratio_284', 'Sequence_285', 'Ratio_285', 'Sequence_286', 'Ratio_286', 'Sequence_287', 'Ratio_287', 'Sequence_288', 'Ratio_288', 'Sequence_289', 'Ratio_289', 'Sequence_290', 'Ratio_290', 'Sequence_291', 'Ratio_291', 'Sequence_292', 'Ratio_292', 'Sequence_293', 'Ratio_293', 'Sequence_294', 'Ratio_294', 'Sequence_295', 'Ratio_295', 'Sequence_296', 'Ratio_296', 'Sequence_297', 'Ratio_297', 'Sequence_298', 'Ratio_298', 'Sequence_299', 'Ratio_299', 'Sequence_300', 'Ratio_300', 'Sequence_301', 'Ratio_301', 'Sequence_302', 'Ratio_302', 'Sequence_303', 'Ratio_303', 'Sequence_304', 'Ratio_304', 'Sequence_305', 'Ratio_305', 'Sequence_306', 'Ratio_306', 'Sequence_307', 'Ratio_307', 'Sequence_308', 'Ratio_308', 'Sequence_309', 'Ratio_309', 'Sequence_310', 'Ratio_310', 'Sequence_311', 'Ratio_311', 'Sequence_312', 'Ratio_312', 'Sequence_313', 'Ratio_313', 'Sequence_314', 'Ratio_314', 'Sequence_315', 'Ratio_315', 'Sequence_316', 'Ratio_316', 'Sequence_317', 'Ratio_317', 'Sequence_318', 'Ratio_318', 'Sequence_319', 'Ratio_319', 'Sequence_320', 'Ratio_320', 'Sequence_321', 'Ratio_321', 'Sequence_322', 'Ratio_322', 'Sequence_323', 'Ratio_323', 'Sequence_324', 'Ratio_324', 'Sequence_325', 'Ratio_325', 'Sequence_326', 'Ratio_326', 'Sequence_327', 'Ratio_327', 'Sequence_328', 'Ratio_328', 'Sequence_329', 'Ratio_329', 'Sequence_330', 'Ratio_330', 'Sequence_331', 'Ratio_331', 'Sequence_332', 'Ratio_332', 'Sequence_333', 'Ratio_333', 'Sequence_334', 'Ratio_334', 'Sequence_335', 'Ratio_335', 'Sequence_336', 'Ratio_336', 'Sequence_337', 'Ratio_337', 'Sequence_338', 'Ratio_338', 'Sequence_339', 'Ratio_339', 'Sequence_340', 'Ratio_340', 'Sequence_341', 'Ratio_341', 'Sequence_342', 'Ratio_342', 'Sequence_343', 'Ratio_343', 'Sequence_344', 'Ratio_344', 'Sequence_345', 'Ratio_345', 'Sequence_346', 'Ratio_346', 'Sequence_347', 'Ratio_347', 'Sequence_348', 'Ratio_348', 
'Sequence_349', 'Ratio_349', 'Sequence_350', 'Ratio_350', 'Sequence_351', 'Ratio_351', 'Sequence_352', 'Ratio_352', 'Sequence_353', 'Ratio_353', 'Sequence_354', 'Ratio_354', 'Sequence_355', 'Ratio_355', 'Sequence_356', 'Ratio_356', 'Sequence_357', 'Ratio_357', 'Sequence_358', 'Ratio_358', 'Sequence_359', 'Ratio_359', 'Sequence_360', 'Ratio_360', 'Sequence_361', 'Ratio_361', 'Sequence_362', 'Ratio_362', 'Sequence_363', 'Ratio_363', 'Sequence_364', 'Ratio_364', 'Sequence_365', 'Ratio_365', 'Sequence_366', 'Ratio_366', 'Sequence_367', 'Ratio_367', 'Sequence_368', 'Ratio_368', 'Sequence_369', 'Ratio_369', 'Sequence_370', 'Ratio_370', 'Sequence_371', 'Ratio_371', 'Sequence_372', 'Ratio_372', 'Sequence_373', 'Ratio_373', 'Sequence_374', 'Ratio_374', 'Sequence_375', 'Ratio_375', 'Sequence_376', 'Ratio_376', 'Sequence_377', 'Ratio_377', 'Sequence_378', 'Ratio_378', 'Sequence_379', 'Ratio_379', 'Sequence_380', 'Ratio_380', 'Sequence_381', 'Ratio_381', 'Sequence_382', 'Ratio_382', 'Sequence_383', 'Ratio_383', 'Sequence_384', 'Ratio_384', 'Sequence_385', 'Ratio_385', 'Sequence_386', 'Ratio_386', 'Sequence_387', 'Ratio_387', 'Sequence_388', 'Ratio_388', 'Sequence_389', 'Ratio_389', 'Sequence_390', 'Ratio_390', 'Sequence_391', 'Ratio_391', 'Sequence_392', 'Ratio_392', 'Sequence_393', 'Ratio_393', 'Sequence_394', 'Ratio_394', 'Sequence_395', 'Ratio_395', 'Sequence_396', 'Ratio_396', 'Sequence_397', 'Ratio_397', 'Sequence_398', 'Ratio_398', 'Sequence_399', 'Ratio_399', 'Sequence_400', 'Ratio_400', 'Sequence_401', 'Ratio_401', 'Sequence_402', 'Ratio_402', 'Sequence_403', 'Ratio_403', 'Sequence_404', 'Ratio_404', 'Sequence_405', 'Ratio_405', 'Sequence_406', 'Ratio_406', 'Sequence_407', 'Ratio_407', 'Sequence_408', 'Ratio_408', 'Sequence_409', 'Ratio_409', 'Sequence_410', 'Ratio_410', 'Sequence_411', 'Ratio_411', 'Sequence_412', 'Ratio_412', 'Sequence_413', 'Ratio_413', 'Sequence_414', 'Ratio_414', 'Sequence_415', 'Ratio_415', 'Sequence_416', 'Ratio_416', 'Sequence_417', 
'Ratio_417', 'Sequence_418', 'Ratio_418', 'Sequence_419', 'Ratio_419', 'Sequence_420', 'Ratio_420', 'Sequence_421', 'Ratio_421', 'Sequence_422', 'Ratio_422', 'Sequence_423', 'Ratio_423', 'Sequence_424', 'Ratio_424', 'Sequence_425', 'Ratio_425', 'Sequence_426', 'Ratio_426', 'Sequence_427', 'Ratio_427', 'Sequence_428', 'Ratio_428', 'Sequence_429', 'Ratio_429', 'Sequence_430', 'Ratio_430', 'Sequence_431', 'Ratio_431', 'Sequence_432', 'Ratio_432', 'Sequence_433', 'Ratio_433', 'Sequence_434', 'Ratio_434', 'Sequence_435', 'Ratio_435', 'Sequence_436', 'Ratio_436', 'Sequence_437', 'Ratio_437', 'Sequence_438', 'Ratio_438', 'Sequence_439', 'Ratio_439', 'Sequence_440', 'Ratio_440', 'Sequence_441', 'Ratio_441', 'Sequence_442', 'Ratio_442', 'Sequence_443', 'Ratio_443', 'Sequence_444', 'Ratio_444', 'Sequence_445', 'Ratio_445', 'Sequence_446', 'Ratio_446', 'Sequence_447', 'Ratio_447', 'Sequence_448', 'Ratio_448', 'Sequence_449', 'Ratio_449', 'Sequence_450', 'Ratio_450', 'Sequence_451', 'Ratio_451', 'Sequence_452', 'Ratio_452', 'Sequence_453', 'Ratio_453', 'Sequence_454', 'Ratio_454', 'Sequence_455', 'Ratio_455', 'Sequence_456', 'Ratio_456', 'Sequence_457', 'Ratio_457', 'Sequence_458', 'Ratio_458', 'Sequence_459', 'Ratio_459', 'Sequence_460', 'Ratio_460', 'Sequence_461', 'Ratio_461', 'Sequence_462', 'Ratio_462', 'Sequence_463', 'Ratio_463', 'Sequence_464', 'Ratio_464', 'Sequence_465', 'Ratio_465', 'Sequence_466', 'Ratio_466', 'Sequence_467', 'Ratio_467', 'Sequence_468', 'Ratio_468', 'Sequence_469', 'Ratio_469', 'Sequence_470', 'Ratio_470', 'Sequence_471', 'Ratio_471', 'Sequence_472', 'Ratio_472', 'Sequence_473', 'Ratio_473', 'Sequence_474', 'Ratio_474', 'Sequence_475', 'Ratio_475', 'Sequence_476', 'Ratio_476', 'Sequence_477', 'Ratio_477', 'Sequence_478', 'Ratio_478', 'Sequence_479', 'Ratio_479', 'Sequence_480', 'Ratio_480', 'Sequence_481', 'Ratio_481', 'Sequence_482', 'Ratio_482', 'Sequence_483', 'Ratio_483', 'Sequence_484', 'Ratio_484', 'Sequence_485', 'Ratio_485', 
'Sequence_486', 'Ratio_486', 'Sequence_487', 'Ratio_487', 'Sequence_488', 'Ratio_488', 'Sequence_489', 'Ratio_489', 'Sequence_490', 'Ratio_490', 'Sequence_491', 'Ratio_491', 'Sequence_492', 'Ratio_492', 'Sequence_493', 'Ratio_493', 'Sequence_494', 'Ratio_494', 'Sequence_495', 'Ratio_495', 'Sequence_496', 'Ratio_496', 'Sequence_497', 'Ratio_497', 'Sequence_498', 'Ratio_498', 'Sequence_499', 'Ratio_499', 'Sequence_500', 'Ratio_500', 'Sequence_501', 'Ratio_501', 'Sequence_502', 'Ratio_502', 'Sequence_503', 'Ratio_503', 'Sequence_504', 'Ratio_504', 'Sequence_505', 'Ratio_505', 'Sequence_506', 'Ratio_506', 'Sequence_507', 'Ratio_507', 'Sequence_508', 'Ratio_508', 'Sequence_509', 'Ratio_509', 'Sequence_510', 'Ratio_510', 'Sequence_511', 'Ratio_511', 'Sequence_512', 'Ratio_512', 'Sequence_513', 'Ratio_513', 'Sequence_514', 'Ratio_514', 'Sequence_515', 'Ratio_515', 'Sequence_516', 'Ratio_516', 'Sequence_517', 'Ratio_517', 'Sequence_518', 'Ratio_518', 'Sequence_519', 'Ratio_519', 'Sequence_520', 'Ratio_520', 'Sequence_521', 'Ratio_521', 'Sequence_522', 'Ratio_522', 'Sequence_523', 'Ratio_523', 'Sequence_524', 'Ratio_524', 'Sequence_525', 'Ratio_525', 'Sequence_526', 'Ratio_526', 'Sequence_527', 'Ratio_527', 'Sequence_528', 'Ratio_528', 'Sequence_529', 'Ratio_529', 'Sequence_530', 'Ratio_530', 'Sequence_531', 'Ratio_531', 'Sequence_532', 'Ratio_532', 'Sequence_533', 'Ratio_533', 'Sequence_534', 'Ratio_534', 'Sequence_535', 'Ratio_535', 'Sequence_536', 'Ratio_536', 'Sequence_537', 'Ratio_537', 'Sequence_538', 'Ratio_538', 'Sequence_539', 'Ratio_539', 'Sequence_540', 'Ratio_540', 'Sequence_541', 'Ratio_541', 'Sequence_542', 'Ratio_542', 'Sequence_543', 'Ratio_543', 'Sequence_544', 'Ratio_544', 'Sequence_545', 'Ratio_545', 'Sequence_546', 'Ratio_546', 'Sequence_547', 'Ratio_547', 'Sequence_548', 'Ratio_548', 'Sequence_549', 'Ratio_549', 'Sequence_550', 'Ratio_550', 'Sequence_551', 'Ratio_551', 'Sequence_552', 'Ratio_552', 'Sequence_553', 'Ratio_553', 'Sequence_554', 
'Ratio_554', 'Sequence_555', 'Ratio_555', 'Sequence_556', 'Ratio_556', 'Sequence_557', 'Ratio_557', 'Sequence_558', 'Ratio_558', 'Sequence_559', 'Ratio_559', 'Sequence_560', 'Ratio_560', 'Sequence_561', 'Ratio_561', 'Sequence_562', 'Ratio_562', 'Sequence_563', 'Ratio_563', 'Sequence_564', 'Ratio_564', 'Sequence_565', 'Ratio_565', 'Sequence_566', 'Ratio_566', 'Sequence_567', 'Ratio_567', 'Sequence_568', 'Ratio_568', 'Sequence_569', 'Ratio_569', 'Sequence_570', 'Ratio_570', 'Sequence_571', 'Ratio_571', 'Sequence_572', 'Ratio_572', 'Sequence_573', 'Ratio_573', 'Sequence_574', 'Ratio_574', 'Sequence_575', 'Ratio_575', 'Sequence_576', 'Ratio_576', 'Sequence_577', 'Ratio_577', 'Sequence_578', 'Ratio_578', 'Sequence_579', 'Ratio_579', 'Sequence_580', 'Ratio_580', 'Sequence_581', 'Ratio_581', 'Sequence_582', 'Ratio_582', 'Sequence_583', 'Ratio_583', 'Sequence_584', 'Ratio_584', 'Sequence_585', 'Ratio_585', 'Sequence_586', 'Ratio_586', 'Sequence_587', 'Ratio_587', 'Sequence_588', 'Ratio_588', 'Sequence_589', 'Ratio_589', 'Sequence_590', 'Ratio_590', 'Sequence_591', 'Ratio_591', 'Sequence_592', 'Ratio_592', 'Sequence_593', 'Ratio_593', 'Sequence_594', 'Ratio_594', 'Sequence_595', 'Ratio_595', 'Sequence_596', 'Ratio_596', 'Sequence_597', 'Ratio_597', 'Sequence_598', 'Ratio_598', 'Sequence_599', 'Ratio_599', 'Sequence_600', 'Ratio_600', 'Sequence_601', 'Ratio_601', 'Sequence_602', 'Ratio_602', 'Sequence_603', 'Ratio_603', 'Sequence_604', 'Ratio_604', 'Sequence_605', 'Ratio_605', 'Sequence_606', 'Ratio_606', 'Sequence_607', 'Ratio_607', 'Sequence_608', 'Ratio_608', 'Sequence_609', 'Ratio_609', 'Sequence_610', 'Ratio_610', 'Sequence_611', 'Ratio_611', 'Sequence_612', 'Ratio_612', 'Sequence_613', 'Ratio_613', 'Sequence_614', 'Ratio_614', 'Sequence_615', 'Ratio_615', 'Sequence_616', 'Ratio_616', 'Sequence_617', 'Ratio_617', 'Sequence_618', 'Ratio_618', 'Sequence_619', 'Ratio_619', 'Sequence_620', 'Ratio_620', 'Sequence_621', 'Ratio_621', 'Sequence_622', 'Ratio_622', 
'Sequence_623', 'Ratio_623', 'Sequence_624', 'Ratio_624', 'Sequence_625', 'Ratio_625', 'Sequence_626', 'Ratio_626', 'Sequence_627', 'Ratio_627', 'Sequence_628', 'Ratio_628', 'Sequence_629', 'Ratio_629', 'Sequence_630', 'Ratio_630', 'Sequence_631', 'Ratio_631', 'Sequence_632', 'Ratio_632', 'Sequence_633', 'Ratio_633', 'Sequence_634', 'Ratio_634', 'Sequence_635', 'Ratio_635', 'Sequence_636', 'Ratio_636', 'Sequence_637', 'Ratio_637', 'Sequence_638', 'Ratio_638', 'Sequence_639', 'Ratio_639', 'Sequence_640', 'Ratio_640', 'Sequence_641', 'Ratio_641', 'Sequence_642', 'Ratio_642', 'Sequence_643', 'Ratio_643', 'Sequence_644', 'Ratio_644', 'Sequence_645', 'Ratio_645', 'Sequence_646', 'Ratio_646', 'Sequence_647', 'Ratio_647', 'Sequence_648', 'Ratio_648', 'Sequence_649', 'Ratio_649', 'Sequence_650', 'Ratio_650', 'Sequence_651', 'Ratio_651', 'Sequence_652', 'Ratio_652', 'Sequence_653', 'Ratio_653', 'Sequence_654', 'Ratio_654', 'Sequence_655', 'Ratio_655', 'Sequence_656', 'Ratio_656', 'Sequence_657', 'Ratio_657', 'Sequence_658', 'Ratio_658', 'Sequence_659', 'Ratio_659', 'Sequence_660', 'Ratio_660', 'Sequence_661', 'Ratio_661', 'Sequence_662', 'Ratio_662', 'Sequence_663', 'Ratio_663', 'Sequence_664', 'Ratio_664', 'Sequence_665', 'Ratio_665', 'Sequence_666', 'Ratio_666', 'Sequence_667', 'Ratio_667', 'Sequence_668', 'Ratio_668', 'Sequence_669', 'Ratio_669', 'Sequence_670', 'Ratio_670', 'Sequence_671', 'Ratio_671', 'Sequence_672', 'Ratio_672', 'Sequence_673', 'Ratio_673', 'Sequence_674', 'Ratio_674', 'Sequence_675', 'Ratio_675', 'Sequence_676', 'Ratio_676', 'Sequence_677', 'Ratio_677', 'Sequence_678', 'Ratio_678', 'Sequence_679', 'Ratio_679', 'Sequence_680', 'Ratio_680', 'Sequence_681', 'Ratio_681', 'Sequence_682', 'Ratio_682', 'Sequence_683', 'Ratio_683', 'Sequence_684', 'Ratio_684', 'Sequence_685', 'Ratio_685', 'Sequence_686', 'Ratio_686', 'Sequence_687', 'Ratio_687', 'Sequence_688', 'Ratio_688', 'Sequence_689', 'Ratio_689', 'Sequence_690', 'Ratio_690', 'Sequence_691', 
'Ratio_691', 'Sequence_692', 'Ratio_692', 'Sequence_693', 'Ratio_693', 'Sequence_694', 'Ratio_694', 'Sequence_695', 'Ratio_695', 'Sequence_696', 'Ratio_696', 'Sequence_697', 'Ratio_697', 'Sequence_698', 'Ratio_698', 'Sequence_699', 'Ratio_699', 'Sequence_700', 'Ratio_700', 'Sequence_701', 'Ratio_701', 'Sequence_702', 'Ratio_702', 'Sequence_703', 'Ratio_703', 'Sequence_704', 'Ratio_704', 'Sequence_705', 'Ratio_705', 'Sequence_706', 'Ratio_706', 'Sequence_707', 'Ratio_707', 'Sequence_708', 'Ratio_708', 'Sequence_709', 'Ratio_709', 'Sequence_710', 'Ratio_710', 'Sequence_711', 'Ratio_711', 'Sequence_712', 'Ratio_712', 'Sequence_713', 'Ratio_713', 'Sequence_714', 'Ratio_714', 'Sequence_715', 'Ratio_715', 'Sequence_716', 'Ratio_716', 'Sequence_717', 'Ratio_717', 'Sequence_718', 'Ratio_718', 'Sequence_719', 'Ratio_719', 'Sequence_720', 'Ratio_720', 'Sequence_721', 'Ratio_721', 'Sequence_722', 'Ratio_722', 'Sequence_723', 'Ratio_723', 'Sequence_724', 'Ratio_724', 'Sequence_725', 'Ratio_725', 'Sequence_726', 'Ratio_726', 'Sequence_727', 'Ratio_727', 'Sequence_728', 'Ratio_728', 'Sequence_729', 'Ratio_729', 'Sequence_730', 'Ratio_730', 'Sequence_731', 'Ratio_731', 'Sequence_732', 'Ratio_732', 'Sequence_733', 'Ratio_733', 'Sequence_734', 'Ratio_734', 'Sequence_735', 'Ratio_735', 'Sequence_736', 'Ratio_736', 'Sequence_737', 'Ratio_737', 'Sequence_738', 'Ratio_738', 'Sequence_739', 'Ratio_739', 'Sequence_740', 'Ratio_740', 'Sequence_741', 'Ratio_741', 'Sequence_742', 'Ratio_742', 'Sequence_743', 'Ratio_743', 'Sequence_744', 'Ratio_744', 'Sequence_745', 'Ratio_745', 'Sequence_746', 'Ratio_746', 'Sequence_747', 'Ratio_747', 'Sequence_748', 'Ratio_748', 'Sequence_749', 'Ratio_749', 'Sequence_750', 'Ratio_750', 'Sequence_751', 'Ratio_751', 'Sequence_752', 'Ratio_752', 'Sequence_753', 'Ratio_753', 'Sequence_754', 'Ratio_754', 'Sequence_755', 'Ratio_755', 'Sequence_756', 'Ratio_756', 'Sequence_757', 'Ratio_757', 'Sequence_758', 'Ratio_758', 'Sequence_759', 'Ratio_759', 
'Sequence_760', 'Ratio_760', 'Sequence_761', 'Ratio_761', 'Sequence_762', 'Ratio_762', 'Sequence_763', 'Ratio_763', 'Sequence_764', 'Ratio_764', 'Sequence_765', 'Ratio_765', 'Sequence_766', 'Ratio_766', 'Sequence_767', 'Ratio_767', 'Sequence_768', 'Ratio_768', 'Sequence_769', 'Ratio_769', 'Sequence_770', 'Ratio_770', 'Sequence_771', 'Ratio_771', 'Sequence_772', 'Ratio_772', 'Sequence_773', 'Ratio_773', 'Sequence_774', 'Ratio_774', 'Sequence_775', 'Ratio_775', 'Sequence_776', 'Ratio_776', 'Sequence_777', 'Ratio_777', 'Sequence_778', 'Ratio_778', 'Sequence_779', 'Ratio_779', 'Sequence_780', 'Ratio_780', 'Sequence_781', 'Ratio_781', 'Sequence_782', 'Ratio_782', 'Sequence_783', 'Ratio_783', 'Sequence_784', 'Ratio_784', 'Sequence_785', 'Ratio_785', 'Sequence_786', 'Ratio_786', 'Sequence_787', 'Ratio_787', 'Sequence_788', 'Ratio_788', 'Sequence_789', 'Ratio_789', 'Sequence_790', 'Ratio_790', 'Sequence_791', 'Ratio_791', 'Sequence_792', 'Ratio_792', 'Sequence_793', 'Ratio_793', 'Sequence_794', 'Ratio_794', 'Sequence_795', 'Ratio_795', 'Sequence_796', 'Ratio_796', 'Sequence_797', 'Ratio_797', 'Sequence_798', 'Ratio_798', 'Sequence_799', 'Ratio_799', 'Sequence_800', 'Ratio_800', 'Sequence_801', 'Ratio_801', 'Sequence_802', 'Ratio_802', 'Sequence_803', 'Ratio_803', 'Sequence_804', 'Ratio_804', 'Sequence_805', 'Ratio_805', 'Sequence_806', 'Ratio_806', 'Sequence_807', 'Ratio_807', 'Sequence_808', 'Ratio_808', 'Sequence_809', 'Ratio_809', 'Sequence_810', 'Ratio_810', 'Sequence_811', 'Ratio_811', 'Sequence_812', 'Ratio_812', 'Sequence_813', 'Ratio_813', 'Sequence_814', 'Ratio_814', 'Sequence_815', 'Ratio_815', 'Sequence_816', 'Ratio_816', 'Sequence_817', 'Ratio_817', 'Sequence_818', 'Ratio_818', 'Sequence_819', 'Ratio_819', 'Sequence_820', 'Ratio_820', 'Sequence_821', 'Ratio_821', 'Sequence_822', 'Ratio_822', 'Sequence_823', 'Ratio_823', 'Sequence_824', 'Ratio_824', 'Sequence_825', 'Ratio_825', 'Sequence_826', 'Ratio_826', 'Sequence_827', 'Ratio_827', 'Sequence_828', 
'Ratio_828', 'Sequence_829', 'Ratio_829', 'Sequence_830', 'Ratio_830', 'Sequence_831', 'Ratio_831', 'Sequence_832', 'Ratio_832', 'Sequence_833', 'Ratio_833', 'Sequence_834', 'Ratio_834', 'Sequence_835', 'Ratio_835', 'Sequence_836', 'Ratio_836', 'Sequence_837', 'Ratio_837', 'Sequence_838', 'Ratio_838', 'Sequence_839', 'Ratio_839', 'Sequence_840', 'Ratio_840', 'Sequence_841', 'Ratio_841', 'Sequence_842', 'Ratio_842', 'Sequence_843', 'Ratio_843', 'Sequence_844', 'Ratio_844', 'Sequence_845', 'Ratio_845', 'Sequence_846', 'Ratio_846', 'Sequence_847', 'Ratio_847', 'Sequence_848', 'Ratio_848', 'Sequence_849', 'Ratio_849', 'Sequence_850', 'Ratio_850', 'Sequence_851', 'Ratio_851', 'Sequence_852', 'Ratio_852', 'Sequence_853', 'Ratio_853', 'Sequence_854', 'Ratio_854', 'Sequence_855', 'Ratio_855', 'Sequence_856', 'Ratio_856', 'Sequence_857', 'Ratio_857', 'Sequence_858', 'Ratio_858', 'Sequence_859', 'Ratio_859', 'Sequence_860', 'Ratio_860', 'Sequence_861', 'Ratio_861', 'Sequence_862', 'Ratio_862', 'Sequence_863', 'Ratio_863', 'Sequence_864', 'Ratio_864', 'Sequence_865', 'Ratio_865', 'Sequence_866', 'Ratio_866', 'Sequence_867', 'Ratio_867', 'Sequence_868', 'Ratio_868', 'Sequence_869', 'Ratio_869', 'Sequence_870', 'Ratio_870', 'Sequence_871', 'Ratio_871', 'Sequence_872', 'Ratio_872', 'Sequence_873', 'Ratio_873', 'Sequence_874', 'Ratio_874', 'Sequence_875', 'Ratio_875', 'Sequence_876', 'Ratio_876', 'Sequence_877', 'Ratio_877', 'Sequence_878', 'Ratio_878', 'Sequence_879', 'Ratio_879', 'Sequence_880', 'Ratio_880', 'Sequence_881', 'Ratio_881', 'Sequence_882', 'Ratio_882', 'Sequence_883', 'Ratio_883', 'Sequence_884', 'Ratio_884', 'Sequence_885', 'Ratio_885', 'Sequence_886', 'Ratio_886', 'Sequence_887', 'Ratio_887', 'Sequence_888', 'Ratio_888', 'Sequence_889', 'Ratio_889', 'Sequence_890', 'Ratio_890', 'Sequence_891', 'Ratio_891', 'Sequence_892', 'Ratio_892', 'Sequence_893', 'Ratio_893', 'Sequence_894', 'Ratio_894', 'Sequence_895', 'Ratio_895', 'Sequence_896', 'Ratio_896', 
'Sequence_897', 'Ratio_897', 'Sequence_898', 'Ratio_898', 'Sequence_899', 'Ratio_899', 'Sequence_900', 'Ratio_900', 'Sequence_901', 'Ratio_901', 'Sequence_902', 'Ratio_902', 'Sequence_903', 'Ratio_903', 'Sequence_904', 'Ratio_904', 'Sequence_905', 'Ratio_905', 'Sequence_906', 'Ratio_906', 'Sequence_907', 'Ratio_907', 'Sequence_908', 'Ratio_908', 'Sequence_909', 'Ratio_909', 'Sequence_910', 'Ratio_910', 'Sequence_911', 'Ratio_911', 'Sequence_912', 'Ratio_912', 'Sequence_913', 'Ratio_913', 'Sequence_914', 'Ratio_914', 'Sequence_915', 'Ratio_915', 'Sequence_916', 'Ratio_916', 'Sequence_917', 'Ratio_917', 'Sequence_918', 'Ratio_918', 'Sequence_919', 'Ratio_919', 'Sequence_920', 'Ratio_920', 'Sequence_921', 'Ratio_921', 'Sequence_922', 'Ratio_922', 'Sequence_923', 'Ratio_923', 'Sequence_924', 'Ratio_924', 'Sequence_925', 'Ratio_925', 'Sequence_926', 'Ratio_926', 'Sequence_927', 'Ratio_927', 'Sequence_928', 'Ratio_928', 'Sequence_929', 'Ratio_929', 'Sequence_930', 'Ratio_930', 'Sequence_931', 'Ratio_931', 'Sequence_932', 'Ratio_932', 'Sequence_933', 'Ratio_933', 'Sequence_934', 'Ratio_934', 'Sequence_935', 'Ratio_935', 'Sequence_936', 'Ratio_936', 'Sequence_937', 'Ratio_937', 'Sequence_938', 'Ratio_938', 'Sequence_939', 'Ratio_939', 'Sequence_940', 'Ratio_940', 'Sequence_941', 'Ratio_941', 'Sequence_942', 'Ratio_942', 'Sequence_943', 'Ratio_943', 'Sequence_944', 'Ratio_944', 'Sequence_945', 'Ratio_945', 'Sequence_946', 'Ratio_946', 'Sequence_947', 'Ratio_947', 'Sequence_948', 'Ratio_948', 'Sequence_949', 'Ratio_949', 'Sequence_950', 'Ratio_950', 'Sequence_951', 'Ratio_951', 'Sequence_952', 'Ratio_952', 'Sequence_953', 'Ratio_953', 'Sequence_954', 'Ratio_954', 'Sequence_955', 'Ratio_955', 'Sequence_956', 'Ratio_956', 'Sequence_957', 'Ratio_957', 'Sequence_958', 'Ratio_958', 'Sequence_959', 'Ratio_959', 'Sequence_960', 'Ratio_960', 'Sequence_961', 'Ratio_961', 'Sequence_962', 'Ratio_962', 'Sequence_963', 'Ratio_963', 'Sequence_964', 'Ratio_964', 'Sequence_965', 
'Ratio_965', 'Sequence_966', 'Ratio_966', 'Sequence_967', 'Ratio_967', 'Sequence_968', 'Ratio_968', 'Sequence_969', 'Ratio_969', 'Sequence_970', 'Ratio_970', 'Sequence_971', 'Ratio_971', 'Sequence_972', 'Ratio_972', 'Sequence_973', 'Ratio_973', 'Sequence_974', 'Ratio_974', 'Sequence_975', 'Ratio_975', 'Sequence_976', 'Ratio_976', 'Sequence_977', 'Ratio_977', 'Sequence_978', 'Ratio_978', 'Sequence_979', 'Ratio_979', 'Sequence_980', 'Ratio_980', 'Sequence_981', 'Ratio_981', 'Sequence_982', 'Ratio_982', 'Sequence_983', 'Ratio_983', 'Sequence_984', 'Ratio_984', 'Sequence_985', 'Ratio_985', 'Sequence_986', 'Ratio_986', 'Sequence_987', 'Ratio_987', 'Sequence_988', 'Ratio_988', 'Sequence_989', 'Ratio_989', 'Sequence_990', 'Ratio_990', 'Sequence_991', 'Ratio_991', 'Sequence_992', 'Ratio_992', 'Sequence_993', 'Ratio_993', 'Sequence_994', 'Ratio_994', 'Sequence_995', 'Ratio_995', 'Sequence_996', 'Ratio_996', 'Sequence_997', 'Ratio_997', 'Sequence_998', 'Ratio_998', 'Sequence_999', 'Ratio_999', 'Sequence_1000', 'Ratio_1000', 'Sequence_1001', 'Ratio_1001', 'Sequence_1002', 'Ratio_1002', 'Sequence_1003', 'Ratio_1003', 'Sequence_1004', 'Ratio_1004', 'Sequence_1005', 'Ratio_1005', 'Sequence_1006', 'Ratio_1006', 'Sequence_1007', 'Ratio_1007', 'Sequence_1008', 'Ratio_1008', 'Sequence_1009', 'Ratio_1009', 'Sequence_1010', 'Ratio_1010', 'Sequence_1011', 'Ratio_1011', 'Sequence_1012', 'Ratio_1012', 'Sequence_1013', 'Ratio_1013', 'Sequence_1014', 'Ratio_1014', 'Sequence_1015', 'Ratio_1015', 'Sequence_1016', 'Ratio_1016', 'Sequence_1017', 'Ratio_1017', 'Sequence_1018', 'Ratio_1018', 'Sequence_1019', 'Ratio_1019', 'Sequence_1020', 'Ratio_1020', 'Sequence_1021', 'Ratio_1021', 'Sequence_1022', 'Ratio_1022', 'Sequence_1023', 'Ratio_1023', 'Sequence_1024', 'Ratio_1024', 'Sequence_1025', 'Ratio_1025', 'Sequence_1026', 'Ratio_1026', 'Sequence_1027', 'Ratio_1027', 'Sequence_1028', 'Ratio_1028', 'Sequence_1029', 'Ratio_1029', 'Sequence_1030', 'Ratio_1030', 'Sequence_1031', 'Ratio_1031', 
'Sequence_1032', 'Ratio_1032', 'Sequence_1033', 'Ratio_1033', 'Sequence_1034', 'Ratio_1034', 'Sequence_1035', 'Ratio_1035', 'Sequence_1036', 'Ratio_1036', 'Sequence_1037', 'Ratio_1037', 'Sequence_1038', 'Ratio_1038', 'Sequence_1039', 'Ratio_1039', 'Sequence_1040', 'Ratio_1040', 'Sequence_1041', 'Ratio_1041', 'Sequence_1042', 'Ratio_1042', 'Sequence_1043', 'Ratio_1043', 'Sequence_1044', 'Ratio_1044', 'Sequence_1045', 'Ratio_1045', 'Sequence_1046', 'Ratio_1046', 'Sequence_1047', 'Ratio_1047', 'Sequence_1048', 'Ratio_1048', 'Sequence_1049', 'Ratio_1049', 'Sequence_1050', 'Ratio_1050', 'Sequence_1051', 'Ratio_1051', 'Sequence_1052', 'Ratio_1052', 'Sequence_1053', 'Ratio_1053', 'Sequence_1054', 'Ratio_1054', 'Sequence_1055', 'Ratio_1055', 'Sequence_1056', 'Ratio_1056', 'Sequence_1057', 'Ratio_1057', 'Sequence_1058', 'Ratio_1058', 'Sequence_1059', 'Ratio_1059', 'Sequence_1060', 'Ratio_1060', 'Sequence_1061', 'Ratio_1061', 'Sequence_1062', 'Ratio_1062', 'Sequence_1063', 'Ratio_1063', 'Sequence_1064', 'Ratio_1064', 'Sequence_1065', 'Ratio_1065', 'Sequence_1066', 'Ratio_1066', 'Sequence_1067', 'Ratio_1067', 'Sequence_1068', 'Ratio_1068', 'Sequence_1069', 'Ratio_1069', 'Sequence_1070', 'Ratio_1070', 'Sequence_1071', 'Ratio_1071', 'Sequence_1072', 'Ratio_1072', 'Sequence_1073', 'Ratio_1073', 'Sequence_1074', 'Ratio_1074', 'Sequence_1075', 'Ratio_1075', 'Sequence_1076', 'Ratio_1076', 'Sequence_1077', 'Ratio_1077', 'Sequence_1078', 'Ratio_1078', 'Sequence_1079', 'Ratio_1079', 'Sequence_1080', 'Ratio_1080', 'Sequence_1081', 'Ratio_1081', 'Sequence_1082', 'Ratio_1082', 'Sequence_1083', 'Ratio_1083', 'Sequence_1084', 'Ratio_1084', 'Sequence_1085', 'Ratio_1085', 'Sequence_1086', 'Ratio_1086', 'Sequence_1087', 'Ratio_1087', 'Sequence_1088', 'Ratio_1088', 'Sequence_1089', 'Ratio_1089', 'Sequence_1090', 'Ratio_1090', 'Sequence_1091', 'Ratio_1091', 'Sequence_1092', 'Ratio_1092', 'Sequence_1093', 'Ratio_1093', 'Sequence_1094', 'Ratio_1094', 'Sequence_1095', 'Ratio_1095', 
'Sequence_1096', 'Ratio_1096', 'Sequence_1097', 'Ratio_1097', 'Sequence_1098', 'Ratio_1098', 'Sequence_1099', 'Ratio_1099', 'Sequence_1100', 'Ratio_1100', 'Sequence_1101', 'Ratio_1101', 'Sequence_1102', 'Ratio_1102', 'Sequence_1103', 'Ratio_1103', 'Sequence_1104', 'Ratio_1104', 'Sequence_1105', 'Ratio_1105', 'Sequence_1106', 'Ratio_1106', 'Sequence_1107', 'Ratio_1107', 'Sequence_1108', 'Ratio_1108', 'Sequence_1109', 'Ratio_1109', 'Sequence_1110', 'Ratio_1110', 'Sequence_1111', 'Ratio_1111', 'Sequence_1112', 'Ratio_1112', 'Sequence_1113', 'Ratio_1113', 'Sequence_1114', 'Ratio_1114', 'Sequence_1115', 'Ratio_1115', 'Sequence_1116', 'Ratio_1116', 'Sequence_1117', 'Ratio_1117', 'Sequence_1118', 'Ratio_1118', 'Sequence_1119', 'Ratio_1119', 'Sequence_1120', 'Ratio_1120', 'Sequence_1121', 'Ratio_1121', 'Sequence_1122', 'Ratio_1122', 'Sequence_1123', 'Ratio_1123', 'Sequence_1124', 'Ratio_1124', 'Sequence_1125', 'Ratio_1125', 'Sequence_1126', 'Ratio_1126', 'Sequence_1127', 'Ratio_1127', 'Sequence_1128', 'Ratio_1128', 'Sequence_1129', 'Ratio_1129', 'Sequence_1130', 'Ratio_1130', 'Sequence_1131', 'Ratio_1131', 'Sequence_1132', 'Ratio_1132', 'Sequence_1133', 'Ratio_1133', 'Sequence_1134', 'Ratio_1134', 'Sequence_1135', 'Ratio_1135', 'Sequence_1136', 'Ratio_1136', 'Sequence_1137', 'Ratio_1137', 'Sequence_1138', 'Ratio_1138', 'Sequence_1139', 'Ratio_1139', 'Sequence_1140', 'Ratio_1140', 'Sequence_1141', 'Ratio_1141', 'Sequence_1142', 'Ratio_1142', 'Sequence_1143', 'Ratio_1143', 'Sequence_1144', 'Ratio_1144', 'Sequence_1145', 'Ratio_1145', 'Sequence_1146', 'Ratio_1146', 'Sequence_1147', 'Ratio_1147', 'Sequence_1148', 'Ratio_1148', 'Sequence_1149', 'Ratio_1149', 'Sequence_1150', 'Ratio_1150', 'Sequence_1151', 'Ratio_1151', 'Sequence_1152', 'Ratio_1152', 'Sequence_1153', 'Ratio_1153', 'Sequence_1154', 'Ratio_1154', 'Sequence_1155', 'Ratio_1155', 'Sequence_1156', 'Ratio_1156', 'Sequence_1157', 'Ratio_1157', 'Sequence_1158', 'Ratio_1158', 'Sequence_1159', 'Ratio_1159', 
'Sequence_1160', 'Ratio_1160', 'Sequence_1161', 'Ratio_1161', 'Sequence_1162', 'Ratio_1162', 'Sequence_1163', 'Ratio_1163', 'Sequence_1164', 'Ratio_1164', 'Sequence_1165', 'Ratio_1165', 'Sequence_1166', 'Ratio_1166', 'Sequence_1167', 'Ratio_1167', 'Sequence_1168', 'Ratio_1168', 'Sequence_1169', 'Ratio_1169', 'Sequence_1170', 'Ratio_1170', 'Sequence_1171', 'Ratio_1171', 'Sequence_1172', 'Ratio_1172', 'Sequence_1173', 'Ratio_1173', 'Sequence_1174', 'Ratio_1174', 'Sequence_1175', 'Ratio_1175', 'Sequence_1176', 'Ratio_1176', 'Sequence_1177', 'Ratio_1177', 'Sequence_1178', 'Ratio_1178', 'Sequence_1179', 'Ratio_1179', 'Sequence_1180', 'Ratio_1180', 'Sequence_1181', 'Ratio_1181', 'Sequence_1182', 'Ratio_1182', 'Sequence_1183', 'Ratio_1183', 'Sequence_1184', 'Ratio_1184', 'Sequence_1185', 'Ratio_1185', 'Sequence_1186', 'Ratio_1186', 'Sequence_1187', 'Ratio_1187', 'Sequence_1188', 'Ratio_1188', 'Sequence_1189', 'Ratio_1189', 'Sequence_1190', 'Ratio_1190', 'Sequence_1191', 'Ratio_1191', 'Sequence_1192', 'Ratio_1192', 'Sequence_1193', 'Ratio_1193', 'Sequence_1194', 'Ratio_1194', 'Sequence_1195', 'Ratio_1195', 'Sequence_1196', 'Ratio_1196', 'Sequence_1197', 'Ratio_1197', 'Sequence_1198', 'Ratio_1198', 'Sequence_1199', 'Ratio_1199', 'Sequence_1200', 'Ratio_1200', 'Sequence_1201', 'Ratio_1201', 'Sequence_1202', 'Ratio_1202', 'Sequence_1203', 'Ratio_1203', 'Sequence_1204', 'Ratio_1204', 'Sequence_1205', 'Ratio_1205', 'Sequence_1206', 'Ratio_1206', 'Sequence_1207', 'Ratio_1207', 'Sequence_1208', 'Ratio_1208', 'Sequence_1209', 'Ratio_1209', 'Sequence_1210', 'Ratio_1210', 'Sequence_1211', 'Ratio_1211', 'Sequence_1212', 'Ratio_1212', 'Sequence_1213', 'Ratio_1213', 'Sequence_1214', 'Ratio_1214', 'Sequence_1215', 'Ratio_1215', 'Sequence_1216', 'Ratio_1216', 'Sequence_1217', 'Ratio_1217', 'Sequence_1218', 'Ratio_1218', 'Sequence_1219', 'Ratio_1219', 'Sequence_1220', 'Ratio_1220', 'Sequence_1221', 'Ratio_1221', 'Sequence_1222', 'Ratio_1222', 'Sequence_1223', 'Ratio_1223', 
'Sequence_1224', 'Ratio_1224', 'Sequence_1225', 'Ratio_1225', 'Sequence_1226', 'Ratio_1226', 'Sequence_1227', 'Ratio_1227', 'Sequence_1228', 'Ratio_1228', 'Sequence_1229', 'Ratio_1229', 'Sequence_1230', 'Ratio_1230', 'Sequence_1231', 'Ratio_1231', 'Sequence_1232', 'Ratio_1232', 'Sequence_1233', 'Ratio_1233', 'Sequence_1234', 'Ratio_1234', 'Sequence_1235', 'Ratio_1235', 'Sequence_1236', 'Ratio_1236', 'Sequence_1237', 'Ratio_1237', 'Sequence_1238', 'Ratio_1238', 'Sequence_1239', 'Ratio_1239', 'Sequence_1240', 'Ratio_1240', 'Sequence_1241', 'Ratio_1241', 'Sequence_1242', 'Ratio_1242', 'Sequence_1243', 'Ratio_1243', 'Sequence_1244', 'Ratio_1244', 'Sequence_1245', 'Ratio_1245', 'Sequence_1246', 'Ratio_1246', 'Sequence_1247', 'Ratio_1247', 'Sequence_1248', 'Ratio_1248', 'Sequence_1249', 'Ratio_1249', 'Sequence_1250', 'Ratio_1250', 'Sequence_1251', 'Ratio_1251', 'Sequence_1252', 'Ratio_1252', 'Sequence_1253', 'Ratio_1253', 'Sequence_1254', 'Ratio_1254', 'Sequence_1255', 'Ratio_1255', 'Sequence_1256', 'Ratio_1256', 'Sequence_1257', 'Ratio_1257', 'Sequence_1258', 'Ratio_1258', 'Sequence_1259', 'Ratio_1259', 'Sequence_1260', 'Ratio_1260', 'Sequence_1261', 'Ratio_1261', 'Sequence_1262', 'Ratio_1262', 'Sequence_1263', 'Ratio_1263', 'Sequence_1264', 'Ratio_1264', 'Sequence_1265', 'Ratio_1265', 'Sequence_1266', 'Ratio_1266', 'Sequence_1267', 'Ratio_1267', 'Sequence_1268', 'Ratio_1268', 'Sequence_1269', 'Ratio_1269', 'Sequence_1270', 'Ratio_1270', 'Sequence_1271', 'Ratio_1271', 'Sequence_1272', 'Ratio_1272', 'Sequence_1273', 'Ratio_1273', 'Sequence_1274', 'Ratio_1274', 'Sequence_1275', 'Ratio_1275', 'Sequence_1276', 'Ratio_1276', 'Sequence_1277', 'Ratio_1277', 'Sequence_1278', 'Ratio_1278', 'Sequence_1279', 'Ratio_1279', 'Sequence_1280', 'Ratio_1280', 'Sequence_1281', 'Ratio_1281', 'Sequence_1282', 'Ratio_1282', 'Sequence_1283', 'Ratio_1283', 'Sequence_1284', 'Ratio_1284', 'Sequence_1285', 'Ratio_1285', 'Sequence_1286', 'Ratio_1286', 'Sequence_1287', 'Ratio_1287', 
'Sequence_1288', 'Ratio_1288', 'Sequence_1289', 'Ratio_1289', 'Sequence_1290', 'Ratio_1290', 'Sequence_1291', 'Ratio_1291', 'Sequence_1292', 'Ratio_1292', 'Sequence_1293', 'Ratio_1293', 'Sequence_1294', 'Ratio_1294', 'Sequence_1295', 'Ratio_1295', 'Sequence_1296', 'Ratio_1296', 'Sequence_1297', 'Ratio_1297', 'Sequence_1298', 'Ratio_1298', 'Sequence_1299', 'Ratio_1299', 'Sequence_1300', 'Ratio_1300', 'Sequence_1301', 'Ratio_1301', 'Sequence_1302', 'Ratio_1302', 'Sequence_1303', 'Ratio_1303', 'Sequence_1304', 'Ratio_1304', 'Sequence_1305', 'Ratio_1305', 'Sequence_1306', 'Ratio_1306', 'Sequence_1307', 'Ratio_1307', 'Sequence_1308', 'Ratio_1308', 'Sequence_1309', 'Ratio_1309', 'Sequence_1310', 'Ratio_1310', 'Sequence_1311', 'Ratio_1311', 'Sequence_1312', 'Ratio_1312', 'Sequence_1313', 'Ratio_1313', 'Sequence_1314', 'Ratio_1314', 'Sequence_1315', 'Ratio_1315', 'Sequence_1316', 'Ratio_1316', 'Sequence_1317', 'Ratio_1317', 'Sequence_1318', 'Ratio_1318', 'Sequence_1319', 'Ratio_1319', 'Sequence_1320', 'Ratio_1320', 'Sequence_1321', 'Ratio_1321', 'Sequence_1322', 'Ratio_1322', 'Sequence_1323', 'Ratio_1323', 'Sequence_1324', 'Ratio_1324', 'Sequence_1325', 'Ratio_1325', 'Sequence_1326', 'Ratio_1326', 'Sequence_1327', 'Ratio_1327', 'Sequence_1328', 'Ratio_1328', 'Sequence_1329', 'Ratio_1329', 'Sequence_1330', 'Ratio_1330', 'Sequence_1331', 'Ratio_1331', 'Sequence_1332', 'Ratio_1332', 'Sequence_1333', 'Ratio_1333', 'Sequence_1334', 'Ratio_1334', 'Sequence_1335', 'Ratio_1335', 'Sequence_1336', 'Ratio_1336', 'Sequence_1337', 'Ratio_1337', 'Sequence_1338', 'Ratio_1338', 'Sequence_1339', 'Ratio_1339', 'Sequence_1340', 'Ratio_1340', 'Sequence_1341', 'Ratio_1341', 'Sequence_1342', 'Ratio_1342', 'Sequence_1343', 'Ratio_1343', 'Sequence_1344', 'Ratio_1344', 'Sequence_1345', 'Ratio_1345', 'Sequence_1346', 'Ratio_1346', 'Sequence_1347', 'Ratio_1347', 'Sequence_1348', 'Ratio_1348', 'Sequence_1349', 'Ratio_1349', 'Sequence_1350', 'Ratio_1350', 'Sequence_1351', 'Ratio_1351', 
'Sequence_1352', 'Ratio_1352', 'Sequence_1353', 'Ratio_1353', 'Sequence_1354', 'Ratio_1354', 'Sequence_1355', 'Ratio_1355', 'Sequence_1356', 'Ratio_1356', 'Sequence_1357', 'Ratio_1357', 'Sequence_1358', 'Ratio_1358', 'Sequence_1359', 'Ratio_1359', 'Sequence_1360', 'Ratio_1360', 'Sequence_1361', 'Ratio_1361', 'Sequence_1362', 'Ratio_1362', 'Sequence_1363', 'Ratio_1363', 'Sequence_1364', 'Ratio_1364', 'Sequence_1365', 'Ratio_1365', 'Sequence_1366', 'Ratio_1366', 'Sequence_1367', 'Ratio_1367', 'Sequence_1368', 'Ratio_1368', 'Sequence_1369', 'Ratio_1369', 'Sequence_1370', 'Ratio_1370', 'Sequence_1371', 'Ratio_1371', 'Sequence_1372', 'Ratio_1372', 'Sequence_1373', 'Ratio_1373', 'Sequence_1374', 'Ratio_1374', 'Sequence_1375', 'Ratio_1375', 'Sequence_1376', 'Ratio_1376', 'Sequence_1377', 'Ratio_1377', 'Sequence_1378', 'Ratio_1378', 'Sequence_1379', 'Ratio_1379', 'Sequence_1380', 'Ratio_1380', 'Sequence_1381', 'Ratio_1381', 'Sequence_1382', 'Ratio_1382', 'Sequence_1383', 'Ratio_1383', 'Sequence_1384', 'Ratio_1384', 'Sequence_1385', 'Ratio_1385', 'Sequence_1386', 'Ratio_1386', 'Sequence_1387', 'Ratio_1387', 'Sequence_1388', 'Ratio_1388', 'Sequence_1389', 'Ratio_1389', 'Sequence_1390', 'Ratio_1390', 'Sequence_1391', 'Ratio_1391', 'Sequence_1392', 'Ratio_1392', 'Sequence_1393', 'Ratio_1393', 'Sequence_1394', 'Ratio_1394', 'Sequence_1395', 'Ratio_1395', 'Sequence_1396', 'Ratio_1396', 'Sequence_1397', 'Ratio_1397', 'Sequence_1398', 'Ratio_1398', 'Sequence_1399', 'Ratio_1399', 'Sequence_1400', 'Ratio_1400', 'Sequence_1401', 'Ratio_1401', 'Sequence_1402', 'Ratio_1402', 'Sequence_1403', 'Ratio_1403', 'Sequence_1404', 'Ratio_1404', 'Sequence_1405', 'Ratio_1405', 'Sequence_1406', 'Ratio_1406', 'Sequence_1407', 'Ratio_1407', 'Sequence_1408', 'Ratio_1408', 'Sequence_1409', 'Ratio_1409', 'Sequence_1410', 'Ratio_1410', 'Sequence_1411', 'Ratio_1411', 'Sequence_1412', 'Ratio_1412', 'Sequence_1413', 'Ratio_1413', 'Sequence_1414', 'Ratio_1414', 'Sequence_1415', 'Ratio_1415', 
'Sequence_1416', 'Ratio_1416', 'Sequence_1417', 'Ratio_1417', 'Sequence_1418', 'Ratio_1418', 'Sequence_1419', 'Ratio_1419', 'Sequence_1420', 'Ratio_1420', 'Sequence_1421', 'Ratio_1421', 'Sequence_1422', 'Ratio_1422', 'Sequence_1423', 'Ratio_1423', 'Sequence_1424', 'Ratio_1424', 'Sequence_1425', 'Ratio_1425', 'Sequence_1426', 'Ratio_1426', 'Sequence_1427', 'Ratio_1427', 'Sequence_1428', 'Ratio_1428', 'Sequence_1429', 'Ratio_1429', 'Sequence_1430', 'Ratio_1430', 'Sequence_1431', 'Ratio_1431', 'Sequence_1432', 'Ratio_1432', 'Sequence_1433', 'Ratio_1433', 'Sequence_1434', 'Ratio_1434', 'Sequence_1435', 'Ratio_1435', 'Sequence_1436', 'Ratio_1436', 'Sequence_1437', 'Ratio_1437', 'Sequence_1438', 'Ratio_1438', 'Sequence_1439', 'Ratio_1439', 'Sequence_1440', 'Ratio_1440', 'Sequence_1441', 'Ratio_1441', 'Sequence_1442', 'Ratio_1442', 'Sequence_1443', 'Ratio_1443', 'Sequence_1444', 'Ratio_1444', 'Sequence_1445', 'Ratio_1445', 'Sequence_1446', 'Ratio_1446', 'Sequence_1447', 'Ratio_1447', 'Sequence_1448', 'Ratio_1448', 'Sequence_1449', 'Ratio_1449', 'Sequence_1450', 'Ratio_1450', 'Sequence_1451', 'Ratio_1451', 'Sequence_1452', 'Ratio_1452', 'Sequence_1453', 'Ratio_1453', 'Sequence_1454', 'Ratio_1454', 'Sequence_1455', 'Ratio_1455', 'Sequence_1456', 'Ratio_1456', 'Sequence_1457', 'Ratio_1457', 'Sequence_1458', 'Ratio_1458', 'Sequence_1459', 'Ratio_1459', 'Sequence_1460', 'Ratio_1460', 'Sequence_1461', 'Ratio_1461', 'Sequence_1462', 'Ratio_1462', 'Sequence_1463', 'Ratio_1463', 'Sequence_1464', 'Ratio_1464', 'Sequence_1465', 'Ratio_1465', 'Sequence_1466', 'Ratio_1466', 'Sequence_1467', 'Ratio_1467', 'Sequence_1468', 'Ratio_1468', 'Sequence_1469', 'Ratio_1469', 'Sequence_1470', 'Ratio_1470', 'Sequence_1471', 'Ratio_1471', 'Sequence_1472', 'Ratio_1472', 'Sequence_1473', 'Ratio_1473', 'Sequence_1474', 'Ratio_1474', 'Sequence_1475', 'Ratio_1475', 'Sequence_1476', 'Ratio_1476', 'Sequence_1477', 'Ratio_1477', 'Sequence_1478', 'Ratio_1478', 'Sequence_1479', 'Ratio_1479', 
'Sequence_1480', 'Ratio_1480', 'Sequence_1481', 'Ratio_1481', 'Sequence_1482', 'Ratio_1482', 'Sequence_1483', 'Ratio_1483', 'Sequence_1484', 'Ratio_1484', 'Sequence_1485', 'Ratio_1485', 'Sequence_1486', 'Ratio_1486', 'Sequence_1487', 'Ratio_1487', 'Sequence_1488', 'Ratio_1488', 'Sequence_1489', 'Ratio_1489', 'Sequence_1490', 'Ratio_1490', 'Sequence_1491', 'Ratio_1491', 'Sequence_1492', 'Ratio_1492', 'Sequence_1493', 'Ratio_1493', 'Sequence_1494', 'Ratio_1494', 'Sequence_1495', 'Ratio_1495', 'Sequence_1496', 'Ratio_1496', 'Sequence_1497', 'Ratio_1497', 'Sequence_1498', 'Ratio_1498', 'Sequence_1499', 'Ratio_1499', 'Sequence_1500', 'Ratio_1500']]
# 3. Feature selection.
# The original hard-coded all 688 column names ('Name', 'Lenth', then the
# interleaved pairs Sequence_0, Ratio_0, ..., Sequence_342, Ratio_342).
# Generate the same list programmatically so the count/order cannot drift.
FEATURE_PAIR_COUNT = 343  # pairs Sequence_i / Ratio_i for i in 0..342 inclusive
feature_columns = ['Name', 'Lenth'] + [
    '{}_{}'.format(prefix, i)
    for i in range(FEATURE_PAIR_COUNT)
    for prefix in ('Sequence', 'Ratio')
]
X = data[feature_columns]
# Classification target: FATAL-style class label column.
y = data['Class']
# Split the X/y attribute sets before any feature transformation, so the
# vectorizer is fitted on training data only (no information leak).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.4, random_state=33
)
# Convert string-typed columns into numeric feature vectors; the raw frame
# cannot be fed to the estimator directly.
vec = DictVectorizer(sparse=False)  # dense output, no sparse-matrix representation
# FIX: pandas expects orient='records' (plural). The abbreviated 'record'
# spelling was deprecated in pandas 1.x and raises ValueError in pandas 2.0.
# Fit on the training set, transform-only on the test set.
X_train = vec.fit_transform(X_train.to_dict(orient='records'))
X_test = vec.transform(X_test.to_dict(orient='records'))
# Build the K-nearest-neighbours classifier.
#   n_neighbors: K, the number of neighbours taking part in the majority vote
#   weights:     'uniform' gives every neighbour equal weight in the vote
#   algorithm:   'auto' lets sklearn pick among brute force, KD-tree and
#                ball-tree based on the data
#   n_jobs=-1:   use all available CPU cores for the neighbour search
knn_params = dict(
    n_neighbors=120,
    weights='uniform',
    algorithm='auto',
    leaf_size=15,
    p=2,
    metric='minkowski',
    metric_params=None,
    n_jobs=-1,
)
knn = neighbors.KNeighborsClassifier(**knn_params)
# Train on the vectorized training set, then predict the held-out test set.
knn.fit(X_train, y_train)
attack_predict = knn.predict(X_test)
# Evaluation: compute the confusion matrix ONCE instead of four separate
# times (each call re-scans y_test/attack_predict).
cm = metrics.confusion_matrix(y_test, attack_predict)
# NOTE(review): the variable names below treat row/column 0 as the "N"
# (negative) class and row/column 1 as the "Y" (positive) class. sklearn
# orders the matrix by sorted label values -- confirm this matches the
# actual values in the 'Class' column.
N_true = cm[0][0]   # class-0 samples predicted as class 0
N_false = cm[0][1]  # class-0 samples predicted as class 1
Y_true = cm[1][1]   # class-1 samples predicted as class 1
Y_false = cm[1][0]  # class-1 samples predicted as class 0
num = N_true + N_false + Y_true + Y_false  # total evaluated samples
print("正确分类实例:", N_true + Y_true, " ", (N_true + Y_true) / num * 100, "%")
print("错误分类实例:", N_false + Y_false, " ", (N_false + Y_false) / num * 100, "%")
print("kappa系数:", metrics.cohen_kappa_score(y_test, attack_predict))
print("预测样本总数:", num, '\n')
# NOTE(review): given the cm indices above, the "TP"/"FP"/"TN"/"FN" labels
# printed here look swapped relative to the usual convention (cm[0][0] is
# normally TN) -- the strings are kept as-is to preserve output, but verify.
print(" TP:", N_true / num * 100, '%\n',
      "FP", N_false / num * 100, '%\n',
      "TN", Y_true / num * 100, '%\n',
      "FN", Y_false / num * 100, '%\n')
print("分类报告:")
print(metrics.classification_report(y_test, attack_predict))
print("混淆矩阵:\n")
print(cm)
print("分类准确度:")
print(metrics.accuracy_score(y_test, attack_predict))
# NOTE(review): time.clock() was deprecated in Python 3.3 and REMOVED in
# Python 3.8 -- this line raises AttributeError on modern interpreters.
# Both this call and the matching `start = time.clock()` (set earlier in
# the file, outside this view) should migrate together to
# time.perf_counter(); changing only one side would mix clock bases.
end = time.clock()
# Report total script runtime in seconds.
print ("运行时间:",end-start,'s')
"noreply@github.com"
] | asd14277.noreply@github.com |
1d2c819b23516b160a1928440e6c4cdafbdb9373 | 0afeec011169ad8810a074b82336e4e45ddd022c | /mysite/blog/forms.py | 9b4caf7df6d44a27dcfb0fd6884d048cd8c576a3 | [] | no_license | Radhi13/PCP--Project | aaa0c3cad07b9719d0082636dd01328d87c49c25 | e650894d0a100622111ea6a79441f0726f101663 | refs/heads/master | 2021-08-31T15:42:58.562069 | 2017-12-21T22:56:23 | 2017-12-21T22:56:23 | 115,054,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = [
"title",
"body",
] | [
"radhika.hegde13@gmail.com"
] | radhika.hegde13@gmail.com |
00ab9fc7105908e5066353d20cdeb40bcfc5caab | e4e89c9f7139d09344b139883d44e8fa8b6eecf7 | /pe_reader/section_type.py | 610783714a02922f3b8179447b182b43c04dae67 | [] | no_license | huhuang03/pe-reader.py | c13b4efe8f281591778b3effd97ce190c5221a56 | 5272e0b949226d00b9c14ae7d39eb792c47dbe9c | refs/heads/master | 2022-12-10T15:18:38.764075 | 2020-08-31T09:59:02 | 2020-08-31T09:59:02 | 291,245,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | class SectionType():
def __init__(self, value, name, meaning):
super().__init__()
self.value = value
self.name = name
self.meaning = meaning
s_null = SectionType(0x0, "SHT_NULL", "Section header table entry unused")
s_progbits = SectionType(0x1, "SHT_PROGBITS", "Program data")
s_symtab = SectionType(0x2, "SHT_SYMTAB", "Symbol table")
s_strtab = SectionType(0x3, "SHT_STRTAB", "String table")
s_rela = SectionType(0x4, "SHT_RELA", "Relocation entries with addends")
s_hash = SectionType(0x5, "SHT_HASH", "Symbol hash table")
s_dynamic = SectionType(0x6, "SHT_DYNAMIC", "Dynamic linking information")
s_note = SectionType(0x7, "SHT_NOTE", "Notes")
s_nobits = SectionType(0x8, "SHT_NOBITS", "Program space with no data (bss)")
s_rel = SectionType(0x9, "SHT_REL", "Relocation entries, no addends")
s_shlib = SectionType(0xa, "SHT_SHLIB", "Reserved")
s_dynsym = SectionType(0xb, "SHT_DYNSYM", "Dynamic linker symbol table")
s_init_array = SectionType(0xe, "SHT_INIT_ARRAY", "Array of constructors")
s_fini_array = SectionType(0xf, "SHT_FINI_ARRAY", "Array of destructors")
s_preinit_array = SectionType(0x10, "SHT_PREINIT_ARRAY", "Array of pre-constructors")
s_group = SectionType(0x11, "SHT_GROUP", "Section group")
s_symtab_shndx = SectionType(0x12, "SHT_SYMTAB_SHNDX", "Extended section indices")
s_num = SectionType(0x13, "SHT_NUM", "Number of defined types.")
s_loos = SectionType(0x60000000, "SHT_LOOS", "Start OS-specific.") | [
"york@duomai.com"
] | york@duomai.com |
10a99eb8e1f2e8748857d82f10a629bf3a5c7775 | b429676888da0b88e2099810a9add20e71cd92f2 | /Python/Big-O calculation.py | 32aa76965a18b31ac384e56bd43929a20bcab721 | [] | no_license | meesont/RGSW | 4368223d81a6b7588580ef15a908000953422003 | 10c0044b2ee9cbb69c0ad6d0e152ad53c1a18f41 | refs/heads/master | 2022-12-23T02:01:13.059192 | 2019-07-03T15:13:53 | 2019-07-03T15:13:53 | 118,895,371 | 1 | 0 | null | 2022-12-09T02:14:46 | 2018-01-25T09:59:17 | Python | UTF-8 | Python | false | false | 869 | py | # This program counts the number of print statements
# executed for a list of a given size
# This gives a measure of how many print statements are executed
# The number of times the loops are executed is more significant
#than the number of statements in each loop
def bigO(aList):
numberOfPrints = 0
n = len(aList)
for i in range(0,n):
print("In outer loop: ",aList[i])
numberOfPrints = numberOfPrints + 1
for j in range(int(n/2)):
print(" In inner loop: ",aList[j])
numberOfPrints = numberOfPrints + 1
print("\n")
print("number Of Print statements executed: ", numberOfPrints)
numberOfPrints = numberOfPrints + 1
print("\nAdd an extra one, so total =: ", numberOfPrints)
listOfItems = [1,2,3,4,5,6]
bigO(listOfItems)
print ("length of list: ", len(listOfItems)) | [
"13624619+meesont@users.noreply.github.com"
] | 13624619+meesont@users.noreply.github.com |
0ffaf2057456cc19aef95d11df83f021238ebf22 | 3711a2379e0c40e35e5295a6178a8693c6869cf4 | /oas/python-flask-server/jatdb_server/models/universal_resource.py | 0009119f46ef4ce1b184babceaceb74b7303f80b | [
"MIT"
] | permissive | NGenetzky/jatdb | dfd4f8d62d6e0f1d1c03a7aa31a804f3f5bf55c5 | 518e0cedca1b61b8a744aef5e02255d8501bf8eb | refs/heads/master | 2021-09-27T17:47:59.480796 | 2018-11-07T05:08:56 | 2018-11-07T05:08:56 | 116,591,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,801 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from jatdb_server.models.base_model_ import Model
from jatdb_server import util
class UniversalResource(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, uri=None, uuid=None, date=None): # noqa: E501
"""UniversalResource - a model defined in Swagger
:param uri: The uri of this UniversalResource. # noqa: E501
:type uri: str
:param uuid: The uuid of this UniversalResource. # noqa: E501
:type uuid: str
:param date: The date of this UniversalResource. # noqa: E501
:type date: date
"""
self.swagger_types = {
'uri': str,
'uuid': str,
'date': date
}
self.attribute_map = {
'uri': 'uri',
'uuid': 'uuid',
'date': 'date'
}
self._uri = uri
self._uuid = uuid
self._date = date
@classmethod
def from_dict(cls, dikt):
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The UniversalResource of this UniversalResource. # noqa: E501
:rtype: UniversalResource
"""
return util.deserialize_model(dikt, cls)
@property
def uri(self):
"""Gets the uri of this UniversalResource.
:return: The uri of this UniversalResource.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""Sets the uri of this UniversalResource.
:param uri: The uri of this UniversalResource.
:type uri: str
"""
if uri is None:
raise ValueError("Invalid value for `uri`, must not be `None`") # noqa: E501
self._uri = uri
@property
def uuid(self):
"""Gets the uuid of this UniversalResource.
:return: The uuid of this UniversalResource.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this UniversalResource.
:param uuid: The uuid of this UniversalResource.
:type uuid: str
"""
self._uuid = uuid
@property
def date(self):
"""Gets the date of this UniversalResource.
:return: The date of this UniversalResource.
:rtype: date
"""
return self._date
@date.setter
def date(self, date):
"""Sets the date of this UniversalResource.
:param date: The date of this UniversalResource.
:type date: date
"""
self._date = date
| [
"nathan@genetzky.us"
] | nathan@genetzky.us |
b987b35bd1a590586c2780606740f75eb2ba793d | 7981a91c1e60be98e03deb0e7477496754ebab72 | /login/forms.py | 4b23645fd67aa6ac98e77d91ce34933966510b06 | [] | no_license | zhangwa5/mysite4 | ffec03a7d996d27bcdd558d14ad5541f49352a67 | acfa2c1884d57729b85e41c06fedfcd322f2cc21 | refs/heads/master | 2020-06-11T19:37:42.946082 | 2019-06-27T09:14:44 | 2019-06-27T09:14:44 | 194,063,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from django import forms
from captcha.fields import CaptchaField
class UserForm(forms.Form):
username = forms.CharField(label="用户名", max_length=128)
password = forms.CharField(label="密码", max_length=256, widget=forms.PasswordInput)
captcha = CaptchaField(label="验证码")
| [
"522232813@qq.com"
] | 522232813@qq.com |
65ad7779c0771342e26563f4829f19544fe0d22a | b27dba9265e3fc46453293af33e215784cc60d15 | /pype/plugins/standalonepublisher/publish/extract_review.py | fbc14785a4091115ef61a811360275e740e545fd | [
"MIT"
] | permissive | tws0002/pype | f8f655f51282128b7ac42df77fca58957f416dcd | 80b1aad9990f6c7efabf0430a3da6633054bf4a8 | refs/heads/develop | 2020-04-29T21:51:22.583645 | 2019-12-09T04:23:17 | 2019-12-09T04:23:17 | 176,426,875 | 0 | 0 | MIT | 2019-12-09T04:23:18 | 2019-03-19T04:56:38 | Python | UTF-8 | Python | false | false | 7,630 | py | import os
import tempfile
import pyblish.api
from pype.vendor import clique
import pype.api
class ExtractReviewSP(pyblish.api.InstancePlugin):
    """Extracting Review mov file for Ftrack.

    Compulsory attribute of a representation is its ``tags`` list containing
    "review"; all other representations are ignored.

    New representations are created and encoded by ffmpeg following presets
    found under ``presets/plugins/standalonepublisher/publish.json`` keyed by
    this class' name (``ExtractReviewSP``), attribute ``outputs``.
    """

    label = "Extract Review SP"
    order = pyblish.api.ExtractorOrder + 0.02
    families = ["review"]
    hosts = ["standalonepublisher"]

    def process(self, instance):
        """Encode one review representation per requested output profile.

        Raises:
            KeyError: when presets for this plugin are not set.
        """
        # adding plugin attributes from presets (keyed by this class' name)
        presets = instance.context.data["presets"]
        try:
            publish_presets = presets["plugins"]["standalonepublisher"]["publish"]
            plugin_attrs = publish_presets[self.__class__.__name__]
        except KeyError:
            raise KeyError("Preset for plugin \"{}\" are not set".format(
                self.__class__.__name__
            ))
        output_profiles = plugin_attrs.get("outputs", {})

        fps = instance.data.get("fps")
        start_frame = instance.data.get("frameStart")
        self.log.debug("Families In: `{}`".format(instance.data["families"]))

        # get specific profile if was defined
        specific_profiles = instance.data.get("repreProfiles")

        new_repres = []
        # process only representations explicitly tagged for review
        for repre in instance.data["representations"]:
            tags = repre.get("tags", [])
            if "review" not in tags:
                continue

            staging_dir = repre["stagingDir"]
            for name in specific_profiles:
                profile = output_profiles.get(name)
                if not profile:
                    self.log.warning(
                        "Profile \"{}\" was not found in presets".format(name)
                    )
                    continue

                self.log.debug("Processing profile: {}".format(name))

                ext = profile.get("ext", None)
                if not ext:
                    ext = "mov"
                    self.log.debug((
                        "`ext` attribute not in output profile \"{}\"."
                        " Setting to default ext: `mov`"
                    ).format(name))

                if isinstance(repre["files"], list):
                    # image sequence: assemble into a clique collection to get
                    # the "%0Nd"-style padded path that ffmpeg expects
                    collections, remainder = clique.assemble(repre["files"])

                    full_input_path = os.path.join(
                        staging_dir,
                        collections[0].format("{head}{padding}{tail}")
                    )

                    filename = collections[0].format('{head}')
                    if filename.endswith("."):
                        filename = filename[:-1]
                else:
                    full_input_path = os.path.join(staging_dir, repre["files"])
                    filename = repre["files"].split(".")[0]

                # prepare output file
                repr_file = filename + "_{0}.{1}".format(name, ext)
                # NOTE: renamed from misspelled "out_stagigng_dir" (local only)
                out_staging_dir = tempfile.mkdtemp(prefix="extract_review_")
                full_output_path = os.path.join(out_staging_dir, repr_file)

                self.log.info("input {}".format(full_input_path))
                self.log.info("output {}".format(full_output_path))

                repre_new = repre.copy()
                # drop "delete" and merge in the profile's own tags
                new_tags = [x for x in tags if x != "delete"]
                p_tags = profile.get("tags", [])
                self.log.info("p_tags: `{}`".format(p_tags))

                for _tag in p_tags:
                    if _tag not in new_tags:
                        new_tags.append(_tag)

                self.log.info("new_tags: `{}`".format(new_tags))

                input_args = []
                # overrides output file
                input_args.append("-y")
                # preset's input data
                input_args.extend(profile.get("input", []))
                # necessary input data
                # adds start arg only if image sequence
                if isinstance(repre["files"], list):
                    input_args.extend([
                        "-start_number {}".format(start_frame),
                        "-framerate {}".format(fps)
                    ])
                input_args.append("-i {}".format(full_input_path))

                output_args = []
                # preset's output data
                output_args.extend(profile.get("output", []))

                if isinstance(repre["files"], list):
                    # set length of video by len of inserted files
                    video_len = len(repre["files"])
                else:
                    video_len = repre["frameEnd"] - repre["frameStart"] + 1
                output_args.append(
                    "-frames {}".format(video_len)
                )

                # letter_box: draw black bars top/bottom to force the given
                # aspect ratio via ffmpeg's drawbox filter
                lb_string = (
                    "-filter:v "
                    "drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,"
                    "drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:"
                    "round((ih-(iw*(1/{0})))/2):t=fill:c=black"
                )
                letter_box = profile.get("letter_box", None)
                if letter_box:
                    output_args.append(lb_string.format(letter_box))

                # output filename
                output_args.append(full_output_path)

                ffmpeg_path = os.getenv("FFMPEG_PATH", "")
                if ffmpeg_path:
                    ffmpeg_path += "/ffmpeg"
                else:
                    ffmpeg_path = "ffmpeg"

                mov_args = [
                    ffmpeg_path,
                    " ".join(input_args),
                    " ".join(output_args)
                ]
                subprcs_cmd = " ".join(mov_args)

                # run subprocess
                self.log.debug("Executing: {}".format(subprcs_cmd))
                output = pype.api.subprocess(subprcs_cmd)
                self.log.debug("Output: {}".format(output))

                # create representation data
                repre_new.update({
                    "name": name,
                    "ext": ext,
                    "files": repr_file,
                    "stagingDir": out_staging_dir,
                    "tags": new_tags,
                    "outputName": name,
                    "startFrameReview": 1,
                    "endFrameReview": video_len
                })

                # cleanup thumbnail from new repre
                if repre_new.get("thumbnail"):
                    repre_new.pop("thumbnail")
                if "thumbnail" in repre_new["tags"]:
                    repre_new["tags"].remove("thumbnail")

                # adding representation
                self.log.debug("Adding: {}".format(repre_new))

                # cleanup repre from preview
                if "preview" in repre:
                    repre.pop("preview")
                if "preview" in repre["tags"]:
                    repre["tags"].remove("preview")

                new_repres.append(repre_new)

        # BUGFIX: the original removed items from the list it was iterating
        # (`for repre in ...: representations.remove(repre)`), which skips the
        # element following each removal.  Rebuild the list instead so every
        # "delete"-tagged representation is reliably dropped.
        instance.data["representations"] = [
            repre for repre in instance.data["representations"]
            if "delete" not in repre.get("tags", [])
        ]

        for repre in new_repres:
            self.log.debug("Adding repre: \"{}\"".format(
                repre
            ))
            instance.data["representations"].append(repre)
| [
"jakub.trllo@gmail.com"
] | jakub.trllo@gmail.com |
168c735848987ad72b686f76a95014ee5340c596 | 1489e8f3dac1099bf6cce9cc6977a4d5c209f000 | /DB_Tests/apps.py | f1028a269753970ea5532adbd25308f86c9b25ac | [] | no_license | I-Need-A-Forecast/Django-server-spike-v2 | b80c056f5a2f3385ab7646581edca948eaa0a8a2 | 894a4b674a19e81563103b212368c9479be3c629 | refs/heads/master | 2020-04-18T19:45:03.249074 | 2019-02-19T16:57:45 | 2019-02-19T16:57:45 | 167,719,911 | 0 | 0 | null | 2019-01-28T17:45:17 | 2019-01-26T17:51:33 | Python | UTF-8 | Python | false | false | 169 | py | from django.apps import AppConfig
class DbTestsConfig(AppConfig):
name = 'DB_Tests'
#class current_observationConfig(AppConfig):
# name = 'current_observation' | [
"Colin.Oraskovich@gmail.com"
] | Colin.Oraskovich@gmail.com |
819ff6ab0594922528da4d79e8be19d32e18fad2 | 73a0f661f1423d63e86489d4b2673f0103698aab | /python/oneflow/test/modules/test_contiguous.py | 4d589b551f159a895bb5b71bb58e4fb4ae3bb792 | [
"Apache-2.0"
] | permissive | Oneflow-Inc/oneflow | 4fc3e081e45db0242a465c4330d8bcc8b21ee924 | 0aab78ea24d4b1c784c30c57d33ec69fe5605e4a | refs/heads/master | 2023-08-25T16:58:30.576596 | 2023-08-22T14:15:46 | 2023-08-22T14:15:46 | 81,634,683 | 5,495 | 786 | Apache-2.0 | 2023-09-14T09:44:31 | 2017-02-11T06:09:53 | C++ | UTF-8 | Python | false | false | 4,922 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
from random import shuffle
import numpy as np
from oneflow.test_utils.automated_test_util import *
from oneflow.test_utils.test_util import GenArgList
import oneflow.unittest
import oneflow as flow
@flow.unittest.skip_unless_1n1d()
class TestContiguous(flow.unittest.TestCase):
    """Exercise ``Tensor.contiguous()`` on tensors made non-contiguous via
    ``transpose``/``permute``, across several dtypes and ranks.

    NOTE(review): ``autotest`` comes from ``automated_test_util`` and
    presumably runs each body against both OneFlow and PyTorch and compares
    the results -- confirm against that helper's documentation.
    """

    @autotest(n=5)
    def test_transpose_with_random_data(test_case):
        device = random_device()
        x = random_tensor(ndim=4).to(device)
        # transpose of two random inner dims yields a non-contiguous view
        y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
        z = y.contiguous()
        return z

    @autotest(n=5, auto_backward=False)
    def test_transpose_with_bool_data(test_case):
        # bool dtype, no backward pass (requires_grad=False, auto_backward=False)
        device = random_device()
        x = random_tensor(ndim=4, requires_grad=False).to(device).to(torch.bool)
        y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
        z = y.contiguous()
        return z

    @autotest(n=5, auto_backward=False)
    def test_transpose_with_int_data(test_case):
        # int dtype, no backward pass
        device = random_device()
        x = random_tensor(ndim=4, requires_grad=False).to(device).to(torch.int)
        y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
        z = y.contiguous()
        return z

    @autotest(n=5, auto_backward=False)
    def test_contiguous_with_half_data(test_case):
        # float16 dtype, no backward pass
        device = random_device()
        x = random_tensor(ndim=4, requires_grad=False).to(device).to(torch.float16)
        y = torch.transpose(x, dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
        z = y.contiguous()
        return z

    @autotest(n=10, check_graph=True)
    def test_permute2d_tensor_with_random_data(test_case):
        # random permutation of a 2D tensor's axes, then contiguous()
        device = random_device()
        ndim = 2
        permute_list = [0, 1]
        shuffle(permute_list)
        x = random_tensor(
            ndim=ndim, dim0=random(1, 32).to(int), dim1=random(1, 59).to(int),
        ).to(device)
        y = x.permute(permute_list)
        z = y.contiguous()
        return z

    @autotest(n=10, check_graph=True)
    def test_permute3d_tensor_with_random_data(test_case):
        # random permutation of a 3D tensor's axes, then contiguous()
        device = random_device()
        ndim = 3
        permute_list = [0, 1, 2]
        shuffle(permute_list)
        x = random_tensor(
            ndim=ndim,
            dim0=random(1, 7).to(int),
            dim1=random(1, 15).to(int),
            dim2=random(1, 9).to(int),
        ).to(device)
        y = x.permute(permute_list)
        z = y.contiguous()
        return z

    @autotest(n=10, check_graph=True)
    def test_permute4d_tensor_with_random_data(test_case):
        # random permutation of a 4D tensor's axes, then contiguous()
        device = random_device()
        ndim = 4
        permute_list = [0, 1, 2, 3]
        shuffle(permute_list)
        x = random_tensor(
            ndim=ndim,
            dim0=random(1, 7).to(int),
            dim1=random(1, 15).to(int),
            dim2=random(1, 9).to(int),
            dim3=random(1, 19).to(int),
        ).to(device)
        y = x.permute(permute_list)
        z = y.contiguous()
        return z

    @profile(torch.Tensor.contiguous)
    def profile_contiguous(test_case):
        # profiling hook for contiguous() on an already-contiguous tensor
        x = torch.ones(32, 3, 128, 128)
        x.contiguous()
def _test_inplace_contiguous(test_case, device):
    """Check that ``contiguous_()`` works in place while ``contiguous()``
    returns a new tensor, and that both produce identical values."""
    source = np.random.randn(4, 5, 6, 7).astype(np.float32)
    base = flow.tensor(source, device=device)
    permuted = base.permute(0, 3, 2, 1)  # non-contiguous view of `base`
    test_case.assertTrue(permuted.is_contiguous() == False)
    # out-of-place version: allocates a fresh contiguous tensor
    copied = permuted.contiguous()
    # in-place version: makes `permuted` itself contiguous and returns it
    inplace = permuted.contiguous_()
    test_case.assertTrue(np.array_equal(copied.cpu().numpy(), inplace.cpu().numpy()))
    test_case.assertTrue(id(permuted) != id(copied))
    test_case.assertTrue(id(permuted) == id(inplace))
    test_case.assertTrue(permuted.is_contiguous() == True)
    test_case.assertTrue(copied.is_contiguous() == True)
    test_case.assertTrue(inplace.is_contiguous() == True)
@flow.unittest.skip_unless_1n1d()
class TestInplaceContiguous(flow.unittest.TestCase):
    """Run ``_test_inplace_contiguous`` for every configured device."""

    def test_inplace_contiguous(test_case):
        # GenArgList presumably yields the cartesian product of the dict's
        # value lists (see test_util) -- arg[0] is the test function, the
        # remaining items are its positional parameters.
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_inplace_contiguous,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | Oneflow-Inc.noreply@github.com |
41d172b6f2eb4d4875242451609b434f84844125 | 6f0c1766e7a6ed608a386a0b7d1a5708abe34b63 | /transforms/__init__.py | 4d9a4febb3d8cfadb6738d9c806939d1389a2393 | [
"MIT"
] | permissive | borhanMorphy/facial-keypoints-detection | 05065bf66abf23bf268084f0dbe379d96561190c | d2a0fe077d04ae6f2701af107b8322566f27a432 | refs/heads/master | 2022-12-21T05:53:31.571142 | 2020-09-15T15:16:08 | 2020-09-15T15:16:08 | 290,889,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | from torchvision.transforms import *
from .train import TrainTransforms | [
"borhano.f.42@gmail.com"
] | borhano.f.42@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.