seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
452414635 | import pandas as pd
from datetime import datetime
# Convert UNIX timestamp to regular timestamp
def toTimestamp(ts):
    """Convert a UNIX timestamp (int or numeric string) to a
    'YYYY-MM-DD HH:MM:SS' string in UTC."""
    from datetime import timezone  # local import keeps module-level imports untouched
    # fromtimestamp(..., tz=utc) replaces utcfromtimestamp(), which is
    # deprecated since Python 3.12 but produces the same UTC wall time.
    return datetime.fromtimestamp(int(ts), tz=timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
# Save pd.DataFrame as .csv
def save(df, filepath):
    """Save a pd.DataFrame to *filepath* as CSV (the index is written as an
    extra unnamed first column, which load_DataFrame strips on read-back)."""
    df.to_csv(filepath)
# Load stored dataframes (with specified structure)
def load_DataFrame(filepath, kind):
    """Load a stored DataFrame from *filepath* and validate its structure.

    :param filepath: path of a CSV previously written by save()
    :param kind: 'posts' or 'comments' — selects the expected column layout
    :return: the DataFrame when its columns match, otherwise None (an error
             message is printed)
    """
    # Expected column layouts per kind ('NumberComents' spelling matches the
    # schema used by the scraper that wrote the files — do not "fix" it here).
    expected_columns = {
        'posts': ['ID', 'Title', 'Text', 'Score', 'UpvoteRatio',
                  'NumberComents', 'Author', 'Timestamp'],
        'comments': ['ID', 'Text', 'Score', 'Author', 'isRoot',
                     'Timestamp', 'SubmissionID'],
    }
    df = pd.read_csv(filepath, index_col=False)
    # Drop the unnamed index column DataFrame.to_csv writes; guard against
    # files saved without an index (the original `del` raised KeyError).
    if 'Unnamed: 0' in df.columns:
        del df['Unnamed: 0']
    expected = expected_columns.get(kind)
    if expected is not None and list(df.columns) == expected:
        return df
    # Originally a structure mismatch for a known kind returned None silently;
    # now every failure path reports the problem.
    print("Error, structure not matching")
    return None
| marcvernet31/redditScrapper | auxiliar.py | auxiliar.py | py | 838 | python | en | code | 2 | github-code | 90 |
21177786097 |
import os
import json
import argparse
from six.moves import cPickle, xrange
from collections import defaultdict
import pdb
def precook(s, n=4, out=False):
    """Convert a sentence into its n-gram term-frequency representation.

    Takes a string and returns an object usable by cook_refs / cook_test
    (both also accept raw strings, so calling this directly is optional).

    :param s: string : sentence to be converted into ngrams
    :param n: int : maximum n-gram order to count
    :param out: unused, kept for interface compatibility
    :return: dict mapping each occurring n-gram tuple to its frequency
    """
    tokens = s.split()
    term_freq = defaultdict(int)
    for order in range(1, n + 1):
        for start in range(len(tokens) - order + 1):
            term_freq[tuple(tokens[start:start + order])] += 1
    return term_freq
def cook_refs(refs, n=4):
    """Encapsulate a segment's reference sentences for BLEU/CIDEr scoring.

    (lhuang: oracle will call with "average")

    :param refs: list of string : reference sentences for some image
    :param n: int : maximum n-gram order represented
    :return: list of term-frequency dicts, one per reference
    """
    cooked = []
    for reference in refs:
        cooked.append(precook(reference, n))
    return cooked
def create_crefs(refs):
    """Cook every reference group.

    Each element of *refs* is one image/video's list of captions; the result
    keeps the same outer structure with each caption replaced by its n-gram
    counts.
    """
    return [cook_refs(group) for group in refs]
def compute_doc_freq(crefs):
    """Compute document frequency over cooked reference groups.

    Used later to derive idf (inverse document frequency). Each n-gram is
    counted at most once per image/video, regardless of how many of that
    item's captions contain it.

    :param crefs: list of reference groups, each a list of n-gram count dicts
    :return: defaultdict(float) mapping n-gram tuple -> document frequency
    """
    document_frequency = defaultdict(float)
    for refs in crefs:
        # Union of all n-grams appearing in any caption of this one item.
        seen = {ngram for ref in refs for ngram in ref.keys()}
        for ngram in seen:
            document_frequency[ngram] += 1
    return document_frequency
def build_dict(videos, wtoi, params):
    """Build n-gram document frequencies for captions, in word and index form.

    :param videos: dict of video id -> {'final_captions': [token lists]}
    :param wtoi: word -> index mapping; '<eos>' is forced to index 0
    :param params: unused here, kept for interface compatibility
    :return: (word-ngram doc freq, index-ngram doc freq, number of videos)
    """
    wtoi['<eos>'] = 0
    refs_words = []
    refs_idxs = []
    count_vid = 0
    for vid in videos:
        words_for_vid = []
        idxs_for_vid = []
        for cap in videos[vid]['final_captions']:
            # Out-of-vocabulary tokens are mapped to the literal 'UNK'.
            tokens = [tok if tok in wtoi else 'UNK' for tok in cap]
            words_for_vid.append(' '.join(tokens))
            idxs_for_vid.append(' '.join(str(wtoi[tok]) for tok in tokens))
        refs_words.append(words_for_vid)
        refs_idxs.append(idxs_for_vid)
        count_vid += 1
    print('total videos:', count_vid)
    ngram_words = compute_doc_freq(create_crefs(refs_words))
    ngram_idxs = compute_doc_freq(create_crefs(refs_idxs))
    return ngram_words, ngram_idxs, count_vid
def main(params):
    """Build n-gram document frequencies from caption data and pickle them.

    :param params: dict with keys 'input_json', 'info_json', 'output_pkl'
    Side effect: writes '<output_pkl>-idxs.p' containing
    {'document_frequency': <index n-gram doc freq>, 'ref_len': <#videos>}.
    """
    # Context managers ensure the handles are closed even on error
    # (the original left all three files open).
    with open(params['input_json'], 'r') as f:
        videos = json.load(f)
    with open(params['info_json'], 'r') as f:
        itow = json.load(f)['ix_to_word']
    wtoi = {w: i for i, w in itow.items()}  # invert index -> word mapping
    ngram_words, ngram_idxs, ref_len = build_dict(videos, wtoi, params)
    # Only the index-based frequencies are persisted (the word-based dump was
    # already disabled upstream); pickles must be written in binary mode.
    with open(params['output_pkl'] + '-idxs.p', 'wb') as f:
        cPickle.dump({'document_frequency': ngram_idxs, 'ref_len': ref_len},
                     f, protocol=cPickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
    # CLI: caption/info JSONs in, pickled n-gram document frequencies out.
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_json', default='caption_train_english.json', help='input json file to process into hdf5')
    parser.add_argument('--info_json', default='info_train_english.json', help='info json file')
    parser.add_argument('--output_pkl', default='vatex_training', help='output pickle file')
    params = vars(parser.parse_args())  # plain dict consumed by main()
main(params) | c3cannon/multilingual_video_captioning | prepro_tokens.py | prepro_tokens.py | py | 3,671 | python | en | code | 1 | github-code | 90 |
38334885781 | from tkinter import *
from tkinter import filedialog,messagebox
from os import getlogin,path
import piexif
from PIL import Image
from adjust import Adjust
from filters import Filters
from rotate import Rotate
class EditingButtons(Frame):
    """Toolbar frame with the editor's action buttons and the File menu.

    The parent window (``master``) is expected to expose:
      * ``viewimage``   - canvas wrapper with Draw/Crop activation methods,
      * ``DrawStatus`` / ``CropStatus`` - booleans for the active tool,
      * ``OriginalImage`` / ``EditedImage`` / ``BackUpImage`` - PIL images,
      * ``filename``    - path of the currently opened picture.
    """

    def __init__(self, master=None):
        Frame.__init__(self, master=master, bg='#3d4453')
        # File menu (Import / Save / Save As / Exit).
        menu = Menu(self.master)
        self.master.config(menu=menu)
        fileMenu = Menu(menu, tearoff=0)
        fileMenu.add_command(label="Import", command=self.NewPictureImported)
        fileMenu.add_command(label='Save', command=self.SavePicture)
        fileMenu.add_command(label='Save As', command=self.SavePictureAs)
        fileMenu.add_separator()
        fileMenu.add_command(label="Exit", command=self.Exit)
        menu.add_cascade(label="File", menu=fileMenu)
        # Tool buttons; handlers are bound to ButtonRelease so each handler
        # re-checks that the release actually happened over its own button.
        self.RotateButton = Button(self, text='Rotate')
        self.DrawButton = Button(self, text='Draw')
        self.CropButton = Button(self, text='Crop')
        self.AdjustButton = Button(self, text='Adjust')
        self.FiltersButton = Button(self, text='Filter')
        self.ClearButton = Button(self, text='Clear')
        self.UndoButton = Button(self, text='Undo')
        self.ExtractExifButton = Button(self, text='Extract Exif')
        self.RotateButton.bind("<ButtonRelease-1>", self.Rotate)
        self.DrawButton.bind("<ButtonRelease-1>", self.StartDraw)
        self.CropButton.bind("<ButtonRelease-1>", self.StartCroping)
        self.AdjustButton.bind("<ButtonRelease-1>", self.EditAdjust)
        self.FiltersButton.bind("<ButtonRelease-1>", self.ApplyFilters)
        self.ClearButton.bind("<ButtonRelease-1>", self.Clear)
        self.UndoButton.bind("<ButtonRelease-1>", self.Undo)
        self.ExtractExifButton.bind("<ButtonRelease-1>", self.ExtractExif)
        self.ExtractExifButton.pack(side=BOTTOM)
        self.RotateButton.pack(side=LEFT)
        self.DrawButton.pack(side=LEFT)
        self.CropButton.pack(side=LEFT)
        self.AdjustButton.pack(side=LEFT)
        self.FiltersButton.pack(side=LEFT)
        self.UndoButton.pack(side=LEFT)
        self.ClearButton.pack()
        self.FilesTypesImport = [("All Picture file", ("*.bmp", "*.png", "*.jpeg", "*.jpg", "*.jpe", "*.ico", "*.tiff", ".*tif", "*.webp")), ("Bitmap Files", "*.bmp"), ("PNG (*.png)", "*.png"), ("JPEG (*.jpg,*.jpeg,*.jpe)", ("*.jpeg", "*.jpg", "*.jpe")), ("ICO (*.ico)", "*.ico"), ("WEBP (*.webp)", "*.webp"), ("TIFF", ("*.tiff", ".*tif"))]
        self.FilesTypesSave = [("Bitmap Files", "*.bmp"), ("PNG (*.png)", "*.png"), ("JPEG (*.jpg,*.jpeg,*.jpe)", ("*.jpeg", "*.jpg", "*.jpe")), ("ICO (*.ico)", "*.ico"), ("WEBP (*.webp)", "*.webp"), ("TIFF", ("*.tiff", ".*tif"))]
        # BUG FIX: path.join('C:', 'Users', ...) yields the drive-relative
        # 'C:Users\\...' on Windows; use 'C:\\' like the rest of this file.
        self.Default = path.join('C:\\', 'Users', getlogin(), 'Desktop')
        self.ImageIsSelected = False
        self.ChangesSaved = False
        self.master.protocol("WM_DELETE_WINDOW", self.BeforeClosing)

    def _prompt_import(self):
        """Inform the user that no picture is loaded, then open the import dialog."""
        messagebox.showinfo(title='Image Editor', message='Please import a picture first.')
        self.NewPictureImported()

    def _deactivate_tools(self):
        """Switch off draw and crop modes if either is currently active."""
        if self.master.DrawStatus:
            self.master.viewimage.DeactivateDraw()
        if self.master.CropStatus:
            self.master.viewimage.DeactivateCrop()

    def NewPictureImported(self):
        """Ask the user for a picture and load it into the editor."""
        filename = filedialog.askopenfilename(initialdir=self.Default, filetypes=self.FilesTypesImport)
        if not filename:
            # BUG FIX: dialog cancelled returns '' and Image.open('') raises.
            return
        image = Image.open(filename)
        if image is not None:
            self.master.filename = filename
            # Keep three copies: pristine original, the working image, and a
            # one-step backup used by Undo.
            self.master.OriginalImage = image.copy()
            self.master.EditedImage = image.copy()
            self.master.BackUpImage = image.copy()
            self.type = image.format
            self.master.viewimage.ShowImage()
            self.ImageIsSelected = True

    def SavePicture(self):
        """Overwrite the currently opened file with the edited picture."""
        if self.ImageIsSelected:
            SavedImage = self.master.EditedImage
            SavedImage.save(self.master.filename, quality=100, optimize=True)
            self.ChangesSaved = True
        else:
            self._prompt_import()

    def SavePictureAs(self):
        """Save the edited picture under a new, user-chosen name."""
        if self.ImageIsSelected:
            try:
                filename = filedialog.asksaveasfilename(initialdir=self.Default, filetypes=self.FilesTypesSave, defaultextension='.png')
                self.master.EditedImage.save(filename, quality=100, optimize=True)
                self.master.filename = filename
                self.ChangesSaved = True
            except ValueError:
                # PIL raises ValueError when the dialog is cancelled ('' has
                # no recognisable extension); treat it as "user changed mind".
                pass
        else:
            self._prompt_import()

    def Exit(self):
        """File > Exit: run the same save-check as closing the window."""
        self.BeforeClosing()

    def Rotate(self, event):
        """Open the rotate dialog (only if released over the button)."""
        if self.winfo_containing(event.x_root, event.y_root) == self.RotateButton:
            if self.ImageIsSelected:
                self._deactivate_tools()
                self.master.rotate_frame = Rotate(master=self.master)
                self.master.rotate_frame.grab_set()
            else:
                self._prompt_import()

    def StartDraw(self, event):
        """Toggle freehand drawing mode (crop mode is switched off first)."""
        if self.winfo_containing(event.x_root, event.y_root) == self.DrawButton:
            if self.ImageIsSelected:
                if self.master.CropStatus:
                    self.master.viewimage.DeactivateCrop()
                if self.master.DrawStatus:
                    self.master.viewimage.DeactivateDraw()
                else:
                    self.master.viewimage.StartDrawing()
            else:
                self._prompt_import()

    def StartCroping(self, event):
        """Toggle crop mode (draw mode is switched off first)."""
        if self.winfo_containing(event.x_root, event.y_root) == self.CropButton:
            if self.ImageIsSelected:
                if self.master.DrawStatus:
                    self.master.viewimage.DeactivateDraw()
                if self.master.CropStatus:
                    self.master.viewimage.DeactivateCrop()
                else:
                    self.master.viewimage.StarCrop()
            else:
                self._prompt_import()

    def EditAdjust(self, event):
        """Open the adjust (brightness/contrast/...) dialog."""
        if self.winfo_containing(event.x_root, event.y_root) == self.AdjustButton:
            if self.ImageIsSelected:
                self._deactivate_tools()
                self.master.adjust_frame = Adjust(master=self.master)
                self.master.adjust_frame.grab_set()
            else:
                self._prompt_import()

    def ApplyFilters(self, event):
        """Open the filters dialog."""
        if self.winfo_containing(event.x_root, event.y_root) == self.FiltersButton:
            if self.ImageIsSelected:
                self._deactivate_tools()
                self.master.filters_frame = Filters(master=self.master)
                self.master.filters_frame.grab_set()
            else:
                self._prompt_import()

    def Clear(self, event):
        """Discard all edits and restore the originally imported picture."""
        if self.winfo_containing(event.x_root, event.y_root) == self.ClearButton:
            if self.ImageIsSelected:
                self._deactivate_tools()
                self.master.EditedImage = self.master.OriginalImage.copy()
                self.master.viewimage.ClearCanvas()
            else:
                self._prompt_import()

    def Undo(self, event):
        """Revert the working image to the one-step backup."""
        if self.winfo_containing(event.x_root, event.y_root) == self.UndoButton:
            if self.ImageIsSelected:
                self._deactivate_tools()
                self.master.EditedImage = self.master.BackUpImage
                self.master.viewimage.ClearCanvas()
            else:
                self._prompt_import()

    def ExtractExif(self, event):
        """Dump the picture's EXIF metadata to a text file on the Desktop.

        Only JPEG, WebP and TIFF carry EXIF. An embedded thumbnail, when
        present, is written to the user's temp directory.
        """
        if self.winfo_containing(event.x_root, event.y_root) == self.ExtractExifButton:
            if self.ImageIsSelected:
                self._deactivate_tools()
                if self.type in ('JPEG', 'WEBP', 'TIFF'):
                    filename = self.master.filename.split("/")[-1]
                    filename = filename.split(".")[0]
                    try:
                        exif_dict = piexif.load(self.master.filename)
                        thumbnail = exif_dict.pop('thumbnail')
                        if thumbnail is not None:
                            with open(path.join('C:\\', 'Users', getlogin(), 'AppData', 'Local', 'Temp', 'thumbnail.jpg'), 'wb') as f:
                                f.write(thumbnail)
                        with open(path.join('C:\\', 'Users', getlogin(), 'Desktop\\', (filename + ".txt")), 'w') as f:
                            # Count empty IFD sections; all 5 empty means the
                            # picture carries no EXIF data at all.
                            checkvalue = 0
                            for key, value in exif_dict.items():
                                if exif_dict[key]:
                                    pass
                                else:
                                    checkvalue = checkvalue + 1
                            if checkvalue == 5:
                                f.write("Picture doesn't have exif\n\n")
                            for ifd in exif_dict:
                                f.write(ifd + ":")
                                f.write('\n')
                                for tag in exif_dict[ifd]:
                                    tag_name = piexif.TAGS[ifd][tag]["name"] + ": "
                                    tag_value = exif_dict[ifd][tag]
                                    if isinstance(tag_value, bytes):
                                        tag_value = tag_value.decode("utf-8")
                                    data = tag_name + str(tag_value)
                                    f.write(data)
                                    f.write("\n")
                                f.write('\n')
                        messagebox.showinfo(title='Image Editor', message='Data saved on ' + path.join('C:\\', 'Users', getlogin(), 'Desktop\\', (filename + ".txt")))
                    except ValueError:
                        # piexif raises ValueError for files without EXIF;
                        # still emit a file with the empty section headers.
                        l = ["0th:\n\n", "Exif:\n\n", "GPS:\n\n", "Interop:\n\n", "1st:\n\n"]
                        with open(path.join('C:\\', 'Users', getlogin(), 'Desktop\\', (filename + ".txt")), 'w') as f:
                            f.write("Picture doesn't have exif\n\n")
                            f.writelines(l)
                        messagebox.showinfo(title='Image Editor', message='Data saved on ' + path.join('C:\\', 'Users', getlogin(), 'Desktop\\', (filename + ".txt")))
                else:
                    messagebox.showinfo(title='Image Editor', message=' Only JPEG, WebP and TIFF pictures formats are supported.')
            else:
                self._prompt_import()

    def BeforeClosing(self):
        """Window-close handler: offer to save unsaved changes first."""
        if self.ImageIsSelected:
            if self.ChangesSaved == False:
                # Yes -> save-as dialog; No -> close; Cancel (None) -> keep open.
                test = messagebox.askyesnocancel("Image Editor", "Do you want to save changes?")
                if test:
                    # NOTE(review): after saving, the window stays open — this
                    # mirrors the original behaviour; confirm it is intended.
                    self.SavePictureAs()
                if test == False:
                    self.master.destroy()
        else:
            self.master.destroy()
| Tharbouch/ImageEditor | editingbuttons.py | editingbuttons.py | py | 12,807 | python | en | code | 1 | github-code | 90 |
12114578837 | import datetime
from datetime import datetime as dt
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.text import slugify
from django.views.generic import (CreateView, DeleteView, DetailView, FormView,
UpdateView)
from django_filters.views import FilterView
from projects.filters import (ProjectListFilter, ProjectReviewListFilter,
ProjectsOwnedListFilter)
from projects.forms import (ProjectCreateForm, ProjectReviewForm,
ProjectUpdateForm)
from projects.models import Project
from topics.forms import TopicCreateForm
PROJECTS_PAGINATE_BY = 5
TOPICS_PAGINATE_BY = 5
def generate_project_slug(project):
    """Assign a unique slug to *project* based on its title.

    If another project already uses the slugified title, the project is
    saved twice: once with a placeholder slug (to guarantee a primary key)
    and then with ``<slug>_<id>``. On the no-collision path the slug is
    set but NOT saved — the caller is expected to save.

    :return: the (possibly saved) project instance
    """
    project.slug = slugify(project.title)
    # .exists() asks the DB for a cheap existence check instead of
    # materialising the whole queryset just to take its len().
    if Project.objects.filter(slug=project.slug).exists():
        project.slug = "temp"
        project.save()
        project.slug = "{}_{}".format(slugify(project.title), project.id)
        project.save()
    return project
@method_decorator(login_required, name='dispatch')
class ProjetCreateView(CreateView):
    """Create a new (unpublished) project owned by the logged-in user.

    NOTE(review): the class name is missing the "c" in "Project"; renaming
    would break imports elsewhere, so it is left as-is.
    """
    model = Project
    form_class = ProjectCreateForm
    template_name = 'projects/pages/project_create.html'
    def get_success_url(self) -> str:
        """Redirect to the detail page of the newly created project."""
        return reverse("projects:project_detail", kwargs={"slug": self.object.slug})
    def form_valid(self, form):
        """Attach the logged-in user as owner and generate a unique slug."""
        self.object = form.save(commit=False)
        self.object.owner = self.request.user
        self.object = generate_project_slug(self.object)
        return super().form_valid(form)
@method_decorator(login_required, name='dispatch')
class ProjectUpdateView(UserPassesTestMixin, UpdateView):
    """Let a project's owner edit it while it is still unpublished."""
    model = Project
    template_name = 'projects/pages/project_update.html'
    form_class = ProjectUpdateForm
    def test_func(self):
        # Only the owner may edit, and only before the project is published.
        return (
            self.request.user == self.get_object().owner and
            self.get_object().is_published == False
        )
    def get_success_url(self):
        return reverse("projects:project_detail", kwargs={'slug': self.object.slug})
    def form_valid(self, form):
        """Save edits; editing clears a previous reproval so staff re-review."""
        self.object = form.save(commit=False)
        if self.request.user == self.object.owner and not self.object.is_published:
            self.object.is_reproved = False
            self.object.reproval_reason = ""
        # Title may have changed, so the slug is regenerated.
        self.object = generate_project_slug(self.object)
        return super().form_valid(form)
@method_decorator(login_required, name='dispatch')
class ProjectDeleteView(UserPassesTestMixin, DeleteView):
    """Let a project's owner delete it while it is still unpublished."""
    model = Project
    template_name = 'projects/pages/project_delete.html'
    def test_func(self):
        # Only the owner may delete, and only before publication.
        return (
            self.request.user == self.get_object().owner and
            self.get_object().is_published == False
        )
    def get_success_url(self) -> str:
        return reverse("projects:projects_owned")
class ProjectDetailView(UserPassesTestMixin, DetailView):
    """Project page with funding progress, topic list and review widgets."""
    model = Project
    context_object_name = 'project'
    template_name = 'projects/pages/project_detail.html'
    # Used by the manual pagination of this project's topics (DetailView has
    # no built-in pagination).
    paginate_by = TOPICS_PAGINATE_BY
    def test_func(self):
        # Visible to everyone once published; otherwise only owner and staff.
        return (
            self.request.user == self.get_object().owner or
            self.request.user.is_staff or
            self.get_object().is_published == True
        )
    def get_context_data(self, *args, **kwargs):
        """Assemble template context: progress, topic form, withdrawal info,
        staff approve form and paginated topics."""
        ctx = super().get_context_data(*args, **kwargs)
        project = self.get_object()
        ctx['is_finished'] = dt.now().date() > project.deadline
        # NOTE(review): raises ZeroDivisionError if project.goal is 0 —
        # confirm the model guarantees a positive goal.
        ctx['progress'] = round(
            (100 * project.collected) / project.goal)
        # A topic form stashed in the session (e.g. from a failed POST) takes
        # precedence over a fresh one and is consumed here.
        form_data = self.request.session.get('form')
        if form_data is not None:
            del self.request.session['form']
        if self.request.user.is_authenticated:
            if form_data is not None:
                ctx['form'] = TopicCreateForm(form_data)
            else:
                ctx['form'] = TopicCreateForm(initial={
                    'user': self.request.user,
                    'project': project,
                })
        ctx['is_withdrawal_available'] = project.is_withdrawal_available()
        if not ctx['is_withdrawal_available']:
            # First date on which a withdrawal request becomes possible.
            ctx['withdrawal_available_date'] = (
                project.deadline +
                datetime.timedelta(
                    days=settings.DAYS_FOR_WITHDRAWAL_REQUEST)
            )
        ctx['DAYS_FOR_WITHDRAWAL_REQUEST'] = settings.DAYS_FOR_WITHDRAWAL_REQUEST
        # Staff see a one-click approve form on unpublished projects.
        if self.request.user.is_staff and not project.is_published:
            ctx['approve_form'] = ProjectReviewForm(initial={
                'project': project,
                'approve': True,
            })
        # Manual pagination of the project's topics, newest first.
        topics = project.topic_set.all().order_by('-created')
        paginator = Paginator(topics, self.paginate_by)
        page = self.request.GET.get('page')
        ctx['topics'] = paginator.get_page(page)
        ctx['page_obj'] = ctx['topics']
        ctx['paginator'] = ctx['topics'].paginator
        ctx['is_paginated'] = ctx['paginator'].num_pages > 1
        return ctx
class ProjectsListView(FilterView):
    """Public, filterable catalogue of published projects, newest first."""
    model = Project
    context_object_name = 'projects'
    ordering = ['-created']
    template_name = 'projects/pages/project_list.html'
    paginate_by = PROJECTS_PAGINATE_BY
    filterset_class = ProjectListFilter

    def get_queryset(self, *args, **kwargs):
        """Restrict the base queryset to published projects only."""
        base = super().get_queryset(*args, **kwargs)
        return base.filter(is_published=True)
@method_decorator(login_required, name='dispatch')
class ProjectsOwnedListView(FilterView):
    """Filterable list of the logged-in user's own projects, newest first."""
    model = Project
    context_object_name = 'projects'
    ordering = ['-created']
    template_name = 'projects/pages/projects_owned_list.html'
    paginate_by = PROJECTS_PAGINATE_BY
    filterset_class = ProjectsOwnedListFilter

    def get_queryset(self, *args, **kwargs):
        """Keep only projects owned by the requesting user."""
        base = super().get_queryset(*args, **kwargs)
        return base.filter(owner=self.request.user)
@method_decorator(login_required, name='dispatch')
class ProjectsReviewListView(UserPassesTestMixin, FilterView):
    """Staff-only queue of projects awaiting publication review."""
    model = Project
    context_object_name = 'projects'
    ordering = ['-created']
    template_name = 'projects/pages/projects_review_list.html'
    paginate_by = PROJECTS_PAGINATE_BY
    filterset_class = ProjectReviewListFilter

    def test_func(self):
        """Only staff members may browse the review queue."""
        return self.request.user.is_staff

    def get_queryset(self, *args, **kwargs):
        """Keep only projects that have not been published yet."""
        pending = super().get_queryset(*args, **kwargs)
        return pending.filter(is_published=False)
@method_decorator(login_required, name='dispatch')
class ReviewProjectView(UserPassesTestMixin, FormView):
    """Staff-only review form: approve a project or record a reproval reason."""
    form_class = ProjectReviewForm
    template_name = 'projects/pages/project_reprove.html'
    def test_func(self):
        # Only staff, and only while the project is still unpublished.
        project = get_object_or_404(
            Project,
            slug=self.kwargs['slug']
        )
        return (
            self.request.user.is_staff and
            not project.is_published
        )
    def get_initial(self):
        """Pre-fill the form with the project and its current reproval reason."""
        project = get_object_or_404(
            Project,
            slug=self.kwargs['slug'],
        )
        return {
            'project': project,
            'reproval_reason': project.reproval_reason,
            'approve': False,
        }
    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        ctx['project'] = get_object_or_404(
            Project,
            slug=self.kwargs['slug'],
        )
        return ctx
    def form_valid(self, form):
        """Apply the review decision and redirect to the project page.

        User-facing flash messages are intentionally in Portuguese.
        """
        project = form.cleaned_data.get('project')
        approve = form.cleaned_data.get('approve')
        reproval_reason = form.cleaned_data.get('reproval_reason')
        if approve:
            # A finished project can no longer be approved.
            if dt.now().date() > project.deadline:
                messages.error(
                    self.request, f"Erro ao aprovar o projeto! O projeto \"{project.title}\" já foi encerrado.")
            else:
                project.is_reproved = False
                project.reproval_reason = ""
                project.is_published = True
                messages.success(
                    self.request, f"O projeto \"{project.title}\" foi aprovado.")
        else:
            # messages.error is used here as a red/attention-styled flash, not
            # as a failure indicator: the reproval itself succeeded.
            if not project.is_reproved:
                messages.error(
                    self.request, f"O projeto \"{project.title}\" foi reprovado.")
            elif project.reproval_reason != reproval_reason:
                messages.error(
                    self.request, f"O motivo da reprovação do projeto \"{project.title}\" foi atualizado.")
            project.is_reproved = True
            project.reproval_reason = reproval_reason
        project.save()
        self.success_url = reverse(
            "projects:project_detail",
            kwargs={
                'slug': project.slug
            }
        )
        return super().form_valid(form)
    def form_invalid(self, form):
        # NOTE(review): adds nothing over the base implementation; kept as a
        # convenient debugging hook.
        return super().form_invalid(form)
| cewebbr/mover-se_superacao-coletiva | projects/views/project.py | project.py | py | 9,393 | python | en | code | 0 | github-code | 90 |
19388930638 | import _init_path
from lib.model import PartialUNet, UNet
from lib.utils import to_var
import torchvision_sunner.transforms as sunnertransforms
import torchvision_sunner.data as sunnerData
import torchvision.transforms as transforms
import torch.nn as nn
import numpy as np
import argparse
import torch
import cv2
import os
"""
This script can inpaint the image by using U-Net which adopt partial convolution technique
You can select to assign the mask or not
"""
def parse():
    """Parse the command-line options for inpainting inference.

    :return: argparse.Namespace with model_path, image_path, mask_path, size
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default = './model.pth', type = str, help = 'The path of training model result')
    parser.add_argument('--image_path', default = "./data/test_input.jpg", type = str, help = 'The original image you want to deal with')
    parser.add_argument('--mask_path', default = None, type = str, help = 'The mask you want to adopt')
    parser.add_argument('--size', default = 256, type = int, help = 'The size of input')
    return parser.parse_args()
def generateMask(img, mask_value = 0):
    """
    Generate the mask from pixels whose three channels all equal mask_value
    Arg:    img        - The numpy array object, rank format is HW3
            mask_value - The channel value marking a masked (hole) pixel
    Ret:    The mask array (same shape/dtype as img): 0 where masked,
            255 everywhere else
    """
    # Vectorised replacement of the original per-pixel Python loop; also
    # fixes the bug where mask_value was accepted but the comparison was
    # hard-coded to 0 (default behaviour is unchanged).
    hole = np.all(img == mask_value, axis=2)
    return np.where(hole[..., None], np.zeros_like(img), np.full_like(img, 255))
if __name__ == '__main__':
    # Inference entry point: load the trained partial-conv U-Net, inpaint one
    # image (with a supplied or auto-generated mask) and save the result.
    args = parse()
    # Load the model
    if not os.path.exists(args.model_path):
        raise Exception('You should train the model first...')
    model = PartialUNet()
    #model = UNet()
    model = model.cuda() if torch.cuda.is_available() else model
    model.load_state_dict(torch.load(args.model_path))
    # Prepare image and mask
    img = cv2.imread(args.image_path)
    # Remember the original (width, height) so the output can be resized back.
    origin_size = (np.shape(img)[1], np.shape(img)[0])
    if args.mask_path is not None:
        mask = cv2.imread(args.mask_path)
    else:
        # No mask supplied: treat pure-black pixels of the input as holes.
        mask = generateMask(img)
    # Preprocessing
    proc_list = [
        sunnertransforms.Rescale((args.size, args.size)),
        sunnertransforms.ToTensor(),
        sunnertransforms.ToFloat(),
        sunnertransforms.Transpose(sunnertransforms.BHWC2BCHW),
        sunnertransforms.Normalize()
    ]
    for op in proc_list:
        img = op(img)
        mask = op(mask)
    # Duplicate into a batch of two — presumably because the model expects a
    # batch dimension larger than one; TODO confirm.
    img = torch.stack([img, img], 0)
    mask = torch.stack([mask, mask], 0)
    # Map mask values from the normalised [-1, 1] range back to [0, 1].
    mask = (mask + 1) / 2
    # Work!
    if args.mask_path is not None:
        model.setInput(target = img, mask = mask)
    else:
        model.setInput(image = img, mask = mask)
    model.eval()
    model.forward()
    # Save
    _, recon_img, _ = model.getOutput()
    show_img = sunnertransforms.tensor2Numpy(recon_img,
        transform = transforms.Compose([
            sunnertransforms.UnNormalize(),
            sunnertransforms.Transpose(sunnertransforms.BCHW2BHWC),
        ]))
    show_img = show_img.astype(np.uint8)
    # Resize the first batch element back to the original resolution and save.
    show_img = cv2.resize(show_img[0], origin_size)
    cv2.imwrite("test.png", show_img)
| SunnerLi/P-Conv | test.py | test.py | py | 3,277 | python | en | code | 15 | github-code | 90 |
18197459779 | X, N = [int(i) for i in input().split()]
P = [int(i) for i in input().split()]
# X falls in 0..101, so scanning that closed range covers every candidate.
# Find the integer not contained in P that is nearest to X; the strict '<'
# comparison means ties are resolved in favour of the smaller candidate.
result = 0
current = 102  # best distance so far; 102 exceeds any possible |X - i|
for i in range(102):
    if i not in P:
        if abs(X - i) < current:
            result = i
            current = abs(X - i)
print(result) | Aasthaengg/IBMdataset | Python_codes/p02641/s084432793.py | s084432793.py | py | 273 | python | en | code | 0 | github-code | 90 |
31547364793 | """Module containing various pop up display settings"""
# pylint: disable=E0203
# pylint: disable=E1101
from pygame import mouse, Surface, Rect, font, draw
from gui.gui_settings import (POPUP_SCREEN_WIDTH_OFFSET, POPUP_SCREEN_HEIGHT_OFFSET,
POPUP_SCREEN_HEIGHT, POPUP_SCREEN_WIDTH, HELVETICA_FONT,
COLOURS, POPUP_SCREEN_COLOUR, FONT_COLOUR, PROMOTION_SPRITES,
PROMOTION_HOVER, PROMOTION_PIECES, ENDGAME_BUTTON_SETTINGS,
ENDGAME_QUIT_BUTTON_RECT, ENDGAME_RESTART_BUTTON_RECT)
class PopUpScreen():
    """Base class for basic pop up screen"""
    def __init__(self, host_screen):
        # host_screen: the main pygame display surface the popup is drawn onto.
        self.host_screen = host_screen
        # Popup surface with per-pixel alpha, positioned via the offset constants.
        self.surface = Surface((POPUP_SCREEN_WIDTH, POPUP_SCREEN_HEIGHT)).convert_alpha()
        self.surface_rect = self.surface.get_rect()
        self.surface_rect.move_ip(POPUP_SCREEN_WIDTH_OFFSET, POPUP_SCREEN_HEIGHT_OFFSET)
        # Subclasses use this flag to gate further input handling.
        # NOTE(review): `block` is not read anywhere in this file — presumably
        # consumed by the caller owning the popup; confirm.
        self.block = False
    def blit_popup_screen(self):
        """Add the basic pop up screen surface to the host screen"""
        self.surface.fill(COLOURS[POPUP_SCREEN_COLOUR])
        self.host_screen.blit(self.surface, self.surface_rect)
    def get_multiline_surf(self, message, font_=HELVETICA_FONT, font_colour=FONT_COLOUR):
        """Returns a surface with message text on multiple lines"""
        # Word-wrapped, horizontally-centred text surface sized and positioned
        # exactly like the popup itself.
        multiline_surf = MultiLineSurface(
            message,
            font_,
            Rect(POPUP_SCREEN_WIDTH_OFFSET, POPUP_SCREEN_HEIGHT_OFFSET,
                 POPUP_SCREEN_WIDTH, POPUP_SCREEN_HEIGHT),
            COLOURS[font_colour],
            COLOURS[POPUP_SCREEN_COLOUR],
            justification=1)
        return multiline_surf
class PromotionScreen(PopUpScreen):
    """Handles special pawn promotion flow"""
    def __init__(self, host_screen):
        super(PromotionScreen, self).__init__(host_screen)
        # Instruction text shown until the cursor enters the popup area.
        self.text = self.get_multiline_surf(
            'Promote Pawn:\nHover to select piece')
    def blit(self, allegiance):
        """Decide screen properties and blit pop up screen"""
        self.blit_popup_screen()
        if self.surface_rect.collidepoint(mouse.get_pos()):
            # Cursor over the popup: show the candidate piece sprites and
            # outline the one currently being hovered.
            self.host_screen.blits(PROMOTION_SPRITES[allegiance])
            rects = list(zip(*PROMOTION_SPRITES[allegiance]))[1]
            for rect in rects:
                if rect.collidepoint(mouse.get_pos()):
                    draw.rect(self.host_screen, COLOURS[PROMOTION_HOVER], rect, 5)
        else:
            self.host_screen.blit(self.text.surf, self.text.rect)
    def get_selection(self):
        """Determine selected promotion piece"""
        # NOTE(review): hit-testing always uses the white sprite rects; this
        # assumes black sprites occupy identical positions — confirm.
        rects = list(zip(*PROMOTION_SPRITES['white']))[1]
        pieces = PROMOTION_PIECES
        for rect, piece in zip(rects, pieces):
            if rect.collidepoint(mouse.get_pos()):
                return piece
        return None
    def reset(self):
        """Reset the promotion screen state"""
        self.block = False
class EndGameScreen(PopUpScreen):
    """Handles what to do when the endgame is reached"""
    def __init__(self, host_screen):
        super(EndGameScreen, self).__init__(host_screen)
        self.quit_button = None
        self.restart_button = None
        # reset() builds the actual Button widgets.
        self.reset()
    def blit(self, result, allegiance):
        """Decide screen properties and blit to main screen"""
        self.blit_popup_screen()
        screen_message = self.get_screen_message(result, allegiance)
        text = self.get_multiline_surf(screen_message)
        self.host_screen.blit(text.surf, text.rect)
        self.restart_button.update(self.host_screen)
        self.quit_button.update(self.host_screen)
    def handle_selection(self):
        """See which button was clicked if any"""
        self.restart_button.check_click()
        self.quit_button.check_click()
    def reset(self):
        """Reset buttons and unblock the screen"""
        self.block = False
        self.quit_button = Button(ENDGAME_QUIT_BUTTON_RECT,
                                  text='Quit',
                                  **ENDGAME_BUTTON_SETTINGS)
        self.restart_button = Button(ENDGAME_RESTART_BUTTON_RECT,
                                     text='Restart',
                                     **ENDGAME_BUTTON_SETTINGS)
    @staticmethod
    def get_screen_message(result, allegiance):
        """Produce the endgame screen message"""
        # allegiance is the winner's colour; only used for checkmate.
        if result == 'Checkmate':
            return 'Checkmate!\n%s Wins' % allegiance.capitalize()
        return 'Stalemate!\nThe Game is Drawn'
class MultiLineSurface(Surface):
    """Class for surface containing the passed text string, reformatted
    to fit within the given rect, word-wrapping as necessary. The text
    will be anti-aliased.
    Parameters
    ----------
    string - the text you wish to render. \n begins a new line.
    font - a Font object
    rect - a rect style giving the size of the surface requested.
    font_colour - a three-byte tuple of the rgb value of the
                  text colour. ex (0, 0, 0) = BLACK
    bg_colour - a three-byte tuple of the rgb value of the surface.
    justification - 0 (default) left-justified
                    1 horizontally centered
                    2 right-justified
    """
    def __init__(self, string, font_, rect, font_colour, bg_colour, justification=0):
        self.string = string
        self.font = font_
        self.rect_ext = rect
        self.font_colour = font_colour
        self.bg_colour = bg_colour
        self.justification = justification
        # The Surface itself is sized to the requested rect; `surf` is the
        # alpha-converted copy that actually gets drawn to.
        super(MultiLineSurface, self).__init__(self.rect_ext.size)
        self.surf = self.convert_alpha()
        self.rect = self.surf.get_rect()
        self.move_rect_ip(self.rect_ext.left, self.rect_ext.top)
        self.surf.fill(self.bg_colour)
        self.lines = []
        # Word-wrap the string, then render it onto `surf`.
        self.get_lines()
        self.do_blit()
    def move_rect_ip(self, left, top):
        """Move rect inplace"""
        self.rect.move_ip(left, top)
    def get_lines(self):
        """Create a series of lines that will fit on the provided rect"""
        font.init()
        requested_lines = self.string.splitlines()
        for requested_line in requested_lines:
            # Lines that already fit are kept as-is; wider lines get wrapped
            # word by word.
            if self.font.size(requested_line)[0] > self.rect_ext.width:
                words = requested_line.split(' ')
                # if any of our words are too long to fit, return.
                for word in words:
                    if self.font.size(word)[0] >= self.rect_ext.width:
                        raise Exception(
                            'The word %s is too long to fit in the rect '
                            'passed.' % word)
                # Start a new line
                accumulated_line = ''
                for word in words:
                    test_line = accumulated_line + word + ' '
                    # Build the line while the words fit.
                    if self.font.size(test_line)[0] < self.rect_ext.width:
                        accumulated_line = test_line
                    else:
                        self.lines.append(accumulated_line)
                        accumulated_line = word + ' '
                self.lines.append(accumulated_line)
            else:
                self.lines.append(requested_line)
    def do_blit(self):
        """Blit text to the surface given the justification"""
        # NOTE(review): the start height is half the rect height minus one
        # line height, so text begins near the vertical centre rather than
        # the top — confirm this is the intended layout.
        accumulated_height = self.rect_ext.height / \
            2 - self.font.size(self.lines[0])[1]
        for line in self.lines:
            if accumulated_height + self.font.size(line)[1] >= self.rect_ext.height:
                raise Exception(
                    'Once word-wrapped, the text string was too tall to fit '
                    'in the rect.')
            if line != '':
                temp_surface = self.font.render(line, 1, self.font_colour)
                if self.justification == 0:
                    self.surf.blit(temp_surface, (0, accumulated_height))
                elif self.justification == 1:
                    self.surf.blit(temp_surface,
                                   ((self.rect_ext.width - temp_surface.get_width()) / 2,
                                    accumulated_height))
                elif self.justification == 2:
                    self.surf.blit(temp_surface,
                                   (self.rect_ext.width - temp_surface.get_width(),
                                    accumulated_height))
                else:
                    raise Exception(
                        "Invalid justification argument: %s" % str(self.justification))
            # Empty lines still advance the cursor by one line height.
            accumulated_height += self.font.size(line)[1]
class Button(object):
    """A fairly straight forward button class."""
    def __init__(self, rect, colour, **kwargs):
        # rect: rect-style tuple/Rect giving the button's position and size.
        self.rect = Rect(rect)
        self.colour = colour
        self.clicked = False
        self.hovered = False
        self.hover_text = None
        self.clicked_text = None
        # Applies the keyword settings (text, font, hover/clicked colours,
        # sounds, ...) as instance attributes — see process_kwargs.
        self.process_kwargs(kwargs)
        self.render_text()
    def process_kwargs(self, kwargs):
        """Various optional customization you can change by passing kwargs."""
        settings = {"text": None,
                    "width": 0,
                    "font": font.Font(None, 16),
                    "hover_colour": None,
                    "hover_width": 0,
                    "clicked_colour": None,
                    "clicked_width": 0,
                    "font_colour": (255, 255, 255),
                    "hover_font_colour": None,
                    "clicked_font_colour": None,
                    "click_sound": None,
                    "hover_sound": None}
        for kwarg in kwargs:
            if kwarg in settings:
                settings[kwarg] = kwargs[kwarg]
            else:
                raise AttributeError("Button has no keyword: %s" % kwarg)
        # Dynamic attribute injection — the reason pylint E1101 is disabled
        # at the top of the module.
        self.__dict__.update(settings)
    def render_text(self):
        """Pre render the button text."""
        # Pre-renders the hover/clicked variants (when their colours are set)
        # and finally replaces self.text with the rendered default surface.
        if self.text:
            if self.hover_font_colour:
                colour = self.hover_font_colour
                self.hover_text = self.font.render(self.text, True, colour)
            if self.clicked_font_colour:
                colour = self.clicked_font_colour
                self.clicked_text = self.font.render(self.text, True, colour)
            self.text = self.font.render(self.text, True, self.font_colour)
    def check_click(self):
        """Check mouse position has collided with this button, if so it is clicked"""
        if self.rect.collidepoint(mouse.get_pos()):
            self.clicked = True
    def check_hover(self):
        """Check mouse position has collided with this button, if so it is hovering"""
        if self.rect.collidepoint(mouse.get_pos()):
            if not self.hovered:
                self.hovered = True
                # Play the hover sound only on the enter transition.
                if self.hover_sound:
                    self.hover_sound.play()
        else:
            self.hovered = False
    def update(self, surface):
        """Update needs to be called every frame in the main loop."""
        colour = self.colour
        text = self.text
        width = self.width
        self.check_hover()
        # Clicked state takes precedence over hover for colour/width/text.
        if self.clicked and self.clicked_colour:
            colour = self.clicked_colour
            width = self.clicked_width
            if self.clicked_font_colour:
                text = self.clicked_text
        elif self.hovered and self.hover_colour:
            colour = self.hover_colour
            width = self.hover_width
            if self.hover_font_colour:
                text = self.hover_text
        draw.rect(surface, colour, self.rect, width)
        if self.text:
            text_rect = text.get_rect(center=self.rect.center)
            surface.blit(text, text_rect)
| rhys-hodio/chess-py | gui/gui_screens.py | gui_screens.py | py | 11,652 | python | en | code | 0 | github-code | 90 |
32488437165 | from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views import generic
from django.shortcuts import render
from . import forms
def index_view(request):
    """Render the landing page with one unbound instance of each demo form."""
    form_classes = {
        'login': forms.LoginForm,
        'registration': forms.RegistrationForm,
        'checkout': forms.CheckoutForm,
        'order': forms.OrderForm,
        'comment': forms.CommentForm,
        'bank': forms.BankForm,
    }
    # NOTE(review): ContactForm is wired in urlpatterns below but is not part
    # of this context — confirm that is intentional.
    context = {name: form_cls() for name, form_cls in form_classes.items()}
    return render(request, 'index.html', context)
# URL routing: the index page plus one generic FormView per demo form.  Each
# demo view re-renders the same template and redirects to itself on success.
urlpatterns = [
    url(r'^$', index_view),
    url(r'^demo/login/$', generic.FormView.as_view(
        form_class=forms.LoginForm, success_url='/demo/login/', template_name="demo.html")),
    url(r'^demo/registration/$', generic.FormView.as_view(
        form_class=forms.RegistrationForm, success_url='/demo/registration/', template_name="demo.html")),
    url(r'^demo/contact/$', generic.FormView.as_view(
        form_class=forms.ContactForm, success_url='/demo/contact/', template_name="demo.html")),
    url(r'^demo/order/$', generic.FormView.as_view(
        form_class=forms.OrderForm, success_url='/demo/order/', template_name="demo.html")),
    url(r'^demo/checkout/$', generic.FormView.as_view(
        form_class=forms.CheckoutForm, success_url='/demo/checkout/', template_name="demo.html")),
    url(r'^demo/comment/$', generic.FormView.as_view(
        form_class=forms.CommentForm, success_url='/demo/comment/', template_name="demo.html")),
    url(r'^demo/bank/$', generic.FormView.as_view(
        form_class=forms.BankForm, success_url='/demo/bank/', template_name="demo.html")),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^foundation/basic/', generic.RedirectView.as_view(url='/?cache=no', permanent=False))
]
# Mount the Zinnia weblog only when that app is installed.
if 'zinnia' in settings.INSTALLED_APPS:
    urlpatterns += [url(r'^weblog/', include('zinnia.urls', namespace='zinnia'))]
| gengue/django-material | tests/urls.py | urls.py | py | 1,902 | python | en | code | null | github-code | 90 |
18138072649 | import sys
# AOJ "Finding Missing Cards": read the owned cards from stdin, then print
# every (suit, rank) pair that is missing, in S, H, C, D / ascending-rank order.
card_total = int(sys.stdin.readline())
suits = ('S', 'H', 'C', 'D')
owned = {suit: [False] * 13 for suit in suits}
for _ in range(card_total):
    suit, rank = sys.stdin.readline().split(" ")
    owned[suit][int(rank) - 1] = True
for suit in suits:
    for rank_index in range(13):
        if not owned[suit][rank_index]:
            print("{:s} {:d}".format(suit, rank_index + 1))
25494924054 | import copy
import json
import uuid
import datetime
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError, ConflictError
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from rest_framework import serializers
from rest_framework.serializers import Serializer
from rest_framework import exceptions
from rest_framework.response import Response
from rest_framework import status
from rest_framework import mixins
from rest_framework import viewsets
from rest_framework.fields import empty
from utils import c_permissions
from utils.es import es
from mgmt.models import Table
from mgmt.models import Field
from . import app_serializers
from . import views
FIELD_TYPE_MAP = Field.FIELD_TYPE_MAP
def empty_none(self, val):
    """Normalize an empty string to ``None``; pass every other value through.

    Written with a DRF-validator-style ``(self, val)`` signature so it can be
    attached to dynamically built serializer classes.
    """
    return None if val == "" else val
# def remove_empty_str(data):
# for k, v in data.items():
# if v == "":
# data.pop(k)
# def serializer_init(self, instance=None, data=empty, **kwargs):
# if data is not empty:
# for k,v in data.items():
# if(v==""):
# data.pop(k)
# super(Serializer, self).__init__(instance, data, **kwargs)
def add_serializer(table):
    """Dynamically build a DRF Serializer class for *table* and register it
    on the ``app_serializers`` module under the table's name.

    One serializer field is created per ``table.fields`` entry, mapped through
    ``FIELD_TYPE_MAP`` by the field's numeric type; multi-valued fields are
    wrapped in a ``ListField``.
    """
    fields = table.fields.all()
    attributes = {}
    for field in fields:
        args = {
            "label": field.alias
        }
        # Optional fields default to None and accept null.
        if not field.required:
            args["default"] = None
            args["allow_null"] = True
        # Type 3 is a datetime field, type 6 an IPv4 address field
        # (per FIELD_TYPE_MAP from mgmt.models.Field).
        if field.type == 3:
            args["format"] = "%Y-%m-%dT%H:%M:%S"
        elif field.type == 6:
            args["protocol"] = "IPv4"
        f = FIELD_TYPE_MAP[field.type](**args)
        # NOTE(review): ListField(default=[]) shares one mutable default list —
        # consider a callable default; verify DRF's handling here.
        if(field.is_multi):
            attributes[field.name] = serializers.ListField(default=[], child=f)
        else:
            attributes[field.name] = f
        # if(field.type == 0):
        #     attributes["validate_{}".format(field.name)] = empty_none
    # (translated from Chinese) "creator gets the view" — refers to the
    # commented-out S-creator field below.
    # attributes["S-creator"] = serializers.CharField(read_only=True, default=serializers.CurrentUserDefault())
    attributes["S-creation-time"] = serializers.DateTimeField(read_only=True, format="%Y-%m-%dT%H:%M:%S",
                                                              default=datetime.datetime.now)
    # Label is Chinese for "last modified by".
    attributes["S-last-modified"] = serializers.CharField(default=None, allow_null=True, read_only=True, label="最后修改人")
    # Create the class at runtime and expose it on the serializers module.
    serializer = type(table.name, (Serializer, ), attributes)
    setattr(app_serializers, table.name, serializer)
def add_viewset(table):
    """Dynamically build a DRF ViewSet backed by Elasticsearch for *table*.

    Uses three ES indices derived from the table name: ``<name>`` for live
    documents, ``<name>.`` for change history, ``<name>..`` for soft-deleted
    documents.  The generated class is registered on the ``views`` module and
    also returned.
    """
    data_index = table.name
    record_data_index = "{}.".format(table.name)
    deleted_data_index = "{}..".format(table.name)
    def list(self, request, *args, **kwargs):
        # Simple offset pagination via ?page= and ?page_size=.
        page = int(request.query_params.get("page", 1))
        page_size = int(request.query_params.get("page_size", 10))
        res = es.search(index=data_index, doc_type="data", size=page_size, from_=(page-1)*page_size)
        return Response(res["hits"])
    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.data
        data["S-creator"] = request.user.username
        try:
            # uuid1 with dashes stripped is used as the document id.
            res = es.create(index=data_index, doc_type="data", id=str(uuid.uuid1()).replace("-", ""), body=data)
        except ConflictError as exc:
            raise exceptions.ParseError("Document is exists")
        headers = self.get_success_headers(serializer.data)
        return Response(res, status=status.HTTP_201_CREATED, headers=headers)
    def retrieve(self, request, *args, **kwargs):
        try:
            res = es.get(index=data_index, doc_type="data", id=kwargs["pk"])
        except NotFoundError as exc:
            raise exceptions.NotFound("Document {} was not found in Type {} of Index {}".format(kwargs["pk"],"data", data_index))
        return Response(res)
    def update(self, request, *args, **kwargs):
        try:
            res = es.get(index=data_index, doc_type="data", id=kwargs["pk"])
        except NotFoundError as exc:
            raise exceptions.NotFound("Document {} was not found in Type {} of Index {}".format(kwargs["pk"],"data", data_index))
        partial = kwargs.get("partial", False)
        serializer = self.get_serializer(data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        # his_data keeps the previous version (archived below); data becomes
        # the new version.  copy.copy is shallow — nested values are shared.
        his_data = res["_source"]
        data = copy.copy(his_data)
        is_equal = True
        for k, v in serializer.validated_data.items():
            # Keys starting with "S" are system-managed; skip them.
            if k[0] == "S":
                continue
            # Datetimes (scalar or list) are serialized to ISO strings so they
            # compare like the stored ES values.
            if isinstance(serializer.fields.fields[k], serializers.DateTimeField):
                if isinstance(v, type([])):
                    v = list(map(lambda x: x.isoformat(), v))
                elif v != None:
                    v = v.isoformat()
            data[k] = v
            if his_data[k] != v:
                is_equal = False
        if is_equal:
            raise exceptions.ParseError(detail="No field changes")
        # Archive the previous version into the record index, rewriting its
        # system fields into history metadata.
        his_data.pop("S-creator")
        his_data.pop("S-creation-time")
        his_data["S-data-id"] = kwargs["pk"]
        his_data["S-changer"] = data["S-last-modified"]
        his_data["S-update-time"] = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
        data["S-last-modified"] = request.user.username
        his_data.pop("S-last-modified")
        es.index(index=record_data_index, doc_type="record-data", id=str(uuid.uuid1()).replace("-", ""), body=his_data)
        res = es.index(index=data_index, doc_type="data", id=kwargs["pk"], body=data)
        return Response(res)
    def destroy(self, request, *args, **kwargs):
        # Soft delete: move the document into the deleted index, then remove
        # it and its change history from the live/record indices.
        try:
            res = es.get(index=data_index, doc_type="data", id=kwargs["pk"])
            data = res["_source"]
            data.pop("S-last-modified")
            data["S-delete-time"] = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
            data["S-delete-people"] = request.user.username
            res = es.create(index=deleted_data_index, doc_type="deleted-data", id=kwargs["pk"], body=data)
            es.delete(index=data_index, doc_type="data", id=kwargs["pk"])
            es.delete_by_query(index=record_data_index, doc_type="record-data", body={"query": {"term": {"S-data-id": kwargs["pk"]}}})
        except NotFoundError as exc:
            raise exceptions.ParseError("Document {} was not found in Type {} of Index {}".format(kwargs["pk"],"data", table.name))
        # NOTE(review): returns a body with HTTP 204 — 204 responses should be
        # empty; confirm clients tolerate this.
        return Response(res, status=status.HTTP_204_NO_CONTENT)
    serializer_class = getattr(app_serializers, table.name)
    viewset = type(table.name, (mixins.ListModelMixin,
                                mixins.CreateModelMixin,
                                mixins.RetrieveModelMixin,
                                mixins.UpdateModelMixin,
                                mixins.DestroyModelMixin,
                                viewsets.GenericViewSet),
                   dict(serializer_class=serializer_class, permission_classes=(c_permissions.TableLevelPermission, ),
                        list=list, create=create, retrieve=retrieve, update=update, destroy=destroy))
    setattr(views, table.name, viewset)
    return viewset
| open-cmdb/cmdb | apps/data/initialize.py | initialize.py | py | 7,174 | python | en | code | 966 | github-code | 90 |
70249091178 | # Using SQLAlchemy to connect to the Database
from sqlalchemy import create_engine,MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from .config import Config
from .utils.log_helper import *
def create_async_uri(uri):
    """Rewrite a sync PostgreSQL URI so it uses the asyncpg driver.

    Fixed: the replacement is now limited to the first occurrence, so a URI
    whose path/credentials happen to contain the substring ``postgresql``
    (e.g. a database named ``postgresql_backup``) is no longer corrupted.
    """
    return uri.replace('postgresql', 'postgresql+asyncpg', 1)
# Engine with pre-ping (drops dead pooled connections) and a fixed pool of 20.
engine = create_engine(Config.API_DB_URI, echo=False, pool_pre_ping=True, pool_size=20, max_overflow=0)
Session = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# LocalSession = sessionmaker(autocommit=False, autoflush=True, bind=async_engine, expire_on_commit=True)
# NOTE(review): this module-level session is created at import time and shared
# by every importer — prefer the get_db() dependency below for request scope.
session = Session()
# All ORM models share the schema configured for the target DB.
Base = declarative_base(metadata=MetaData(schema=Config.TARGET_DB_SCHEMA))
def get_db():
    """FastAPI dependency: yield a fresh session and always close it after."""
    db_session = Session()
    try:
        yield db_session
    finally:
        db_session.close()
async def get_async_db():
    """Async counterpart of ``get_db``.

    NOTE(review): ``async_session`` and ``async_engine`` are not defined
    anywhere in this module (only ``create_async_engine`` is imported and
    never called), so invoking this dependency will raise ``NameError`` —
    presumably they were meant to be built via ``create_async_uri``; confirm
    and restore the missing setup.
    """
    async with async_session() as db:
        try:
            yield db
        finally:
            # Disposing the whole engine per request also looks suspicious —
            # verify this should not be a per-session close instead.
            await async_engine.dispose()
# async def get_refreshed_db(query):
# async with engine.begin() as conn:
# await conn.execute(
# query
# )
# await engine.dispose() | LACMTA/metro-api-v2 | fastapi/app/database.py | database.py | py | 1,217 | python | en | code | 0 | github-code | 90 |
18112278239 | import sys
# User-facing error messages printed before exiting on malformed stdin input.
ERROR_INPUT = 'input is invalid'
ERROR_INPUT_NOT_UNIQUE = 'input is not unique'
def main():
    """Read set S and queries T from stdin; print how many queries occur in S."""
    source = get_input1()
    queries = get_input2()
    matches = sum(1 for query in queries if linner_search(source, query))
    print(matches)
def linner_search(li, key):
    """Linear search: return True iff *key* occurs in *li*.

    Fixed: the original appended *key* to *li* as a search sentinel and never
    removed it, permanently mutating the caller's list on every call.  This
    version scans without modifying the input.
    """
    for item in li:
        if item == key:
            return True
    return False
def get_input1():
    """Read |S| and the elements of S from stdin, exiting on invalid input.

    Elements are kept as strings; values must lie in [0, 10**9] and the
    count must not exceed 10000.
    """
    n = int(input())
    if n > 10000:
        print(ERROR_INPUT)
        sys.exit(1)
    values = []
    for token in input().split(' '):
        if not 0 <= int(token) <= 10 ** 9:
            print(ERROR_INPUT)
            sys.exit(1)
        values.append(token)
    return values
def get_input2():
    """Read |T| and the (unique) elements of T from stdin, exiting on error.

    Fixed: the uniqueness check used ``int(x) in li`` while *li* stores
    strings, so comparing an int against string elements could never match
    and duplicates were silently accepted.  It now compares like with like.
    """
    n = int(input())
    if n > 500:
        print(ERROR_INPUT)
        sys.exit(1)
    li = []
    for x in input().split(' '):
        if int(x) < 0 or int(x) > 10 ** 9:
            print(ERROR_INPUT)
            sys.exit(1)
        elif x in li:
            print(ERROR_INPUT_NOT_UNIQUE)
            sys.exit(1)
        li.append(x)
    return li
main() | Aasthaengg/IBMdataset | Python_codes/p02267/s536747158.py | s536747158.py | py | 1,050 | python | en | code | 0 | github-code | 90 |
6059507202 | class Employee:
noOfLeaves = 5
# Constructor
def __init__(self, name, salary, role):
self.name = name
self.salary = salary
self.role = role
# Object Method
def printDetails(self):
return f"The name is {self.name}. Salary is {self.salary}. And role is {self.role}"
# Class Method
@classmethod
def changeLeaves(cls, leaves):
cls.noOfLeaves = leaves
# Class Method as an Alternate Constructor
@classmethod
def fromString(cls, string):
return cls(*string.split("-"))
# params = string.split("-")
# print(params)
# return cls(params[0], params[1], params[2])
# Demo: regular construction vs. the fromString alternate constructor.
harry = Employee("Harry", 1000, "Instructor")
rohan = Employee("Rohan", 500, "Student")
karan = Employee.fromString("Karan-1500-Senior Instructor")
# fromString keeps fields as strings, so salary prints as '1500'.
print(karan.salary, karan.role)
# changeLeaves is a classmethod: calling it on an instance still mutates the
# class attribute shared by all employees.
rohan.changeLeaves(10)
print(Employee.noOfLeaves)
print(rohan.printDetails())
print(harry.printDetails())
| SuryanshuTomar/Python | PythonTopics/AlternativeConstructor.py | AlternativeConstructor.py | py | 1,001 | python | en | code | 0 | github-code | 90 |
39501006079 | from django.db.models import Avg
from rest_framework import serializers
from .models import Product, Rating, Comment
class ProductSerializer(serializers.ModelSerializer):
    """Serializes the basic Product fields for list/detail endpoints."""
    class Meta:
        model = Product
        fields = ["id", "name", "description", "price"]
class RatingSerializer(serializers.ModelSerializer):
    """Serializes a single numeric rating attached to a product."""
    class Meta:
        model = Rating
        fields = ["id", "product", "value", "created"]
class CommentSerializer(serializers.ModelSerializer):
    """Serializes a user comment attached to a product."""
    class Meta:
        model = Comment
        fields = ["id", "product", "text", "author", "created"]
class ProductRatingSerializer(serializers.Serializer):
    """Aggregates a product's average rating plus its ratings and comments.

    NOTE(review): ``to_representation`` reads ``instance.validated_data``, i.e.
    it expects to be handed a *validated serializer*, not a model instance —
    confirm how callers invoke this.
    """
    product_id = serializers.IntegerField()
    def validate_product_id(self, value):
        # Reject ids that do not correspond to an existing Product.
        try:
            product = Product.objects.get(id=value)
        except Product.DoesNotExist:
            raise serializers.ValidationError("Product with this id does not exist")
        return value
    def to_representation(self, instance):
        product = Product.objects.get(id=instance.validated_data["product_id"])
        # average_rating is None when the product has no ratings (Avg of empty set).
        average_rating = Rating.objects.filter(product=product).aggregate(Avg("value"))["value__avg"]
        comments = Comment.objects.filter(product=product)
        # 'ratings' is presumably the related_name on Rating.product — verify.
        ratings_data = RatingSerializer(product.ratings, many=True).data
        comments_data = CommentSerializer(comments, many=True).data
        return {
            "product": product.name,
            "average_rating": average_rating,
            "ratings": ratings_data,
            "comments": comments_data
        }
26588540872 | import re
hand=open('C:/Users/PC/Documents/Assignment1/regex_sum_42.txt')
sum=0
for line in hand:
line=line.rstrip()
x=re.findall('[0-9]+',line)
for n in x:
print(n)
sum+=int(n)
print(sum)
| KhadidjaArezki/PY4E | Using_Python_to_Access_Web_Data/WEEK2/Assignment2/TestSum.py | TestSum.py | py | 218 | python | en | code | 0 | github-code | 90 |
18176980809 | N,K = input().split()
# N and K were read as strings on the preceding line; convert after parsing scores.
scores = [int(token) for token in input().split()]
N = int(N)
K = int(K)
# Compare each score against the one K positions earlier: the K-wide window
# sum grows iff the entering element beats the leaving one.
for i in range(K, N):
    print("Yes" if scores[i] > scores[i - K] else "No")
5098867928 | s=input()
# AOJ ALDS1_3_D "Areas on the Cross-Section Diagram": classic stack algorithm.
# s (read above) is the terrain string of '\', '/', '_' characters.
s1=[]   # stack of indices of unmatched '\' slopes
s2=[]   # stack of [left-index, area] for completed pools, left to right
ans=0   # total flooded area
for i in range(len(s)):
    if(s[i]=="\\"):
        s1.append(i)
    elif(s[i]=="/" and s1):
        # Match this '/' with the most recent '\'; the pair bounds one strip
        # of water whose area equals the horizontal distance between them.
        j = s1.pop()
        a=i-j
        ans+=a
        # Merge every pool that lies strictly inside [j, i] into this one.
        while(s2 and s2[-1][0]>j):
            a+=s2.pop()[1]
        s2.append([j,a])
print(ans)
# Second line: number of pools followed by each pool's area.
print(len(s2),*(a for j, a in s2))
73467973418 | import random
# Read a comma-separated enemy list, announce each, then strike one at random.
enemies = input().rstrip().split(",")
for enemy in enemies:
    print(enemy + "が現れた!")
enemy_count = len(enemies)
print("敵は" + str(enemy_count) + "匹")
target = random.randrange(enemy_count)
print(enemies[target] + "に会心の一撃" + enemies[target] + "を倒した")
43007953860 | import math
# beecrowd 1015: Euclidean distance between two points, 4 decimal places.
xa, ya = (float(value) for value in input().split(" "))
xb, yb = (float(value) for value in input().split(" "))
distancia = math.sqrt((xb - xa) ** 2 + (yb - ya) ** 2)
print(format(distancia, ".4f"))
43263966381 | #input variables to Zapier Code Transfer Step 10: Create Marketo Email
# Zapier Code step: create a Marketo email from a template.
# NOTE: this runs inside Zapier's implicit function wrapper — that is why the
# module-level ``return`` at the bottom is valid there, and why ``input`` (a
# dict Zapier injects, shadowing the builtin) is assigned here for local runs.
# NOTE(review): the stub below declares 'token'/'parent id' but the code reads
# input['dict'], input['folder_id'], input['folder_type'], input['template'],
# input['operational'] — the keys do not match; confirm against the Zap config.
input={
    'token': 'Token', #from Step 4: Get Marketo Access Token
    'parent id': 'fid', #from Step 5: Get Parent ID or Create Parent Folder
}
import requests
import datetime
import urllib.parse
import re
import ast
# Maps human-readable template names to Marketo template ids.
templates = {"Nurture Series 01" : "1662","Nurture Series 02" : "1671","Nurture Series 03" : "1675","Nurture Series 04" : "1689","Nurture Series 05" : "1697","Nurture Series 06" : "1698","Nurture Series 07" : "1699","Nurture Series 08" : "1700","Nurture Series 09" : "1650", "Transactional" : "1991"}
# The email metadata arrives as a stringified dict; parse it safely.
dictionary = ast.literal_eval(input['dict'])
print(templates[input['template']])
# Replace smart-punctuation characters (em dash, curly apostrophe) that break
# the form-encoded payload with ASCII equivalents.
jibberish = ["—", "’"]
char = ["--", "'"]
#print('Before: ', input['value'])
for i in range(0, len(jibberish)):
    dictionary['Subject Line A']=dictionary['Subject Line A'].replace(jibberish[i], char[i])
email_name = "Email " + dictionary['Email Name']
url = "https://028-jjw-728.mktorest.com/rest/asset/v1/emails.json"
authorization = "Bearer " + input['token']
# Hand-built x-www-form-urlencoded body for the Marketo "create email" call.
payload = 'folder={"id":' + input['folder_id'] +',"type":'+input['folder_type'] + '}&template=' + templates[input['template']] + '&subject=' + dictionary['Subject Line A'] + '&fromName=' + dictionary['From'] + '&fromEmail=' + dictionary['From Address'] + '&replyEmail=' + dictionary['Reply Address'] + '&operational=' + input['operational'] + '&name=' + email_name
headers = {
    'Content-Type': 'application/x-www-form-urlencoded',
    'Authorization': authorization
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
# Pull the new email's id out of the JSON response text.
# NOTE(review): pattern should be a raw string (r'...') to avoid invalid
# escape-sequence warnings.
eid = re.search('\[{"id":(\d*),', response.text ).group(1)
return {'email_id': eid}
| tyron-pretorius/zapier | email_request_creation/create_marketo_email.py | create_marketo_email.py | py | 1,696 | python | en | code | 1 | github-code | 90 |
19621142121 | import os
def limpa_ecra():
    """Clear the terminal screen.

    Fixed: 'cls' only exists on Windows; on POSIX systems the command is
    'clear', so the right one is picked from ``os.name``.
    """
    os.system("cls" if os.name == "nt" else "clear")
def cabecalho(texto):
    """Clear the screen and print *texto* framed by two separator rules."""
    limpa_ecra()
    separador = "=" * 25
    print(separador)
    print(texto)
    print(separador)
def inscrever_jogador():
    """Prompt for player records and append them to the module-level
    ``lista_dados`` until the user enters number 0."""
    cabecalho("INSCREVER JOGADOR(ES)")
    proximo = len(lista_dados) + 1
    while True:
        print("Registo Nº ", proximo)
        numero = int(input(" Número (0 p/terminar): "))
        if numero == 0:
            break
        nome = input(" Nome.................: ")
        valor_mercado = float(input(" Valor de Mercado.................: "))
        lista_dados.append([numero, nome, valor_mercado])
        proximo += 1
def consultar_jogador():
    """Print every record in ``lista_dados`` as a padded table."""
    cabecalho("CONSULTAR JOGADOR(ES)")
    print("#     Número     Nome                            Valor de Mercado")
    print("------------------------------------------------------------------")
    for indice, (numero, nome, valor_mercado) in enumerate(lista_dados):
        # Manual padding keeps the columns aligned for short values.
        print((indice + 1), " " * (4 - len(str(indice))), numero, " " * (10 - len(str(numero))), nome, " " * (30 - len(nome)), valor_mercado)
    print()
    input("ENTER p/ terminar...")
def mostrar_mediavalormercado():
    """Print the average market value across all registered players."""
    cabecalho("MÉDIA VALOR DE MERCADO")
    total = len(lista_dados)
    soma = sum(registo[2] for registo in lista_dados)
    if total > 0:
        media = soma / total
        print(f"Média Valor de Mercado: {media}")
    else:
        print("Sem dados....")
    print()
    input("ENTER p/ terminar...")
def mostrar_maximovalormercado():
    """Print the player with the highest market value, or a notice if empty.

    Fixed: no longer shadows the builtin ``max``, and the tracking variables
    are only created on the populated path (they were left unbound when the
    list was empty).  Console output is unchanged.
    """
    cabecalho("VALOR DE MERCADO MAIS ELEVADO")
    n = len(lista_dados)
    if n > 0:
        posicao = 0
        maior = lista_dados[0][2]
        for i in range(n):
            if lista_dados[i][2] > maior:
                maior = lista_dados[i][2]
                posicao = i
    else:
        print("Sem dados....")
    print()
    if n > 0:
        print(f"Valor de Mercado mais elevado...: {maior}")
        print(f"Número......: {lista_dados[posicao][0]}")
        print(f"Nome........: {lista_dados[posicao][1]}")
        print(f"Valor de Mercado........: {lista_dados[posicao][2]}")
    print()
    input("ENTER p/ terminar...")
def guardar_ficheiro():
    """Persist ``lista_dados`` to 'dados_jogador(es).txt' as CSV with header."""
    cabecalho("GUARDAR FICHEIRO")
    with open("dados_jogador(es).txt", "w", encoding='utf8') as ficheiro:
        ficheiro.write("Número, Nome, Valor de Mercado\n")
        for item in lista_dados:
            ficheiro.write(str(item[0]) + ", " + item[1] + ", " + str(item[2]) + "\n")
    print()
    print("Ficheiro guardado!")
    input("ENTER p/ terminar...")
def abrir_ficheiro():
    """Load records from 'dados_jogador(es).txt' into ``lista_dados``.

    Fixed: the file handle was never closed; it is now managed by a context
    manager.  The first (header) line is skipped as before.
    """
    cabecalho("ABRIR FICHEIRO")
    with open("dados_jogador(es).txt", "r", encoding='utf8') as ficheiro:
        contador = 1
        for linha in ficheiro.readlines():
            if contador > 1:
                lista_campos = linha.split(",")
                registo = [int(lista_campos[0]), lista_campos[1].strip(), float(lista_campos[2])]
                lista_dados.append(registo)
            contador = contador + 1
    print(lista_dados)
    print()
    print("Ficheiro aberto!")
    input("ENTER p/ terminar...")
def alterar_dados():
    """Interactively edit existing records by 1-based position (0 stops).

    Fixed: the bounds check used ``registo < n``, which made the last record
    impossible to edit; positions 1..n are now accepted, consistent with
    ``eliminar_dados``.
    """
    cabecalho("ALTERAR DADOS")
    print()
    n = len(lista_dados)
    for i in range(n):
        registo = int(input("Nº Registo a alterar (#).: "))
        if registo == 0:
            break
        if 1 <= registo <= n:
            registo = registo - 1
            print()
            print(f"Número.....: {lista_dados[registo][0]}")
            print(f"Nome.......: {lista_dados[registo][1]}")
            print(f"Valor de Mercado.......: {lista_dados[registo][2]}")
            print()
            print("Alterar registo:")
            novo_nome = input(" Alterar nome...............: ")
            novo_valormercado = float(input(" Alterar valor de mercado...............: "))
            lista_dados[registo][1] = novo_nome
            lista_dados[registo][2] = novo_valormercado
        # Confirmation is printed once per attempt, as in the original flow.
        print()
        print("Registo alterado!")
        input("ENTER p/ continuar....")
def eliminar_dados():
    """Print the table, then delete one record chosen by 1-based position.

    Fixed: typo in the user-facing confirmation ("Registo eiminado!" ->
    "Registo eliminado!").
    """
    cabecalho("ELIMINAR DADOS")
    print()
    print("#   Número   Nome                          Valor de Mercado")
    print("------------------------------------------------------------------")
    contador = 1
    for elemento in lista_dados:
        numero, nome, valor_mercado = elemento
        print(contador, " " * (3 - len(str(contador))), numero, " " * (8 - len(str(numero))), nome, " " * (30 - len(str(nome))), valor_mercado)
        contador = contador + 1
    print()
    registo = int(input("Nº Registo a eliminar (#).: "))
    if registo > 0 and registo < contador:
        registo = registo - 1
        print()
        print(f"Número.....: {lista_dados[registo][0]}")
        print(f"Nome.......: {lista_dados[registo][1]}")
        print(f"Valor de Mercado.......: {lista_dados[registo][2]}")
        lista_dados.pop(registo)
        print()
        print("Registo eliminado!")
        input("ENTER p/ continuar....")
def main():
    """Menu loop: dispatch each numeric option to its handler until option 9."""
    global lista_dados
    lista_dados = []
    opcoes = {
        1: inscrever_jogador,
        2: consultar_jogador,
        3: mostrar_mediavalormercado,
        4: mostrar_maximovalormercado,
        5: guardar_ficheiro,
        6: abrir_ficheiro,
        7: alterar_dados,
        8: eliminar_dados,
    }
    while True:
        cabecalho("LIGA PORTUGAL")
        print(" 1. INSCREVER JOGADOR(ES)")
        print(" 2. CONSULTAR JOGADOR(ES)")
        print(" 3. MÉDIA VALOR DE MERCADO")
        print(" 4. VALOR DE MERCADO MAIS ELEVADO")
        print(" 5. GUARDAR FICHEIRO")
        print(" 6. ABRIR FICHEIRO")
        print(" 7. ALTERAR DADOS")
        print(" 8. ELIMINAR DADOS")
        print(" 9. SAIR")
        print()
        opcao = int(input(" Escolha a opção: "))
        if opcao == 9:
            break
        if opcao in opcoes:
            opcoes[opcao]()
        else:
            print("Opção Errada!")
# Run the menu only when executed as a script (safe to import).
if __name__ == "__main__":
    main()
43004758138 | from nipype import Function
from nipype.algorithms import confounds
from nipype.interfaces import afni, fsl, utility
from PUMI.engine import NestedNode as Node, QcPipeline
from PUMI.engine import FuncPipeline
from PUMI.pipelines.multimodal.image_manipulation import pick_volume, timecourse2png
from PUMI.utils import calc_friston_twenty_four, calculate_FD_Jenkinson, mean_from_txt, max_from_txt
from PUMI.plot.carpet_plot import plot_carpet
@FuncPipeline(inputspec_fields=['in_file'],
              outputspec_fields=['out_file'])
def despiking_afni(wf, **kwargs):
    """
    Removes 'spikes' from functional 3d+time images.

    Inputs:
        in_file (str): Path to the 4d image.

    Outputs:
        out_file (str): 4d Image with spikes removed.

    Sinking:
        - The output image

    kwargs are forwarded to AFNI's 3dDespike interface.
    """
    despike = Node(interface=afni.Despike(**kwargs), name='despike')
    despike.inputs.outputtype = 'NIFTI_GZ'
    wf.connect('inputspec', 'in_file', despike, 'in_file')
    wf.connect(despike, 'out_file', 'outputspec', 'out_file')
    # Attach the carpet-plot QC sub-workflow to the despiked output.
    qc_wf = qc('qc_wf')
    wf.connect(despike, 'out_file', qc_wf, 'in_file')
@QcPipeline(inputspec_fields=['in_file'],
            outputspec_fields=['out_file'])
def qc(wf):
    """
    Creates carpet plot after despiking.

    Inputs:
        in_file (str): Path to despiked 4d image.

    Outputs:
        out_file (Axes): Matplotlib Axes to be used in composite figures.

    Sinking:
        Carpet plot as png image.
    """
    plot_interface = Function(
        input_names=['img', 'save_carpet'],
        output_names=['ax1'],
        function=plot_carpet)

    # Important because the default of save_carpet is False
    plot_interface.inputs.save_carpet = True

    carpet_node = Node(name='carpet_node',
                       interface=plot_interface)

    wf.connect('inputspec', 'in_file', carpet_node, 'img')
    wf.connect(carpet_node, 'ax1', 'outputspec', 'out_file')
@QcPipeline(inputspec_fields=['func', 'motion_correction', 'plot_motion_trans', 'FD_figure'],
            outputspec_fields=[],
            default_regexp_sub=False,
            regexp_sub=[(r'(.*\/)([^\/]+)\/([^\/]+)\/([^\/]+)$', r'\g<1>qc_motion_correction/\g<3>-\g<2>.png'),
                        ('_subject_', 'sub-')])
def qc_motion_correction_mcflirt(wf, **kwargs):
    """
    Save quality check images for mcflirt motion-correction

    Inputs:
        func (str): motion-corrected 4d timeseries
        motion_correction (str): rotations plot image
        plot_motion_trans (str): translations plot image
        FD_figure (str): framewise-displacement plot image

    Sinking:
        - rotations plot
        - translations plot
        - FD plot
        - timeseries
    """
    # sink=False keeps the timecourse image inside this QC folder structure
    # instead of the default sinking location.
    mc_timecourse = timecourse2png('mc_timeseries', sink=False)  # sink=False important for qc-folder-struktur
    wf.connect('inputspec', 'func', mc_timecourse, 'func')

    # sinking
    wf.connect(mc_timecourse, 'out_file', 'sinker', 'mc_timeseries')
    wf.connect('inputspec', 'motion_correction', 'sinker', 'mc_rotations')
    wf.connect('inputspec', 'plot_motion_trans', 'sinker', 'mc_translations')
    wf.connect('inputspec', 'FD_figure', 'sinker', 'FD')
@FuncPipeline(inputspec_fields=['in_file'],
              outputspec_fields=['func_out_file', 'mat_file', 'mc_par_file', 'friston24_file', 'FD_file'])
def motion_correction_mcflirt(wf, reference_vol='middle', FD_mode='Power', **kwargs):
    """
    Use FSL MCFLIRT to do the motion correction of the 4D functional data and use the 6df rigid body motion parameters
    to calculate friston24 parameters for later nuissance regression step.

    Parameters:
        reference_vol (str): Either "first", "middle", "last", "mean", or the index of the volume which the rigid body
                             registration (motion correction) will use as reference.
                             Default is 'middle'.
        FD_mode: Either "Power" or "Jenkinson"

    Inputs:
        in_file (str): Reoriented functional file

    Outputs:
        func_out_file (str): Path to motion-corrected timeseries
        mat_file (str): Path to motion-correction transformation matrices
        mc_par_file (str): Path to file with motion parameters
        friston24_file (str): Path to file with friston24 parameters
        FD_file (str): Path to file with FD

    Sinking:
        - motion-corrected timeseries
        - motion-correction transformation matrices
        - absolute and relative displacement parameters
        - friston24 parameters
        - FD
        - FDmax
        - quality check images (FD/rotations/translations and timeseries plot)

    Acknowledgements:
        Adapted from Balint Kincses (2018)

        Modified version of PAC.func_preproc.func_preproc
        (https://github.com/FCP-INDI/C-PAC/blob/main/CPAC/func_preproc/func_preproc.py)
        and CPAC.generate_motion_statistics.generate_motion_statistics
        (https://github.com/FCP-INDI/C-PAC/blob/main/CPAC/generate_motion_statistics/generate_motion_statistics.py)
    """
    # Fail fast on an unsupported FD estimation mode.
    if FD_mode not in ['Power', 'Jenkinson']:
        raise ValueError(f'FD_mode has to be "Power" or "Jenkinson"! %s is not a valid option!' % FD_mode)

    # Extract the requested reference volume (ignored when reference_vol=="mean",
    # where MCFLIRT computes its own mean image instead).
    refvol = pick_volume(volume=reference_vol, name='refvol')
    wf.connect('inputspec', 'in_file', refvol, 'in_file')

    mcflirt = Node(interface=fsl.MCFLIRT(interpolation="spline", stats_imgs=False), name='mcflirt')
    if reference_vol == "mean":
        mcflirt.inputs.mean_vol = True
    mcflirt.inputs.dof = 6
    mcflirt.inputs.save_mats = True
    mcflirt.inputs.save_plots = True
    mcflirt.inputs.save_rms = True
    mcflirt.inputs.stats_imgs = False
    wf.connect('inputspec', 'in_file', mcflirt, 'in_file')
    if reference_vol != "mean":
        wf.connect(refvol, 'out_file', mcflirt, 'ref_file')

    # Expand the 6 rigid-body parameters into the Friston-24 regressor set.
    calc_friston = Node(
        utility.Function(
            input_names=['in_file'], output_names=['out_file'],
            function=calc_friston_twenty_four
        ),
        name='calc_friston'
    )
    wf.connect(mcflirt, 'par_file', calc_friston, 'in_file')

    # Framewise displacement, via nipype's Power implementation or the local
    # Jenkinson helper, both fed from MCFLIRT's parameter file.
    if FD_mode == "Power":
        calculate_FD = Node(
            confounds.FramewiseDisplacement(
                parameter_source='FSL',
                save_plot=True,
                out_figure='fd_power_2012.png'
            ),
            name='calculate_FD_Power'
        )
    elif FD_mode == "Jenkinson":
        calculate_FD = Node(
            utility.Function(
                input_names=['in_file'],
                output_names=['out_file'],
                function=calculate_FD_Jenkinson
            ),
            name='calculate_FD_Jenkinson'
        )
    wf.connect(mcflirt, 'par_file', calculate_FD, 'in_file')

    # Scalar summaries of the FD trace (mean and max) for group-level QC.
    mean_FD = Node(
        utility.Function(
            input_names=['in_file', 'axis', 'header', 'out_file'],
            output_names=['mean_file'],
            function=mean_from_txt
        ),
        name='meanFD'
    )
    mean_FD.inputs.axis = 0  # column-wise -> global mean over time
    mean_FD.inputs.header = True
    mean_FD.inputs.out_file = 'FD.txt'
    wf.connect(calculate_FD, 'out_file', mean_FD, 'in_file')

    max_FD = Node(
        utility.Function(
            input_names=['in_file', 'axis', 'header', 'out_file'],
            output_names=['max_file'],
            function=max_from_txt
        ),
        name='maxFD'
    )
    max_FD.inputs.axis = 0  # column-wise -> global max over time
    max_FD.inputs.header = True
    max_FD.inputs.out_file = 'FDmax.txt'
    wf.connect(calculate_FD, 'out_file', max_FD, 'in_file')

    # Rotation/translation parameter plots for the QC report.
    plot_motion_rot = Node(
        interface=fsl.PlotMotionParams(in_source='fsl'),
        name='plot_motion_rot')
    plot_motion_rot.inputs.plot_type = 'rotations'
    wf.connect(mcflirt, 'par_file', plot_motion_rot, 'in_file')

    plot_motion_trans = Node(
        interface=fsl.PlotMotionParams(in_source='fsl'),
        name='plot_motion_trans')
    plot_motion_trans.inputs.plot_type = 'translations'
    wf.connect(mcflirt, 'par_file', plot_motion_trans, 'in_file')

    qc_mc = qc_motion_correction_mcflirt('qc_mc')
    wf.connect(plot_motion_rot, 'out_file', qc_mc, 'motion_correction')
    wf.connect(plot_motion_trans, 'out_file', qc_mc, 'plot_motion_trans')
    wf.connect(calculate_FD, 'out_figure', qc_mc, 'FD_figure')
    wf.connect(mcflirt, 'out_file', qc_mc, 'func')

    # sinking
    wf.connect(mcflirt, 'out_file', 'sinker', 'mc_func')
    wf.connect(mcflirt, 'par_file', 'sinker', 'mc_par')
    wf.connect(mcflirt, 'rms_files', 'sinker', 'mc_rms')
    wf.connect(calc_friston, 'out_file', 'sinker', 'mc_first24')
    wf.connect(mean_FD, 'mean_file', 'sinker', 'FD')
    wf.connect(max_FD, 'max_file', 'sinker', 'FDmax')

    # output
    wf.connect(mcflirt, 'out_file', 'outputspec', 'func_out_file')
    wf.connect(mcflirt, 'mat_file', 'outputspec', 'mat_file')
    wf.connect(mcflirt, 'par_file', 'outputspec', 'mc_par_file')
    wf.connect(calculate_FD, 'out_file', 'outputspec', 'FD_file')
    wf.connect(calc_friston, 'out_file', 'outputspec', 'friston24_file')
@QcPipeline(inputspec_fields=['in_file'],
            outputspec_fields=['out_file'])
def qc_nuisance_removal(wf, **kwargs):
    """
    Create quality check images for nuisance removal.

    Inputs:
        in_file (str): Filtered data

    Outputs:
        out_file (str): Path to quality check image

    Sinking:
        - The quality check image
    """
    nuisance_removal_qc = timecourse2png('nuisance_removal_qc')
    wf.connect('inputspec', 'in_file', nuisance_removal_qc, 'func')

    # outputspec
    wf.connect(nuisance_removal_qc, 'out_file', 'outputspec', 'out_file')

    # sinking
    wf.connect(nuisance_removal_qc, 'out_file', 'sinker', 'qc_nuisance_removal')
@FuncPipeline(inputspec_fields=['in_file', 'design_file'],
              outputspec_fields=['out_file'])
def nuisance_removal(wf, **kwargs):
    """Regress nuisance signals out of the functional data with FSL.

    CAUTION: Name in the old PUMI was nuissremov_workflow.

    Inputs:
        in_file (str): Path to reoriented motion corrected functional data.
        design_file (str): Path to matrix which contains all the nuissance
            regressors (motion + compcor noise + ...).
    Outputs:
        - Path to the filtered data
    Sinking:
        - Filtered data

    Acknowledgements:
        Adapted from Balint Kincses (2018)
    """
    import nipype.interfaces.fsl as fsl

    regressor_node = Node(interface=fsl.FilterRegressor(filter_all=True), name='nuisance_regression')
    wf.connect('inputspec', 'in_file', regressor_node, 'in_file')
    wf.connect('inputspec', 'design_file', regressor_node, 'design_file')
    # quality-check image of the filtered time-course
    qc = qc_nuisance_removal('qc')
    wf.connect(regressor_node, 'out_file', qc, 'in_file')
    # sink the filtered data
    wf.connect(regressor_node, 'out_file', 'sinker', 'func_nuis_corrected')
    # and expose it as the pipeline output
    wf.connect(regressor_node, 'out_file', 'outputspec', 'out_file')
# TODO: test nuisance removal wf
| pni-lab/PUMI | PUMI/pipelines/func/deconfound.py | deconfound.py | py | 10,830 | python | en | code | 1 | github-code | 90 |
from itertools import permutations
from scipy.sparse.csgraph import floyd_warshall
# n towns, m undirected roads, and r towns (listed in R) that must be visited.
n,m,r=map(int,input().split())
R=list(map(int,input().split()))
# Dense adjacency matrix; inf where there is no direct road.
l=[[float('inf')]*n for _ in range(n)]
for _ in range(m):
    a,b,c,=map(int,input().split())
    a-=1
    b-=1
    l[a][b]=c
    l[b][a]=c
for i in range(n):
    l[i][i] = 0  # travelling to the same town costs nothing
def warshall_floyd(d):
    """All-pairs shortest paths (Floyd-Warshall), computed in place.

    Mutates and returns the square distance matrix ``d``.  Uses the
    matrix's own size instead of the module-global ``n`` so it works on
    any square matrix.  O(V^3).
    """
    size = len(d)
    for k in range(size):
        for i in range(size):
            for j in range(size):
                d[i][j] = min(d[i][j], d[i][k] + d[k][j])
    return d
#F=warshall_floyd(l)
F1 = floyd_warshall(l)
# Brute-force every visiting order of the r required towns (r is small,
# so r! permutations are affordable) and keep the cheapest total cost.
ans=float('inf')
for v in permutations(R):
    temp=0
    for i in range(r-1):
        temp+=F1[v[i]-1][v[i+1]-1]
    ans=min(ans,temp)
print(int(ans))
def main():
    """Maximum number of vertices of a depth-n binary tree that has exactly
    L[d] leaves at every depth d; prints -1 when no such tree exists.
    """
    n = int(input().strip())
    L = list(map(int, input().strip().split()))
    rem = sum(L)   # leaves still to be placed at this depth or deeper
    cur = 1        # nodes available at the current depth (starts at the root)
    ans = 0        # running total of vertices
    for i in range(n + 1):
        ans += cur
        rem -= L[i]
        cur -= L[i]  # L[i] of this level's nodes become leaves
        # Infeasible: more leaves than nodes here, or the level empties
        # before the final depth is reached.
        if cur < 0 or cur == 0 and i != n:
            print(-1)
            return
        if cur < rem:
            # widen the next level: at most double, never more than the
            # number of leaves still required below
            cur = min(cur * 2, rem)
    print(ans)
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p02665/s286621146.py | s286621146.py | py | 403 | python | en | code | 0 | github-code | 90 |
from sys import stdin, setrecursionlimit
input = stdin.buffer.readline  # fast input
H, W = map(int, input().split())
A = [list(map(int, input().split())) for _ in range(H)]
# Greedily sweep the grid left-to-right, top-to-bottom, pushing each odd
# remainder one cell right (or down at the end of a row) so at most the
# final cell stays odd.  Moves are recorded 1-indexed as (y, x, y', x').
answer = []
for i in range(H):
    for j in range(W):
        if A[i][j] % 2:
            if j < W - 1:
                # move one unit to the right neighbour
                answer.append((i + 1, j + 1, i + 1, j + 2))
                A[i][j] -= 1
                A[i][j + 1] += 1
            else:
                if i < H - 1:
                    # last column: move one unit down instead
                    answer.append((i + 1, j + 1, i + 2, j + 1))
                    A[i][j] -= 1
                    A[i + 1][j] += 1
print(len(answer))
for a in answer:
    print(*a)
n=int(input("How many rows are there?\n"))
k=2  # next bottom-row column to leave blank (advances by 2 per printed star)
for a in range(1,n+1):          # rows
    for b in range(1,2*n):      # columns
        if a+b==n+1 or b-a==n-1:
            # the two diagonals meeting at the top centre (b == n)
            print("*",end=" ")
        elif a==n and b!=k:
            # bottom row: print a star except at the current skip column
            print("*",end=" ")
            k=k+2
        else:
            print(" ",end=" ")
    print()
| Mahendra710/Star_Pattern | 8.8- Printing Star.py | 8.8- Printing Star.py | py | 306 | python | en | code | 0 | github-code | 90 |
import requests
import os
from dotenv import load_dotenv

# Pull secrets (e.g. BEARER_TOKEN) from a local .env file into os.environ.
load_dotenv()
'''
Return tweets for a keyword seach term.
'''
url = "https://api.twitter.com/2/tweets/search/recent?max_results=25&expansions=referenced_tweets.id&tweet.fields=text&query=lang%3Aen%20"+search_term
payload={}
headers = {
'Authorization': os.getenv("BEARER_TOKEN"),
'Cookie': 'guest_id=v1%3A165292464833657250; guest_id_ads=v1%3A165292464833657250; guest_id_marketing=v1%3A165292464833657250; personalization_id="v1_D50leSEsdlQN9nTvwQ6B+g=="'
}
try:
response = requests.request("GET", url, headers=headers, data=payload).json()['includes']['tweets']
if len(response)<5:
return 0
else:
tweets = [i['text'] for i in response]
return tweets
except Exception as e:
error_message = f"An error occured in searchTweets(): {e}"
return error_message
def call_trends():
    '''
    Return the names of current US trending topics, busiest first, or an
    error-message string when the request or parsing fails.
    '''
    url = "https://api.twitter.com/1.1/trends/place.json?id=23424977"
    headers = {
      'Authorization': os.getenv("BEARER_TOKEN"),
      'Cookie': 'guest_id=v1%3A165292464833657250; guest_id_ads=v1%3A165292464833657250; guest_id_marketing=v1%3A165292464833657250; personalization_id="v1_D50leSEsdlQN9nTvwQ6B+g=="'
    }
    try:
        response = requests.request("GET",url,headers=headers).json()[0]['trends']
        # Treat a missing tweet_volume as 0 so None never breaks the sort.
        response.sort(key=lambda x:0 if x["tweet_volume"] is None else x["tweet_volume"],reverse=True)
        results = [trend['name'] for trend in response]
        return results
    except Exception as e:
        error_message = f"An error occured in call_trends(): {e}"
        return error_message
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
# Custom weight initialisation applied to netG and netD (DCGAN-style).
def weights_init(m):
    """Initialise conv weights ~ N(0, 0.02); batch-norm weights ~ N(1, 0.02) with zero bias."""
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
class SGAN(object):
    """Container for a DCGAN-style generator/discriminator pair working on
    127x127 images.

    NOTE(review): the attribute name ``main`` and the exact layer order of
    each Sequential determine the state-dict keys of saved checkpoints —
    do not rename or reorder.
    """
    class _netG(nn.Module):
        """Generator: latent volume of nz x 5 x 5 -> image of nc x 127 x 127."""
        def __init__(self, ngpu, nc, ngf, nz, **kw):
            super().__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is nz x 5 x 5
                # output is nc x 127 x 127
                nn.ConvTranspose2d(nz, ngf * 8, 5, 2, 2, bias=False),
                nn.BatchNorm2d(ngf * 8),
                nn.ReLU(True),
                nn.ConvTranspose2d(ngf * 8, ngf * 4, 5, 2, 2, bias=False),
                nn.BatchNorm2d(ngf * 4),
                nn.ReLU(True),
                nn.ConvTranspose2d(ngf * 4, ngf * 2, 5, 2, 2, bias=False),
                nn.BatchNorm2d(ngf * 2),
                nn.ReLU(True),
                nn.ConvTranspose2d(ngf * 2, ngf, 5, 2, 2, bias=False),
                nn.BatchNorm2d(ngf),
                nn.ReLU(True),
                nn.ConvTranspose2d(ngf, nc, 5, 2, 2, bias=False),
                nn.Tanh()
            )
        def forward(self, input):
            # Split the batch across GPUs only for CUDA inputs with ngpu > 1.
            if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
                output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
            else:
                output = self.main(input)
            return output
    class _netD(nn.Module):
        """Discriminator: image of nc x 127 x 127 -> one probability per sample."""
        def __init__(self, ngpu, nc, ndf, **kw):
            super().__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is nc x 127 x 127
                # output is 1 x 5 x 5
                nn.Conv2d(nc, ndf, 5, 2, 2, bias=False),
                nn.BatchNorm2d(ndf),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf, ndf * 2, 5, 2, 2, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf * 2, ndf * 4, 5, 2, 2, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf * 4, ndf * 8, 5, 2, 2, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf * 8, 1, 5, 2, 2, bias=False),
                nn.Sigmoid()
            )
        def forward(self, input):
            if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
                output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
            else:
                output = self.main(input)
            # Flatten the 5x5 score map into one value per sample.
            return output.view(-1, 1).squeeze(1)
| saumyasinha/DynamicCamouflage | Module_Generator/SGAN_Model.py | SGAN_Model.py | py | 3,008 | python | en | code | 0 | github-code | 90 |
from binaryninja import *
# NOTE(review): the star import supplies BinaryView and friends; consider
# importing the used names explicitly.
from binaryninjaui import WidgetPane, UIActionHandler, UIAction, Menu, UIContext, UIContextNotification
from pefile import ExceptionsDirEntryData, PE, PEFormatError
from PySide6 import QtCore
from PySide6.QtCore import Qt
from PySide6.QtWidgets import QHBoxLayout, QVBoxLayout, QLabel, QWidget, QListWidget, QListWidgetItem, QTextEdit, QCheckBox, QPushButton
from PySide6.QtGui import QMouseEvent
class SEHListItem(QListWidgetItem):
    """List row showing one exception-directory entry as 'begin-end' (absolute addresses)."""
    def __init__(self, base: int, entry: ExceptionsDirEntryData):
        start = base + entry.struct.BeginAddress
        end = base + entry.struct.EndAddress
        QListWidgetItem.__init__(self, f"{hex(start)}-{hex(end)}")
        self.entry = entry
class AddrLabel(QLabel):
    """Clickable address label: a plain left-click navigates the BinaryView
    to the stored address.

    The text renders as ``opt_text + hex(addr)`` (or just ``hex(addr)``);
    the label is cleared when neither address nor prefix is set.
    """
    def __init__(self, address, bv: BinaryView, opt_text = None):
        QLabel.__init__(self)
        self.bv = bv
        self.addr = address
        self.opt_text = opt_text
        # Route the initial rendering through setAddr so construction and
        # later updates format identically (previously addr=None with an
        # opt_text showed an empty label instead of the prefix).
        self.setAddr(address)
    def mouseReleaseEvent(self, event: QMouseEvent) -> None:
        """Unmodified left-click jumps the view to the address (if any)."""
        if event.modifiers() == Qt.NoModifier and event.button() == Qt.LeftButton:
            if self.addr is not None:
                self.bv.offset = self.addr
        return super().mouseReleaseEvent(event)
    def setAddr(self, addr):
        """Set (or clear, with None) the target address and refresh the text."""
        self.addr = addr
        if addr is not None and self.opt_text is not None:
            self.setText(self.opt_text + hex(addr))
        elif addr is not None:
            self.setText(hex(addr))
        elif self.opt_text is not None:
            self.setText(self.opt_text)
        else:
            self.clear()
    def setOptText(self, text):
        """Set the text prefix and re-render with the current address."""
        self.opt_text = text
        self.setAddr(self.addr)
class SEHNotifications(UIContextNotification):
    """Keeps the SEH widget's selection in sync with the UI cursor.

    Registers itself with the UIContext on construction and unregisters
    when the widget is destroyed, so the callback never outlives it.
    """
    def __init__(self, widget):
        UIContextNotification.__init__(self)
        self.widget = widget
        # Unregister when the widget goes away to avoid dangling callbacks.
        self.widget.destroyed.connect(self.destroyed)
        UIContext.registerNotification(self)
    def destroyed(self):
        UIContext.unregisterNotification(self)
    def OnAddressChange(self, context, frame, view, location):
        # Only track the cursor while "Follow Cursor" is checked.
        if self.widget.follow_cb.isChecked():
            self.widget.gotoAddr(location.getOffset())
class SEHWidget(QWidget, UIContextNotification):
    """Pane listing every entry of a PE's exception directory and showing
    the decoded UNWIND_INFO fields of the currently selected entry.
    """
    def gotoAddr(self, addr):
        """Select the entry whose [begin, end) range covers ``addr``;
        clear the selection and the detail fields when none does."""
        for x in self.dict:
            if x[0] <= addr < x[1]:
                row = self.dict[x]
                self.list.setCurrentRow(row)
                self.listItemClicked(self.list.item(row))
                return
        self.list.clearSelection()
        self.listItemClicked(None)
    def gotoButtonClicked(self):
        """Jump to the entry covering the BinaryView's current cursor."""
        self.gotoAddr(self.bv.offset)
    def __init__(self, bv: BinaryView, file: PE):
        """Build the widget from an already-parsed PE.

        ``self.dict`` maps absolute (begin, end) ranges to list rows so
        gotoAddr can resolve a cursor address to an entry.
        """
        QWidget.__init__(self)
        layout = QVBoxLayout()
        # Header row: "Goto Cursor" button plus the "Follow Cursor" checkbox.
        header_layout = QHBoxLayout()
        self.follow_cb = QCheckBox()
        follow_layout = QHBoxLayout()
        follow_layout.addWidget(QLabel("Follow Cursor: "))
        follow_layout.addWidget(self.follow_cb)
        goto_button = QPushButton()
        goto_button.setText("Goto Cursor")
        goto_button.clicked.connect(self.gotoButtonClicked)
        header_layout.addWidget(goto_button)
        header_layout.addStretch()
        header_layout.addLayout(follow_layout)
        # Clickable begin/end/unwind address rows for the selected entry.
        self.begin_addr = AddrLabel(None, bv)
        begin_addr_layout = QHBoxLayout()
        begin_addr_layout.addWidget(QLabel("Begin Address: "))
        begin_addr_layout.addWidget(self.begin_addr)
        self.end_addr = AddrLabel(None, bv)
        end_addr_layout = QHBoxLayout()
        end_addr_layout.addWidget(QLabel("End Address: "))
        end_addr_layout.addWidget(self.end_addr)
        self.unwind_addr = AddrLabel(None, bv)
        unwind_addr_layout = QHBoxLayout()
        unwind_addr_layout.addWidget(QLabel("Unwind Address: "))
        unwind_addr_layout.addWidget(self.unwind_addr)
        # "Unwind Info" detail panel.
        unwind_layout = QVBoxLayout()
        self.unwind_version = QLabel("")
        self.unwind_prolog_size = AddrLabel(None, bv, "")
        self.unwind_code_count = QLabel("")
        self.unwind_frame_register = QLabel("")
        self.unwind_frame_offset = QLabel("")
        self.unwind_flags = QLabel("")
        self.unwind_codes = QTextEdit()
        self.unwind_codes.setReadOnly(True)
        self.unwind_exception_handler = AddrLabel(None, bv)
        title = QLabel("Unwind Info")
        title.setAlignment(QtCore.Qt.AlignCenter)
        unwind_layout.addWidget(title)
        unwind_verison_layout = QHBoxLayout()
        unwind_verison_layout.addWidget(QLabel("Version: "))
        unwind_verison_layout.addWidget(self.unwind_version)
        unwind_layout.addLayout(unwind_verison_layout)
        unwind_flags_layout = QHBoxLayout()
        unwind_flags_layout.addWidget(QLabel("Flags: "))
        unwind_flags_layout.addWidget(self.unwind_flags)
        unwind_layout.addLayout(unwind_flags_layout)
        unwind_exception_handler_layout = QHBoxLayout()
        unwind_exception_handler_layout.addWidget(
            QLabel("Exception Handler: "))
        unwind_exception_handler_layout.addWidget(
            self.unwind_exception_handler)
        unwind_layout.addLayout(unwind_exception_handler_layout)
        unwind_prolog_size_layout = QHBoxLayout()
        unwind_prolog_size_layout.addWidget(QLabel("Prolog Size: "))
        unwind_prolog_size_layout.addWidget(self.unwind_prolog_size)
        unwind_layout.addLayout(unwind_prolog_size_layout)
        unwind_code_count_layout = QHBoxLayout()
        unwind_code_count_layout.addWidget(QLabel("Code Count: "))
        unwind_code_count_layout.addWidget(self.unwind_code_count)
        unwind_layout.addLayout(unwind_code_count_layout)
        unwind_frame_register_layout = QHBoxLayout()
        unwind_frame_register_layout.addWidget(QLabel("Frame Register: "))
        unwind_frame_register_layout.addWidget(self.unwind_frame_register)
        unwind_layout.addLayout(unwind_frame_register_layout)
        unwind_frame_offset_layout = QHBoxLayout()
        unwind_frame_offset_layout.addWidget(QLabel("Frame Offset: "))
        unwind_frame_offset_layout.addWidget(self.unwind_frame_offset)
        unwind_layout.addLayout(unwind_frame_offset_layout)
        unwind_codes_layout = QHBoxLayout()
        unwind_codes_layout.addWidget(QLabel("Codes: "))
        unwind_codes_layout.addWidget(self.unwind_codes)
        unwind_layout.addLayout(unwind_codes_layout)
        # Entry list: one row per exception-directory entry, and a lookup
        # table from absolute (begin, end) range to its row index.
        self.dict = {}
        self.list = QListWidget()
        self.list.currentItemChanged.connect(self.listItemClicked)
        ctr = 0
        for entry in file.DIRECTORY_ENTRY_EXCEPTION:
            item = SEHListItem(file.OPTIONAL_HEADER.ImageBase, entry)
            self.list.addItem(item)
            self.dict[(file.OPTIONAL_HEADER.ImageBase + entry.struct.BeginAddress,
                       file.OPTIONAL_HEADER.ImageBase + entry.struct.EndAddress)] = ctr
            ctr += 1
        list_layout = QHBoxLayout()
        list_layout.addWidget(QLabel("Entries: "))
        list_layout.addWidget(self.list)
        # Assemble the pane top-to-bottom.
        layout.addLayout(header_layout)
        layout.addLayout(list_layout)
        layout.addLayout(begin_addr_layout)
        layout.addLayout(end_addr_layout)
        layout.addLayout(unwind_addr_layout)
        layout.addLayout(unwind_layout)
        self.setLayout(layout)
        self.file = file
        self.bv = bv
        self.notifications = SEHNotifications(self)
    def listItemClicked(self, clickedItem):
        """Populate the detail fields for an entry, or clear them for None."""
        if clickedItem == None:
            self.begin_addr.setAddr(None)
            self.end_addr.setAddr(None)
            self.unwind_addr.setAddr(None)
            self.unwind_version.clear()
            self.unwind_flags.clear()
            # NOTE(review): clear() on the AddrLabels below is QLabel.clear
            # and leaves their .addr stale; setAddr(None) would be safer.
            self.unwind_prolog_size.clear()
            self.unwind_code_count.clear()
            self.unwind_frame_register.clear()
            self.unwind_frame_offset.clear()
            self.unwind_codes.clear()
            self.unwind_exception_handler.clear()
        else:
            # RVAs from the RUNTIME_FUNCTION entry, rebased to absolute.
            self.begin_addr.setAddr(
                self.file.OPTIONAL_HEADER.ImageBase + clickedItem.entry.struct.BeginAddress)
            self.end_addr.setAddr(
                self.file.OPTIONAL_HEADER.ImageBase + clickedItem.entry.struct.EndAddress)
            self.unwind_addr.setAddr(
                self.file.OPTIONAL_HEADER.ImageBase + clickedItem.entry.struct.UnwindData)
            self.unwind_version.setText(str(clickedItem.entry.unwindinfo.Version))
            # Decode the UNW_FLAG_* bits (0 means no handler).
            unwind_flags = []
            if clickedItem.entry.unwindinfo.Flags == 0:
                unwind_flags.append("UNW_FLAG_NHANDLER")
            if clickedItem.entry.unwindinfo.Flags & 1:
                unwind_flags.append("UNW_FLAG_EHANDLER")
            if clickedItem.entry.unwindinfo.Flags & 2:
                unwind_flags.append("UNW_FLAG_UHANDLER")
            if clickedItem.entry.unwindinfo.Flags & 4:
                unwind_flags.append("UNW_FLAG_CHAININFO")
            self.unwind_flags.setText(str(clickedItem.entry.unwindinfo.Flags) + " (" + (", ".join(unwind_flags)) + ")")
            if clickedItem.entry.unwindinfo.SizeOfProlog != 0:
                # Non-empty prolog: make its end address clickable.
                self.unwind_prolog_size.setOptText(
                    str(clickedItem.entry.unwindinfo.SizeOfProlog) + " bytes, ends at: ")
                self.unwind_prolog_size.setAddr(self.file.OPTIONAL_HEADER.ImageBase + clickedItem.entry.struct.BeginAddress + clickedItem.entry.unwindinfo.SizeOfProlog)
            else:
                self.unwind_prolog_size.setOptText(
                    str(clickedItem.entry.unwindinfo.SizeOfProlog) + " bytes")
                self.unwind_prolog_size.setAddr(None)
            self.unwind_code_count.setText(
                str(clickedItem.entry.unwindinfo.CountOfCodes))
            self.unwind_frame_register.setText(
                str(clickedItem.entry.unwindinfo.FrameRegister))
            self.unwind_frame_offset.setText(
                str(clickedItem.entry.unwindinfo.FrameOffset))
            # One unwind code per line in the read-only text box.
            codes = ""
            for x in clickedItem.entry.unwindinfo.UnwindCodes:
                codes += str(x) + '\n'
            self.unwind_codes.setText(codes)
            if hasattr(clickedItem.entry.unwindinfo, 'ExceptionHandler'):
                self.unwind_exception_handler.setAddr(
                    self.file.OPTIONAL_HEADER.ImageBase + clickedItem.entry.unwindinfo.ExceptionHandler)
            else:
                self.unwind_exception_handler.clear()
    @staticmethod
    def createPane(context):
        """Parse the raw parent view as a PE and open the widget in a pane."""
        if context.context and context.binaryView and context.binaryView.parent_view:
            data = context.binaryView.parent_view.read(
                0, context.binaryView.parent_view.length)
            widget = SEHWidget(context.binaryView, PE(data=data))
            pane = WidgetPane(widget, "Structured Exception Handlers")
            context.context.openPane(pane)
    @staticmethod
    def canCreatePane(context):
        """Return True iff the parent view parses as a PE (fast_load probe)."""
        if context.context and context.binaryView and context.binaryView.parent_view:
            try:
                data = context.binaryView.parent_view.read(
                    0, context.binaryView.parent_view.length)
                PE(data=data, fast_load=True)
                return True
            except PEFormatError:
                return False
            except:
                raise
        return False
# Register the "SEH Helper" UI action and expose it in the Tools menu.
UIAction.registerAction("SEH Helper")
UIActionHandler.globalActions().bindAction(
    "SEH Helper", UIAction(SEHWidget.createPane, SEHWidget.canCreatePane)
)
Menu.mainMenu("Tools").addAction("SEH Helper", "SEH Helper")
| EliseZeroTwo/SEH-Helper | __init__.py | __init__.py | py | 11,783 | python | en | code | 77 | github-code | 90 |
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets, QtCore  # type: ignore
from pineboolib.core import decorators
from typing import Any, Union
class QDateEdit(QtWidgets.QDateEdit):
    """Qt3-style compatibility wrapper around QtWidgets.QDateEdit.

    Dates are exposed as ISO strings (yyyy-MM-dd) through the ``date``
    property, and the display format follows a configurable separator.
    """
    _parent = None      # widget passed to the constructor
    _date = None        # NOTE(review): appears unused; ``date_`` holds the ISO string
    separator_ = "-"    # separator used in the display format
    def __init__(self, parent=None, name=None) -> None:
        super(QDateEdit, self).__init__(parent)
        super(QDateEdit, self).setDisplayFormat("dd-MM-yyyy")
        if name:
            self.setObjectName(name)
        self.setSeparator("-")
        self._parent = parent
        # Cache the initial date as an ISO string.
        self.date_ = super(QDateEdit, self).date().toString(QtCore.Qt.ISODate)
        # if not project.DGI.localDesktop():
        #    project.DGI._par.addQueque("%s_CreateWidget" % self._parent.objectName(), "QDateEdit")
    def getDate(self) -> Any:
        """Return the date as 'yyyy-MM-dd', or None.

        NOTE(review): "2000-01-01" is treated as an 'unset' sentinel and
        mapped to None — confirm that is the intended semantics.
        """
        ret = super(QDateEdit, self).date().toString(QtCore.Qt.ISODate)
        if ret != "2000-01-01":
            return ret
        else:
            return None
    def setDate(self, v: Union[str, Any]) -> None:
        """Accept a QDate-like object (via toString) or a date string and
        store its first 10 characters interpreted as yyyy-MM-dd."""
        if not isinstance(v, str):
            if hasattr(v, "toString"):
                v = v.toString("yyyy%sMM%sdd" % (self.separator(), self.separator()))
            else:
                v = str(v)
        date = QtCore.QDate.fromString(v[:10], "yyyy-MM-dd")
        super(QDateEdit, self).setDate(date)
        # if not project.DGI.localDesktop():
        #    project.DGI._par.addQueque("%s_setDate" % self._parent.objectName(), "QDateEdit")
    date = property(getDate, setDate)  # type: ignore
    @decorators.NotImplementedWarn
    def setAutoAdvance(self, b):
        pass
    def setSeparator(self, c) -> None:
        """Change the separator character and refresh the display format."""
        self.separator_ = c
        self.setDisplayFormat("dd%sMM%syyyy" % (self.separator(), self.separator()))
    def separator(self) -> str:
        return self.separator_
    def __getattr__(self, name) -> Any:
        # Fallback lookup; mirrors the ``date`` property for code paths
        # where attribute resolution bypasses the class property.
        if name == "date":
            return super(QDateEdit, self).date().toString(QtCore.Qt.ISODate)
| deavid/pineboo | pineboolib/qt3_widgets/qdateedit.py | qdateedit.py | py | 1,949 | python | en | code | 4 | github-code | 90 |
# Deadline-feasibility check: run the n tasks in earliest-deadline-first
# order and verify every task finishes by its deadline.
n=int(input())
tab = []
for i in range(n):
    a,b = map(int,input().split())  # a: duration, b: deadline
    tab+=[[a,b]]
def cle(x):
    """Sort key: the task's deadline."""
    return x[1]
tab.sort(key = cle)
ans = "Yes"
temps = 0  # elapsed time
for a,b in tab:
    temps+=a
    if temps>b: ans = "No"
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02996/s594844640.py | s594844640.py | py | 223 | python | en | code | 0 | github-code | 90 |
# Double the largest of a, b, c exactly k times, then print the total sum.
a, b, c = map(int, input().split())
k = int(input())
ABC = [a, b, c]
MAX = max(ABC)
rest = sum(ABC) - MAX
MAX <<= k  # doubling k times == multiplying by 2**k
print(rest + MAX)
# Print "Four" if any of the N tokens is "Y", otherwise "Three".
N=int(input())
S=input().split()
print("Four" if "Y" in S else "Three")
# This code is written by harsh.
from collections import namedtuple
if __name__ == "__main__":
    # HackerRank "Collections.namedtuple": average the MARKS column.
    n = int(input())
    fields = input()  # whitespace-separated column names; one must be MARKS
    s = namedtuple("s", fields)
    totalMarks = 0
    for i in range(n):
        temp = s(*input().split())
        totalMarks += int(temp.MARKS)
    print(totalMarks / n)
| harshsinghs1058/python_hackerrank_solutions | Collections_namedtuple_.py | Collections_namedtuple_.py | py | 311 | python | en | code | 1 | github-code | 90 |
class Solution:
    """Palindrome partitioning: all ways to split a string into palindromes."""

    def partition(self, s):
        """Return every partition of ``s`` whose pieces are all palindromes."""
        ans = []
        n = len(s)
        self.dfs([], 0, n, ans, s)
        return ans

    def dfs(self, cur, start, end, ans, s):
        """Extend the partial partition ``cur`` (covering s[:start]) depth-first."""
        if start >= end:
            ans.append(cur)
            return
        for i in range(start, end):
            piece = s[start:i + 1]
            if self.isP(piece):
                self.dfs(cur + [piece], i + 1, end, ans, s)

    def isP(self, text):
        """True iff ``text`` reads the same forwards and backwards.

        (Parameter renamed from ``str`` to avoid shadowing the builtin.)
        """
        return text == text[::-1]
| samshaq19912009/Leetcode_in_Python_my_practise | dfs/palindrome_partition.py | palindrome_partition.py | py | 466 | python | en | code | 0 | github-code | 90 |
import pathlib
import re

# Directory of this script; mbox.txt is expected one level up.
path = str(pathlib.Path(__file__).parent.absolute())

while True:
    pattern = input('Enter a regular expression: ')
    if pattern == 'exit':
        break
    try:
        # Re-open per query so every search scans the file from the start;
        # the with-statement guarantees the handle is closed.
        with open(path + r'/../mbox.txt') as fhand:
            try:
                counter = 0
                for line in fhand:
                    if re.search(pattern, line):
                        counter += 1
                print('mbox.txt had {} lines that matched {}'.format(counter, pattern))
            except re.error:
                print('The pattern is not valid')
    except FileNotFoundError:
        print('File mbox.txt not found')
        break
| macmgeneration/05-Python | Topics/12. StdLib/solutions/01Grep.py | 01Grep.py | py | 668 | python | en | code | 4 | github-code | 90 |
import json
import warnings
from os import path
import dask.dataframe as dd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.utils.rnn import pad_sequence
def ts_to_tensors(sample):
    """Convert one object's light-curve DataFrame to tensors.

    Parameters:
        sample: DataFrame with at least ``mjd`` (float times) and
            ``passband`` (int band index in [0, 6)) columns.

    Returns:
        times: float tensor of shape (T,).
        masks: bool tensor of shape (T, 6); masks[i, b] is True iff
            observation i was taken in passband b.  Bool replaces the
            deprecated uint8/ByteTensor so downstream boolean indexing
            stays valid on current PyTorch.
    """
    times = torch.tensor(sample["mjd"].values, dtype=torch.float)
    bands = torch.tensor(sample["passband"].values, dtype=torch.long)
    masks = torch.zeros(times.shape[0], 6, dtype=torch.bool)
    masks[torch.arange(times.shape[0]), bands] = True
    return times, masks
class GaussianCurve(nn.Module):
    """Batched Gaussian-bump light-curve model.

    Per object: f(t) = fm[band] * exp(-lambda * (t - t0)^2) + f0, with a
    shared t0/lambda/f0 across bands and one amplitude fm per band.
    """

    def __init__(self, batch_size=20, n_bands=6):
        super().__init__()
        self._batch_size = batch_size
        self._n_bands = n_bands
        # Initial values: f0=0, lambda=10, t0=0.5 (mid-survey), fm=1.
        self.params = nn.ParameterDict(
            {
                "f0": nn.Parameter(torch.zeros(batch_size, dtype=torch.float)),
                "lambda": nn.Parameter(torch.full((batch_size,), 10.0)),
                "t0": nn.Parameter(torch.full((batch_size,), 0.5)),
                "fm": nn.Parameter(torch.ones(batch_size, n_bands, dtype=torch.float)),
            }
        )

    def set_refine(self):
        """Reset every parameter except t0 for a refinement pass."""
        self.params["f0"].data.zero_()
        self.params["lambda"].data.fill_(50)
        self.params["fm"].data.fill_(1)

    def forward(self, times, masks=None):
        """Evaluate the curve.

        times: (batch, T) -> (batch, T, n_bands); when ``masks`` is given,
        only the masked (time, band) values are returned, flattened.
        """
        centre = self.params["t0"].unsqueeze(1)
        rate = torch.clamp(self.params["lambda"].unsqueeze(1), min=0)
        bump = torch.exp(-((times - centre) ** 2) * rate)
        flux = self.params["fm"].unsqueeze(1) * bump.unsqueeze(2) \
            + self.params["f0"].reshape(-1, 1, 1)
        if masks is None:
            return flux
        return flux[masks]

    def predict(self, times, n):
        """Evaluate object ``n`` on a 1-D time grid -> (T, n_bands)."""
        rate = torch.clamp(self.params["lambda"][n], min=0)
        bump = torch.exp(-((times - self.params["t0"][n]) ** 2) * rate)
        return (
            self.params["fm"][n, :].reshape(1, -1) * bump.reshape(-1, 1)
            + self.params["f0"][n]
        )
class ExpRatioCurve(nn.Module):
    """Batched rise/fall exponential light-curve with per-band peak time t0
    and amplitude fm, plus shared rise/fall rates and baseline f0.
    """

    def __init__(self, batch_size=20, n_bands=6, refine=False):
        super().__init__()
        # Refinement runs start with much steeper rise/fall rates.
        rate_init = 50.0 if refine else 1.0
        self.params = nn.ParameterDict(
            {
                "f0": nn.Parameter(torch.zeros(batch_size, dtype=torch.float)),
                "lambda_rise": nn.Parameter(torch.full((batch_size,), rate_init)),
                "lambda_fall": nn.Parameter(torch.full((batch_size,), rate_init)),
                "t0": nn.Parameter(torch.full((batch_size, n_bands), 0.5)),
                "fm": nn.Parameter(torch.ones(batch_size, n_bands, dtype=torch.float)),
            }
        )

    def set_init_params(self, init_t0, init_lambda=None):
        """Seed every band's t0 from ``init_t0`` (clamped to [0, 1]) and,
        optionally, both decay rates from ``init_lambda``."""
        self.params["t0"].data.zero_()
        self.params["t0"].data += torch.clamp(init_t0, 0, 1).reshape(-1, 1)
        if init_lambda is not None:
            self.params["lambda_rise"].data.zero_()
            self.params["lambda_rise"].data += init_lambda
            self.params["lambda_fall"].data.zero_()
            self.params["lambda_fall"].data += init_lambda

    def forward(self, times, masks=None):
        """Evaluate the curve.

        times: (batch, T) -> (batch, T, n_bands); when ``masks`` is given,
        only the masked (time, band) values are returned, flattened.
        """
        dt = times.unsqueeze(2) - self.params["t0"].unsqueeze(1)
        fall = torch.clamp(self.params["lambda_fall"].reshape(-1, 1, 1), min=0)
        rise = torch.clamp(self.params["lambda_rise"].reshape(-1, 1, 1), min=0)
        # exp(-dt*fall) / (1 + exp(-dt*rise)), evaluated in log space.
        shape = torch.exp(-dt * fall - torch.log1p(torch.exp(-dt * rise)))
        flux = self.params["fm"].unsqueeze(1) * shape \
            + self.params["f0"].reshape(-1, 1, 1)
        if masks is None:
            return flux
        return flux[masks]

    def predict(self, times, n):
        """Evaluate object ``n`` on a 1-D time grid -> (T, n_bands)."""
        dt = times.unsqueeze(1) - self.params["t0"][n].unsqueeze(0)
        fall = torch.clamp(self.params["lambda_fall"][n], min=0)
        rise = torch.clamp(self.params["lambda_rise"][n], min=0)
        shape = torch.exp(-dt * fall - torch.log1p(torch.exp(-dt * rise)))
        return self.params["fm"][n, :].reshape(1, -1) * shape + self.params["f0"][n]
# Fixed MJD range used to normalise observation times to [0, 1].
NORMALISATION_PARAMS = {"mjd_min": 59580.0338, "mjd_max": 60674.363}


def penalty(t0, k=1):
    """Mean per-object variance penalty keeping each object's per-band t0
    values close to one another; ``k`` scales its strength."""
    centred = t0 - t0.mean(dim=1, keepdim=True)
    return (k * (centred ** 2).sum(dim=1)).mean()
def batch_fit(
    chunk,
    n_iters_gaussian=5000,
    n_iters_exp=10000,
    sample_plot=False,
    augmentation_set=False,
):
    """Fit a parametric light-curve to every object in ``chunk`` on the GPU.

    Two-stage fit: a Gaussian bump first (mainly to locate the peak t0),
    then an ExpRatioCurve seeded from that t0 and refined.  Returns a
    DataFrame with one row of fitted parameters per object.

    NOTE(review): ``chunk`` is modified in place (mjd, flux and flux_err
    are normalised) and every tensor is moved to CUDA — a GPU is required.

    Parameters:
        chunk: observations with columns mjd, passband, flux, flux_err,
            detected and the id column selected by ``augmentation_set``.
        n_iters_gaussian: Adam steps for the Gaussian stage.
        n_iters_exp: Adam steps for the exp-ratio stage.
        sample_plot: save diagnostic plots for the first 10 objects.
        augmentation_set: group by ``augmentation_id`` instead of
            ``object_id``.
    """
    # normalise mjd to [0, 1] using the fixed survey-wide range
    # mjd_min = chunk['mjd'].min()
    # mjd_max = chunk['mjd'].max()
    group_col = "augmentation_id" if augmentation_set else "object_id"
    chunk["mjd"] = (chunk["mjd"] - NORMALISATION_PARAMS["mjd_min"]) / (
        NORMALISATION_PARAMS["mjd_max"] - NORMALISATION_PARAMS["mjd_min"]
    )
    # normalise flux and flux_err per object
    flux_mean = chunk.groupby(group_col)["flux"].transform("mean")
    flux_std = chunk.groupby(group_col)["flux"].transform("std")
    chunk["flux"] = (chunk["flux"] - flux_mean) / flux_std
    chunk["flux_err"] = chunk["flux_err"] / flux_std
    print()
    print("Tensor conversion...")
    selected = chunk
    groups = selected.groupby(group_col)
    times_list = []
    masks_list = []
    for object_id, group in groups:
        times, masks = ts_to_tensors(group)
        times_list.append(times)
        masks_list.append(masks)
    # Pad all objects to the longest light-curve; ``masks`` doubles as the
    # selector for the observed (time, band) positions.
    times = pad_sequence(times_list, batch_first=True)
    masks = pad_sequence(masks_list, batch_first=True)
    fluxes = torch.tensor(selected["flux"].values, dtype=torch.float)
    detected = torch.tensor(selected["detected"].values, dtype=torch.float)
    times = times.cuda()
    masks = masks.cuda()
    fluxes = fluxes.cuda()
    detected = detected.cuda()
    print("Gaussian curve fitting...")
    curve = GaussianCurve(batch_size=selected[group_col].unique().shape[0]).cuda()
    optimiser = optim.Adam(lr=3e-2, params=curve.parameters())
    # scheduler = optim.lr_scheduler.ExponentialLR(optimiser, gamma=1-1e-4)
    # "detected" observations get fac times the weight of undetected ones.
    fac = 5
    sample_weights = detected * (fac - 1) + 1
    for i in range(n_iters_gaussian):
        f_pred = curve.forward(times, masks)
        loss = torch.mean(sample_weights * (f_pred - fluxes) ** 2)
        curve.zero_grad()
        loss.backward()
        # Zero NaN gradients so one bad parameter cannot poison the whole
        # batched update.
        for k, param in curve.params.items():
            if torch.sum(torch.isnan(param.grad)) > 0:
                param.grad.data[torch.isnan(param.grad)] = 0
                warnings.warn(f"NaN encountered in grad of {k}", RuntimeWarning)
        nn.utils.clip_grad_norm_(
            curve.parameters(), 1
        )  # clip grads to ensure stability
        optimiser.step()
        # scheduler.step()
        if i % 1000 == 0:
            print(f"loss={loss.detach().cpu().numpy()}")
    print("Exp ratio curve fitting...")
    curve2 = ExpRatioCurve(
        batch_size=selected[group_col].unique().shape[0], refine=True
    ).cuda()
    # Seed the per-band peak times with the Gaussian fit's t0.
    curve2.set_init_params(init_t0=curve.params["t0"].data)
    del optimiser, curve
    optimiser = optim.Adam(lr=1e-2, params=curve2.parameters())
    scheduler = optim.lr_scheduler.ExponentialLR(optimiser, gamma=1 - 1e-4)
    # fac = 5
    # sample_weights = detected * (fac - 1) + 1
    for i in range(n_iters_exp):
        f_pred = curve2.forward(times, masks)
        # Penalty keeps each object's per-band t0 values close together.
        p = penalty(curve2.params["t0"], k=10)
        loss = torch.mean(sample_weights * (f_pred - fluxes) ** 2) + p
        curve2.zero_grad()
        loss.backward()
        for k, param in curve2.params.items():
            if torch.sum(torch.isnan(param.grad)) > 0:
                param.grad.data[torch.isnan(param.grad)] = 0
                warnings.warn(f"NaN encountered in grad of {k}", RuntimeWarning)
        nn.utils.clip_grad_norm_(
            curve2.parameters(), 1
        )  # clip grads to ensure stability
        optimiser.step()
        scheduler.step()
        if i % 1000 == 0:
            print(
                f"loss={loss.detach().cpu().numpy():.5f}, penalty={p.detach().cpu().numpy():5f}"
            )
    if sample_plot:
        # Diagnostic overlay of data points (with error bars) and the
        # fitted per-band curves for the first 10 objects.
        for n in range(10):
            obj = selected[selected[group_col] == selected[group_col].unique()[n]]
            f, ax = plt.subplots(figsize=(12, 6))
            colors = ["red", "orange", "yellow", "green", "blue", "purple"]
            ax.scatter(
                x=obj["mjd"],
                y=obj["flux"],
                c=[colors[b] for b in obj["passband"]],
                s=10,
            )
            ax.vlines(
                obj["mjd"],
                obj["flux"] - obj["flux_err"],
                obj["flux"] + obj["flux_err"],
                colors=[colors[b] for b in obj["passband"]],
                linewidth=1,
            )
            ax.autoscale(False)
            t_range = np.linspace(-0.2, 1.2, 1000)
            y = (
                curve2.predict(torch.tensor(t_range, dtype=torch.float).cuda(), n)
                .detach()
                .cpu()
                .numpy()
            )
            for band in range(6):
                ax.plot(t_range, y[:, band], c=colors[band], alpha=0.5)
            ax.set_title(
                f"object {obj[group_col].iloc[0]}, "
                f'tau_rise: {(1 / curve2.params["lambda_rise"][n]).detach().cpu().numpy()}, '
                f'tau_fall: {(1 / curve2.params["lambda_fall"][n]).detach().cpu().numpy()}\n'
                f't0: {curve2.params["t0"][n, :].detach().cpu().numpy()}\n'
                f'fm: {curve2.params["fm"][n, :].detach().cpu().numpy()}'
            )
            plt.savefig(f"experiments/run_data/plots/plot_{n}.png")
    print("Saving fitted parameters...")
    # Final weighted squared error, averaged per object for the report.
    raw_loss = sample_weights * (f_pred - fluxes) ** 2
    # NOTE(review): this groupby aligns raw_loss with selected[group_col]
    # by index — assumes ``chunk`` arrived with a default RangeIndex.
    per_obj_loss = (
        pd.Series(raw_loss.detach().cpu().numpy()).groupby(selected[group_col]).mean()
    )
    if augmentation_set:
        params_df = pd.DataFrame(
            {
                "augmentation_id": selected[
                    "augmentation_id"
                ].unique(),  # use only for augmentation
                "exp_ratio_fitting_loss": per_obj_loss,
                "tau_rise": 1
                / curve2.params["lambda_rise"].data.detach().cpu().numpy(),
                "tau_fall": 1
                / curve2.params["lambda_fall"].data.detach().cpu().numpy(),
                "f0": curve2.params["f0"].data.detach().cpu().numpy(),
            }
        )
    else:
        params_df = pd.DataFrame(
            {
                "object_id": selected[
                    "object_id"
                ].unique(),  # use only for non-augmentation
                "exp_ratio_fitting_loss": per_obj_loss,
                "tau_rise": 1
                / curve2.params["lambda_rise"].data.detach().cpu().numpy(),
                "tau_fall": 1
                / curve2.params["lambda_fall"].data.detach().cpu().numpy(),
                "f0": curve2.params["f0"].data.detach().cpu().numpy(),
            }
        )
    # One fm_<band> and t0_<band> column per passband.
    fm = curve2.params["fm"].data.detach().cpu().numpy()
    t0 = curve2.params["t0"].data.detach().cpu().numpy()
    for b in range(6):
        params_df[f"fm_{b}"] = fm[:, b]
        params_df[f"t0_{b}"] = t0[:, b]
    # Free GPU memory before the next chunk.
    del times, masks, fluxes, detected, curve2, optimiser, scheduler
    return params_df
def main():
    """Run curve fitting over the training set, one partition of the 10x
    augmented set and the 30x GP-augmented class-52 set, writing fitted
    parameters to CSV under OUTPUT_DIR.
    """
    torch.manual_seed(7777)
    OUTPUT_DIR = "data/gp_augmented"
    N_gaussian = 2000
    N_exp = 10000
    """
    Fitting training set
    """
    print("*" * 20 + "\nReading data...")
    train_series = pd.read_csv("data/training_set.csv")
    params_df = batch_fit(train_series)
    params_df.to_csv(path.join(OUTPUT_DIR, "train_exp_ratio_fitted.csv"), index=False)
    """
    Fitting aug 10x set
    """
    print("Reading data...")
    test_series = dd.read_csv("data/augmented/augmented_*.csv", blocksize=None)
    for i, part in tqdm(
        enumerate(test_series.partitions), total=test_series.npartitions
    ):
        chunk = part.compute().reset_index()
        params_df = batch_fit(
            chunk, n_iters_gaussian=N_gaussian, n_iters_exp=N_exp, sample_plot=True
        )
        # NOTE(review): this break means only partition 0 is ever fitted —
        # confirm whether the remaining partitions should be processed too.
        break
    params_df.to_csv(
        path.join(OUTPUT_DIR, f"test_exp_ratio_fitted_{i}.csv"), index=False
    )
    """
    Fitting aug 30x set
    """
    print("*" * 20 + "\nReading data...")
    train_series = pd.read_csv(
        "data/gp_augmented/gp_augmented_ddf_to_nonddf_class_52.csv"
    )
    params_df = batch_fit(train_series, sample_plot=True, augmentation_set=True)
    # NOTE(review): this overwrites the train_exp_ratio_fitted.csv written
    # above for the raw training set — confirm the file name is intended.
    params_df.to_csv(path.join(OUTPUT_DIR, "train_exp_ratio_fitted.csv"), index=False)
if __name__ == "__main__":
    # Entry point: run all three fitting passes.
    main()
39087886647 | import unittest
import json
from app import app, menu
class TestMenuManagement(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
app.config['TESTING'] = True
def test_add_dish(self):
response = self.app.post('/add_dish', data=json.dumps({'dish_id': '1', 'name': 'Burger', 'price': 10, 'available': True}),
content_type='application/json')
self.assertEqual(response.status_code, 200)
def test_get_dish(self):
response = self.app.post('/add_dish', data=json.dumps({'dish_id': '2', 'name': 'Pizza', 'price': 12, 'available': True}),
content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.app.get('/get_dish/2')
data = json.loads(response.get_data(as_text=True))
self.assertEqual(data['name'], 'Pizza')
# run the test suite when this module is executed directly
if __name__ == '__main__':
    unittest.main()
| imSAJJAKALI/prompt_eng | S2D3/LEVEL1/tests/test_menu.py | test_menu.py | py | 956 | python | en | code | 1 | github-code | 90 |
71714963817 | from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import telegram
import logging
import time
import os
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
import dialogflow_v2 as dialogflow
import handler_tools
from handler_tools import MyLogsHandler
def echo(bot, update):
    """Forward an incoming Telegram message to Dialogflow and send back its answer."""
    chat_id = update.message.chat_id
    text = update.message.text
    project_id = os.environ['project_id']
    try:
        answer = handler_tools.detect_intent_texts(project_id, chat_id, text, 'ru-RU')
        update.message.reply_text(answer)
    except Exception:
        logger.exception("Проблема при получении и отправке сообщений")
def start(bot, update):
    # /start command handler: reply with a liveness message
    update.message.reply_text('Ура! Я живой!')
if __name__ == '__main__':
    # root logger with the project's custom Telegram log handler
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    logger.addHandler(MyLogsHandler())
    logger.info("Бот для общения в Телеграме запущен")
    try:
        # bot token comes from the environment; KeyError here is caught below
        telegram_token = os.environ['telegram_token']
        updater = Updater(telegram_token)
        dp = updater.dispatcher
        dp.add_handler(CommandHandler("start", start))
        echo_handler = MessageHandler (Filters.text, echo)
        dp.add_handler(echo_handler)
        # long-poll Telegram until interrupted
        updater.start_polling()
        updater.idle()
    except Exception:
        logger.exception('Возникла ошибка в боте для общения в Телеграме ↓')
| asvirin/conversation-bots | bot-tg.py | bot-tg.py | py | 1,594 | python | en | code | 23 | github-code | 90 |
17981678739 | N=int(input())
# assumes N (the element count) was read on the preceding line
A=list(map(int,input().split()))
# elements at even / odd 0-based positions
X=[A[i] for i in range(0,N,2)]
Y=[A[i] for i in range(1,N,2)]
if N%2:
    # odd N: even-position elements, reversed, come first
    A=X[::-1]
    B=Y
else:
    # even N: odd-position elements, reversed, come first
    A=Y[::-1]
    B=X
print(" ".join(map(str,A+B)))
| Aasthaengg/IBMdataset | Python_codes/p03673/s841084858.py | s841084858.py | py | 201 | python | en | code | 0 | github-code | 90 |
1746549579 | import torch
import numpy as np
from lib.Uncertainty import normalize_batch_uncertainty
def memory_computation(unc_vals,output_dir,rel_class_num,
                       obj_class_num,obj_feature_dim=1024,
                       rel_feature_dim=1936,obj_weight_type='both',
                       rel_weight_type='both',
                       obj_mem=False,obj_unc=False,
                       include_bg_mem = False):
    """Accumulate per-class "memory" matrices from saved embeddings, weighted
    by per-sample uncertainty values.

    Embedding batches are loaded from ``<output_dir>obj_embeddings/<i>.npy``
    and ``<output_dir>rel_embeddings/<i>.npy`` (NOTE(review): output_dir is
    assumed to end with a path separator — confirm). For each batch, rows of
    the embedding matrix are summed into the class rows selected by the
    non-zero entries of the uncertainty matrices.

    Returns:
        (rel_memory, obj_memory): dict of per-relation class memories
        (num_classes x rel_feature_dim each) and the object class memory
        (obj_class_num x obj_feature_dim).
    """
    unc_vals.stats2()
    unc_list_rel = unc_vals.unc_list_rel
    unc_list_obj = unc_vals.unc_list_obj
    cls_rel_uc = unc_vals.cls_rel_uc
    cls_obj_uc = unc_vals.cls_obj_uc
    obj_emb_path = output_dir+'obj_embeddings/'
    rel_emb_path = output_dir+'rel_embeddings/'
    # drop the background class row unless explicitly requested
    if not include_bg_mem:
        obj_class_num = obj_class_num -1
    obj_norm_factor = torch.zeros(obj_class_num)
    obj_memory = torch.zeros(obj_class_num,obj_feature_dim)
    rel_norm_factor = {}
    rel_memory = {}
    for rel in rel_class_num.keys():
        rel_norm_factor[rel] = torch.zeros(rel_class_num[rel])
        rel_memory[rel] = torch.zeros(rel_class_num[rel],rel_feature_dim)
    # map the weight-type strings onto the uncertainty keys to iterate;
    # None means "unweighted" (mask uncertainties to 1 and use 'al' entries)
    if obj_weight_type == 'both':
        # obj_all_u = ['al','ep']
        obj_all_u = ['both']
    elif obj_weight_type == 'al':
        obj_all_u = ['al']
    elif obj_weight_type == 'ep':
        obj_all_u = ['ep']
    else:
        obj_all_u = None
    if rel_weight_type == 'both':
        # rel_all_u = ['al','ep']
        rel_all_u = ['both']
    elif rel_weight_type == 'al':
        rel_all_u = ['al']
    elif rel_weight_type == 'ep':
        rel_all_u = ['ep']
    else:
        rel_all_u = None
    for i in unc_list_rel.keys() :
        rel_features = torch.tensor(np.load(rel_emb_path+str(i)+'.npy',allow_pickle=True))
        if not obj_all_u and obj_mem:
            # unweighted object path: binarise the uncertainty matrix so every
            # non-zero entry contributes its embedding row with weight 1
            obj_features = torch.tensor(np.load(obj_emb_path+str(i)+'.npy',allow_pickle=True))
            batch_unc = torch.tensor(unc_list_obj[i]['al']) # obj_num x c
            index,batch_classes = torch.where(batch_unc!=0)
            batch_unc[index,batch_classes] = 1
            obj_memory += torch.matmul(batch_unc.T,obj_features)
            unq_batch_classes = torch.unique(batch_classes)
            for k in unq_batch_classes:
                unq_idx = torch.where(batch_classes == k)[0]
                obj_norm_factor[k] = obj_norm_factor[k] + torch.sum(batch_unc[index[unq_idx],batch_classes[unq_idx]])
            # for idx,k in zip(index,batch_classes) :
            #     obj_norm_factor[k] = obj_norm_factor[k] + batch_unc[idx,k]
        if not rel_all_u:
            # unweighted relation path: same binarised accumulation per relation
            for rel in rel_class_num.keys():
                batch_unc = torch.tensor(unc_list_rel[i][rel]['al'])
                index,batch_classes = torch.where(batch_unc!=0)
                batch_unc[index,batch_classes] = 1
                rel_memory[rel] += torch.matmul(batch_unc.T,rel_features)
                unq_batch_classes = torch.unique(batch_classes)
                for k in unq_batch_classes:
                    unq_idx = torch.where(batch_classes == k)[0]
                    rel_norm_factor[rel][k] = rel_norm_factor[rel][k] + torch.sum(batch_unc[index[unq_idx],batch_classes[unq_idx]])
                # for idx,k in zip(index,batch_classes) :
                #     rel_norm_factor[rel][k] = rel_norm_factor[rel][k] + batch_unc[idx,k]
        else:
            # weighted path: normalise this batch's uncertainties first, then
            # use them directly as accumulation weights
            unc_list_rel[i],unc_list_obj[i] = normalize_batch_uncertainty(unc_list_rel[i],cls_rel_uc,
                unc_list_obj[i],cls_obj_uc,obj_unc=obj_unc, background_mem=include_bg_mem,
                weight_type = rel_all_u)
            if obj_unc and obj_mem:
                # NOTE(review): obj_features is only loaded in the unweighted
                # branch above, so this path can hit an unbound name when
                # obj_all_u is set — confirm intended call configurations
                for u in obj_all_u :
                    batch_unc = torch.tensor(unc_list_obj[i][u]) # obj_num x c
                    index,batch_classes = torch.where(batch_unc!=0)
                    obj_memory += torch.matmul(batch_unc.T,obj_features)
                    # unq_batch_classes = torch.unique(batch_classes)
                    # for k in unq_batch_classes:
                    #     unq_idx = torch.where(batch_classes == k)[0]
                    #     obj_norm_factor[k] = obj_norm_factor[k] + torch.sum(batch_unc[index[unq_idx],batch_classes[unq_idx]])
                    # for idx,k in zip(index,batch_classes) :
                    #     obj_norm_factor[k] = obj_norm_factor[k] + batch_unc[idx,k]
            for u in rel_all_u:
                for rel in rel_class_num.keys():
                    batch_unc = torch.tensor(unc_list_rel[i][rel][u])
                    index,batch_classes = torch.where(batch_unc!=0)
                    rel_memory[rel] += torch.matmul(batch_unc.T,rel_features)
                    # unq_batch_classes = torch.unique(batch_classes)
                    # for k in unq_batch_classes:
                    #     unq_idx = torch.where(batch_classes == k)[0]
                    #     rel_norm_factor[rel][k] = rel_norm_factor[rel][k] + torch.sum(batch_unc[index[unq_idx],batch_classes[unq_idx]])
                    # for idx,k in zip(index,batch_classes) :
                    #     rel_norm_factor[rel][k] = rel_norm_factor[rel][k] + batch_unc[idx,k]
    # 'simple' weighting: divide each non-empty class row by its total weight
    # (i.e. turn the accumulated sums into averages)
    if obj_mem and obj_weight_type=='simple':
        tmp = obj_memory
        nz_idx = torch.where(obj_norm_factor!=0)
        tmp[nz_idx] = tmp[nz_idx]/(obj_norm_factor[nz_idx].unsqueeze(-1).repeat(1,obj_feature_dim))
        obj_memory = tmp
        # obj_memory = obj_memory/(obj_norm_factor.unsqueeze(-1).repeat(1,obj_feature_dim))
    if rel_weight_type == 'simple':
        for rel in rel_memory.keys():
            tmp = rel_memory[rel]
            nz_idx = torch.where(rel_norm_factor[rel]!=0)
            tmp[nz_idx] = tmp[nz_idx]/(rel_norm_factor[rel][nz_idx].unsqueeze(-1).repeat(1,rel_feature_dim))
            rel_memory[rel] = tmp
    return rel_memory,obj_memory
| sayaknag/unbiasedSGG | lib/Memory.py | Memory.py | py | 6,007 | python | en | code | 12 | github-code | 90 |
38304816960 |
def group_sum6(start, nums, target):
    """Return True when some subset of nums[start:] sums to *target*,
    subject to the constraint that every 6 in nums[start:] must be chosen."""
    if start >= len(nums):
        return target == 0
    remaining = target - nums[start]
    took_current = group_sum6(start + 1, nums, remaining)
    if nums[start] == 6:
        # a 6 is mandatory: there is no branch that skips it
        return took_current
    # otherwise the current element may be taken or skipped
    return took_current or group_sum6(start + 1, nums, target)
print(group_sum6(0, [2, 4, 8], 8))
print(group_sum6(0, [2, 4, 8], 9))
print(group_sum6(0, [2, 4, 8], 7))
| jemtca/CodingBat | Python/Recursion-2/group_sum6.py | group_sum6.py | py | 726 | python | en | code | 0 | github-code | 90 |
41298152118 | import numpy as np
class Grid(object):
    """Escape-time fractal grid.

    For each complex point c on the grid, counts how many applications of
    ``funct`` (starting from 0: ``a = funct(a, c)``) are needed before
    ``abs(a)`` exceeds ``threshold``.

    With ``elementwise=True``, ``fill()`` visits one point at a time
    (straightforward but slow); otherwise it uses a vectorised NumPy sweep.
    """
    DEFAULT_THRESHOLD = 4.0
    DEFAULT_MAX_ITERATIONS = 255
    DEFAULT_SCALE = 3.0
    MAGNIFICATION_PER_CLICK = 0.5
    # iteration rules: the classic Mandelbrot map and a power-tower variant
    MANDLEBROT = lambda x, y: x ** 2 + y
    TOWER = lambda x, y: x ** x + y
    def __init__(self, width, height, elementwise=False, centre=complex(0, 0),
                 scale=DEFAULT_SCALE, threshold=DEFAULT_THRESHOLD,
                 iterations=DEFAULT_MAX_ITERATIONS, funct=MANDLEBROT):
        self.centre, self.scale = centre, scale
        self.elementwise, self.threshold = elementwise, threshold
        self.width, self.height = width, height
        self.funct = funct
        self.halfwidth, self.halfheight = width // 2, height // 2
        self.widthrange, self.heightrange = range(width), range(height)
        # iteration numbers start at 1; checknum returns False for "never escaped"
        self.iterations = range(1, iterations)
        self.aspectratio = height / width if width > 0 else 0
    def checknum(self, number):
        """Return the 1-based iteration at which *number* escapes, or False."""
        newval = number
        for iteration in self.iterations:
            newval = self.funct(newval, number)
            if abs(newval) > self.threshold:
                return iteration
        return False
    def zoom(self, pixelposition, magnification=MAGNIFICATION_PER_CLICK):
        """Re-centre on the complex point under *pixelposition* and zoom in."""
        # BUG FIX: the original referenced the module-level `grid` instance
        # instead of `self`, so zoom() raised NameError (or moved the wrong
        # grid) for any instance other than the script's global one.
        self.centre = complex(self.fetchreal(pixelposition[0]),
                              self.fetchimaginary(pixelposition[1]))
        self.scale *= magnification
    def fill(self, displaysurface=None):
        """Compute escape counts for the whole grid.

        With *displaysurface* the counts are drawn onto it; otherwise (only in
        elementwise mode) the summed escape counts are returned.
        NOTE(review): the vectorised branch uses `pygame`, which this module
        only imports under ``if __name__ == '__main__'`` — calling
        fill(displaysurface) from an importer raises NameError; confirm.
        """
        if self.elementwise:
            try:
                result = 0
                for xpixel in self.widthrange:
                    x = self.fetchreal(xpixel)
                    for ypixel in self.heightrange:
                        y = self.fetchimaginary(ypixel)
                        exclusion = self.checknum(complex(x, y))
                        if exclusion:
                            if displaysurface:
                                redandgreen = 256 - exclusion
                                displaysurface.set_at((xpixel, ypixel),
                                                      (redandgreen, redandgreen, 255))
                            else:
                                result += exclusion
                return result
            except ZeroDivisionError:
                raise RuntimeError("Grid too small to fill") from ZeroDivisionError
        elif displaysurface:
            # complex coordinates for every pixel, broadcast to (width, height)
            complexvals = (np.linspace(self.fetchreal(), self.fetchreal(self.width),
                                       self.width).reshape((self.width, 1)) + 1j *
                           np.linspace(self.fetchimaginary(),
                                       self.fetchimaginary(self.height),
                                       self.height).reshape((1, self.height)))
            stillchecking = np.full(complexvals.shape, True, dtype=bool)
            newvalues = np.zeros(complexvals.shape, dtype=np.complex128)
            iterationcounts = np.zeros(newvalues.shape, dtype=int)
            for iteration in self.iterations:
                # advance only the points that have not escaped yet
                newvalues[stillchecking] = self.funct(newvalues[stillchecking],
                                                      complexvals[stillchecking])
                iterationcounts[np.greater(np.abs(newvalues), self.threshold,
                                           out=np.full(complexvals.shape, False),
                                           where=stillchecking)] = iteration
                stillchecking[np.abs(newvalues) > self.threshold] = False
            displaysurface.blit(pygame.surfarray.make_surface(iterationcounts), (0, 0))
            pygame.display.update()
    def fetchreal(self, abscissa=0):
        """Map a pixel x-coordinate onto the real axis."""
        return (self.centre.real + self.scale * (abscissa - self.halfwidth) /
                self.halfwidth)
    def fetchimaginary(self, ordinate=0):
        """Map a pixel y-coordinate onto the imaginary axis."""
        return (self.centre.imag + self.aspectratio * self.scale *
                (ordinate - self.halfheight) / self.halfheight)
if __name__ == '__main__':
    ELEMENTWISE = False
    import pygame
    screen = pygame.display.set_mode((200, 150), 24)
    grid = Grid(200, 150, ELEMENTWISE, funct=Grid.MANDLEBROT)
    grid.fill(screen)
    pygame.display.flip()
    # buttonClear debounces mouse clicks: a new zoom only triggers after
    # the previous button press has been released
    show, buttonClear = True, True
    while show:
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONUP:
                buttonClear = True
            if event.type == pygame.MOUSEBUTTONDOWN:
                if buttonClear:
                    # zoom into the clicked point and redraw
                    grid.zoom(pygame.mouse.get_pos())
                    buttonClear = False
                    screen.fill((0, 0, 0),)
                    pygame.display.flip()
                    grid.fill(screen)
                    pygame.display.flip()
            # any key press or window close exits
            if (event.type == pygame.KEYDOWN) or (event.type == pygame.QUIT):
                show = False
| andysmith2378/projectkirill | main.py | main.py | py | 5,491 | python | en | code | 0 | github-code | 90 |
18185088469 | import sys
# Choose K of the N integers so the product is maximised, print it mod M.
# Strategy: positives and negatives are paired so negative pairs contribute
# positive products; zeros rescue cases where only a negative product is
# otherwise possible.
M = 10 ** 9 + 7
N, K = map(int, input().split())
p_list = []
n_list = []
z_count = 0
# partition the input into positives, negatives, and a zero count
for A in map(int, input().split()):
    if A < 0:
        n_list.append(A)
    elif A > 0:
        p_list.append(A)
    else:
        z_count += 1
p_list.sort()
n_list.sort()
#print(p_list)
#print(n_list)
if len(p_list) + len(n_list) < K:
    # fewer than K non-zero values: some zero must be used, product is 0
    print(0)
else:
    values = []
    if len(p_list) + len(n_list) == K:
        # forced to take every non-zero value; a zero (if any) beats a
        # negative forced product
        values = p_list + n_list
        n_count = 0
        for v in values:
            if v < 0:
                n_count += 1
        if n_count % 2 == 1 and z_count > 0:
            values = [0]
    elif len(p_list) == 0 and K % 2 == 1:
        # no positives and odd K: the product must be negative, so prefer a
        # zero, else take the K negatives closest to zero
        if z_count > 0:
            values = [0]
        else:
            for i in range(min(len(n_list), K)):
                values.append(n_list.pop())
    else:
        # general case: build pairs with positive product, then greedily swap
        # in negative pairs whose product beats the smallest positive pair
        n_list.reverse()
        values_pair = []
        values_left = None
        values_left_count = 0
        if len(p_list) >= K:
            if K % 2 == 1:
                # keep the largest positive unpaired when K is odd
                values_left = p_list.pop()
                values_left_count = 1
                p_count = K - 1
            else:
                p_count = K
            while len(values_pair) * 2 + values_left_count < K:
                v1 = p_list.pop()
                v2 = p_list.pop()
                values_pair.append((v1 * v2, v1, v2))
        else:
            # not enough positives: use all (or all but one) of them, fill the
            # rest with negative pairs (most-negative first, product positive)
            if (K - len(p_list)) % 2 == 1 and len(p_list) > 0:
                p_count = len(p_list) - 1
            else:
                p_count = len(p_list)
            if p_count % 2 == 1:
                values_left = p_list.pop()
                values_left_count = 1
                p_count -= 1
            for i in range(0, p_count - 1, 2):
                v1 = p_list.pop()
                v2 = p_list.pop()
                values_pair.append((v1 * v2, v1, v2))
            while len(values_pair) * 2 + values_left_count < K:
                v1 = n_list.pop()
                v2 = n_list.pop()
                values_pair.append((v1 * v2, v1, v2))
        values_pair.sort()
        # print(values_pair)
        i = 0
        # replace the weakest chosen pairs by stronger remaining negative pairs
        for i in range(len(values_pair)):
            if len(n_list) < 2:
                break
            elif values_pair[i][0] < n_list[-1] * n_list[-2]:
                v1 = n_list.pop()
                v2 = n_list.pop()
                values_pair[i] = (v1 * v2, v1, v2)
            else:
                break
        if values_left != None:
            values.append(values_left)
        for prod, v1, v2 in values_pair:
            values.append(v1)
            values.append(v2)
    # print(values)
    # multiply the chosen values modulo M
    total = 1
    for v in values:
        total = (total * v) % M
    # total = (total * v)
    print(total)
| Aasthaengg/IBMdataset | Python_codes/p02616/s747859042.py | s747859042.py | py | 2,092 | python | en | code | 0 | github-code | 90 |
19414977991 | import numpy
import theano
import theano.tensor as T
import mysql.connector
import six.moves.cPickle as pickle
import timeit
class LinearRegression(object):
    """Linear Regression Class
    The linear regression is fully described by a weight matrix :math:`W`
    and bias vector :math:`b`. Regression is done by projecting data
    points onto a hyperplanes where the visible values lies, the distance to which is used to
    determine a class membership probability.
    """
    def __init__(self, input_x, input_y, n_in, n_out, W=None, b=None):
        """ Initialize the parameters of the logistic regression
        :type input_x: theano.tensor.TensorType
        :param input_x: symbolic variable that describes the input of the
                        architecture (one minibatch)
        :type input_y: theano.tensor.TensorType
        :param input_y: symbolic variable that describes the input of the
                        architecture (one minibatch)
        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the data points lie
        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie
        """
        # W defaults to Glorot-style uniform initialisation in (n_in, n_out)
        # (the "initialize with 0" comment below is inherited from a template)
        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        if W is None:
            self.W = theano.shared(
                numpy.asarray(
                    numpy.random.uniform(
                        low=-numpy.sqrt(6. / (n_in + n_out)),
                        high=numpy.sqrt(6. / (n_in + n_out)),
                        size=(n_in, n_out)
                    ),
                    dtype=theano.config.floatX
                )
            )
        else:
            self.W = W
        # NOTE(review): b is initialised to ones, not zeros as the comment says
        # initialize the biases b as a vector of n_out 0s
        if b is None:
            self.b = theano.shared(
                value=numpy.ones(
                    (n_out,),
                    dtype=theano.config.floatX
                ),
                name='b',
                borrow=True
            )
        else:
            self.b = b
        # symbolic description of how to compute prediction whose
        # probability is maximal
        self.y_pred = T.dot(input_x, self.W) + self.b
        # symbolic expression for computing the matrix proportional to the prediction
        # probabilities
        # Where:
        # W is a matrix where column-k represent the projection hyperplane for
        # the prediction
        # x is a matrix where row-j represents input training sample-j
        # b is a vector where element-k represent the free parameter of
        # hyperplane-k
        # NOTE(review): despite the name, this is the element-wise squared
        # residual (input_y - y_pred)^2, not a probability
        self.p_d_given_thenta = T.pow(input_y - self.y_pred, 2)
        # L1 norm ; one regularization option is to enforce L1 norm to
        # be small
        self.l1 = (
            T.sum(T.abs_(self.W))
        )
        # square of L2 norm ; one regularization option is to enforce
        # square of L2 norm to be small
        self.l2_sqr = (
            T.sum((self.W ** 2))
        )
        # parameters of the model
        self.params = [self.W, self.b]
        # keep track of model input
        self.input_x = input_x
        self.input_y = input_y
    def negative_log_likelihood(self):
        """Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.
        Note: we use the mean instead of the sum so that
        the learning rate is less dependent on the batch size
        """
        # NOTE(review): this is actually mean(log(squared residual)) — neither
        # negated nor a likelihood; the name is inherited from a logistic
        # regression template. Confirm the intended loss.
        return T.mean(T.log(self.p_d_given_thenta))
    def errors(self):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch
        """
        # NOTE(review): returns the squared-residual expression, not a
        # zero-one loss as the (template) docstring claims
        return self.p_d_given_thenta
def sgd_optimization(data, learning_rate=0.01, l1_reg=0.000, l2_reg=0.05, n_epochs=10000, batch_size=1):
    """
    Minimal example.
    Demonstrate stochastic gradient descent optimization of a log-linear
    model
    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)
    :type l1_reg: float
    :param l1_reg: L1-norm's weight when added to the cost
    :type l2_reg: float
    :param l2_reg: L2-norm's weight when added to the cost
    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer
    """
    # data is (train_set_x, train_set_y) as theano shared variables
    train_set_x, train_set_y = data
    #test_set_x, test_set_y = datasets[1]
    # compute number of minibatches for training, validation and testing
    # NOTE(review): n_train_batches is computed but never used below
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    #n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    #n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size
    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print('... building the model')
    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    # generate symbolic variables for input (x and y represent a
    # minibatch)
    x = T.dvector('x') # data, presented as rasterized images
    y = T.dscalar('y') # labels, presented as 1D vector of [int] labels
    # construct the logistic regression class
    # Each MNIST image has size 28*28
    classifier = LinearRegression(input_x=x, input_y=y, n_in=train_set_x.get_value(borrow=True).shape[0], n_out=1)
    # the cost we minimize during training is the negative log likelihood of
    # the model in symbolic format
    cost = classifier.negative_log_likelihood() + l1_reg * classifier.l1 + l2_reg * classifier.l2_sqr
    # compiling a Theano function that computes the mistakes that are made by
    # the model on a minibatch
    #test_model = theano.function(
    #    inputs=[index],
    #    outputs=classifier.errors(),
    #    givens={
    #        x: test_set_x[index * batch_size: (index + 1) * batch_size],
    #        y: test_set_y[index * batch_size: (index + 1) * batch_size]
    #    }
    #)
    # compute the gradient of cost with respect to theta = (W,b)
    g_W = T.grad(cost=cost, wrt=classifier.W)
    g_b = T.grad(cost=cost, wrt=classifier.b)
    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs.
    updates = [(classifier.W, classifier.W - learning_rate * g_W),
               (classifier.b, classifier.b - learning_rate * g_b)]
    # compiling a Theano function `train_model` that returns the cost, but in
    # the same time updates the parameter of the model based on the rules
    # defined in `updates`
    train_model = theano.function(
        inputs=[],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x,
            y: train_set_y
        }
    )
    ###############
    # TRAIN MODEL #
    ###############
    print('... training the model')
    start_time = timeit.default_timer()
    epoch = 0
    while (epoch < n_epochs):
        epoch = epoch + 1
        minibatch_avg_cost = train_model()
        # compute zero-one loss on validation set
        #validation_losses = [test_model(i)
        #                     for i in range(n_test_batches)]
        #this_validation_loss = numpy.mean(validation_losses)
        # save the best model
        # NOTE(review): "best" here is simply the parameters after the last
        # epoch — there is no validation-based selection
        best_b = classifier.b
        best_W = classifier.W
        #with open('best_model.pkl', 'wb') as f:
        #    pickle.dump(classifier, f)
    end_time = timeit.default_timer()
    print('The code run for %d epochs, with %f epochs/sec' % (epoch, 1. * epoch / (end_time - start_time)))
    print(('The code ran for %.1fs' % (end_time - start_time)))
    return [best_W, best_b]
def shared_dataset(data_xy, borrow=True):
    """Wrap *data_xy* in a Theano shared variable (cast to floatX).

    Keeping datasets in shared variables lets Theano copy them to GPU memory
    once, instead of transferring every minibatch separately (which would be
    slow when running on a GPU).
    """
    as_floatx = numpy.asarray(data_xy, dtype=theano.config.floatX)
    return theano.shared(as_floatx, borrow=borrow)
def execute(games=39):
    """Collect per-athlete scores from the MySQL database (rounds < *games*
    across the 2011-2014 seasons) and fit a linear regression per athlete
    with at least 7 scores."""
    seasons = [2011, 2012, 2013, 2014]
    scores = dict()
    # NOTE(review): database credentials are hard-coded here — consider moving
    # them to configuration / environment variables
    cnx = mysql.connector.connect(user="inf2290", host="localhost", database="cartola", password="^inf2290$")
    for season in seasons:
        # %-interpolation is safe here only because season/games are
        # program-controlled ints; parameterised queries would still be better
        query = (
            "select atleta_id, pontos_num from atleta_rodada_%d where rodada_id<%d and "
            "(status_id=1 or status_id=7) order by rodada_id asc"
            %(season, games)
        )
        cursor = cnx.cursor()
        cursor.execute(query)
        # accumulate non-zero scores per athlete id
        for line in cursor:
            if line[1] == 0:
                continue
            if line[0] in scores:
                scores[line[0]].append(line[1])
            else:
                scores[line[0]] = [line[1]]
    cnx.close()
    params = []
    start_time = timeit.default_timer()
    for id, score in scores.items():
        print(score)
        print(id)
        if len(score) >= 7:
            # last score is the regression target, the rest are inputs
            param = sgd_optimization([shared_dataset(score[:len(score)-1]), shared_dataset(score[-1])])
            params.append(param)
    end_time = timeit.default_timer()
    print(('The code ran for %.1fs' % (end_time - start_time)))
    #with open('params.pkl', 'wb') as f:
    #    pickle.dump(params, f)
if __name__ == "__main__":
execute() | tonitsp/INF2290_CARTOLA | linear_regrassion.py | linear_regrassion.py | py | 9,591 | python | en | code | 1 | github-code | 90 |
42325417313 | import os
from django.views.generic.edit import FormView
from django.views.generic import DetailView, ListView
from django.shortcuts import render
from django.utils.crypto import get_random_string
from django.urls import reverse
from django.urls import reverse_lazy
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import Http404
from django.shortcuts import redirect
from django.core.exceptions import ObjectDoesNotExist
from django.core.files import File
from django.contrib import messages
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from teacher.models import Teacher
from teacher.models import TeachersClassRoom
from student.forms import SolutionCreateForm
from student.models import Solution
from student.models import Student, Solution, SolutionFile
from .forms import AssignmentCreateForm
from .models import AssignmentsFile
from .models import Assignment
def user_is_teacher_check(user):
    """Return True when *user* is authenticated and has a Teacher profile.

    Used as the predicate for ``user_passes_test`` on teacher-only views.
    """
    if not user.is_authenticated:
        return False
    # exists() issues a cheap EXISTS query instead of counting all rows
    return Teacher.objects.filter(user=user).exists()
@user_passes_test(user_is_teacher_check, login_url='customuser:permission_denied')
def add_assignment_view(request, slug_of_class):
    """Create an assignment (with optional file attachments) in a classroom.

    GET renders an empty AssignmentCreateForm; POST validates it, saves the
    Assignment plus one AssignmentsFile per uploaded file, then redirects to
    the classroom detail page. Unknown or foreign classrooms render 404.
    """
    try:
        classroom = TeachersClassRoom.objects.get(slug=slug_of_class)
        # only the owning teacher may add assignments to this classroom
        if not classroom.belongs_to_teacher(request.user):
            return render(request, '404.html')
    except ObjectDoesNotExist:
        return render(request, '404.html')
    if request.method == 'POST':
        form = AssignmentCreateForm(request.POST, request.FILES)
        # multiple uploads arrive under the single 'assign_file' field
        files = request.FILES.getlist('assign_file')
        if form.is_valid():
            assign = Assignment(
                title=form.cleaned_data['title'],
                instructions=form.cleaned_data['instructions'],
                classroom=classroom,
                due_date=form.cleaned_data['due_date']
            )
            assign.save()
            for f in files:
                assignment_file = AssignmentsFile(file=f, assignment=assign)
                assignment_file.save()
            return HttpResponseRedirect(reverse_lazy('teacher:classroom_detail', args=(slug_of_class,)))
    else:
        form = AssignmentCreateForm()
    # an invalid POST falls through here with the bound form (showing errors)
    return render(request, 'assignment.html', {'form': form, 'classroom': classroom})
def assignment_view(request, slug, *args, **kwargs):
    """Render an assignment's detail page.

    Teachers get an overview with all submitted solutions and class-size
    stats; students get a simpler page with just the assignment and its files.
    """
    # NOTE(review): .get() raises DoesNotExist (HTTP 500) for an unknown slug,
    # unlike the sibling views that render 404.html — confirm this is intended
    assignment = Assignment.objects.get(slug=slug)
    files = list(AssignmentsFile.objects.filter(assignment=assignment))
    if not request.user.is_student:
        # teacher view: show submission progress for the whole classroom
        classroom_id = assignment.classroom.id
        classroom = TeachersClassRoom.objects.get(pk=classroom_id)
        students = classroom.student_set.all()
        students_count = students.count()
        solutions = Solution.objects.filter(assignment=assignment)
        solutions_count = solutions.count()
        context = {
            'assignment': assignment,
            'assignment_files': files,
            'solutions': solutions,
            'solutions_count': solutions_count,
            'students_count': students_count,
        }
        return render(request, "index.html", context)
    else:
        context = {
            'assignment': assignment,
            'assignment_files': files,
        }
        return render(request, "student_assignment_view.html", context)
@user_passes_test(user_is_teacher_check, login_url='customuser:permission_denied')
def assignment_delete_view(request, slug, *args, **kwargs):
    """Delete an assignment owned by the requesting teacher, then redirect
    back to its classroom's detail page (404 for unknown/foreign slugs)."""
    try:
        assignment = Assignment.objects.get(slug=slug)
        classroom = assignment.classroom
        # only the owning teacher may delete
        if not classroom.belongs_to_teacher(request.user):
            return render(request, '404.html')
    except ObjectDoesNotExist:
        return render(request, '404.html')
    assignment.delete()
    messages.success(request, "Successfully deleted")
    return HttpResponseRedirect(reverse_lazy('teacher:classroom_detail', kwargs={'slug': classroom.slug}))
def assignment_file_view(request, slug, *args, **kwargs):
    """List the files attached to an assignment owned by the requesting teacher.

    NOTE(review): unlike the other teacher-only views, this one has no
    @user_passes_test decorator, so belongs_to_teacher may be called with an
    anonymous or student user — confirm whether the decorator was omitted
    intentionally.
    """
    try:
        assignment = Assignment.objects.get(slug=slug)
        classroom = assignment.classroom
        if not classroom.belongs_to_teacher(request.user):
            return render(request, '404.html')
    except ObjectDoesNotExist:
        return render(request, '404.html')
    files = assignment.get_files()
    context = {
        'assignment_files': files,
    }
    return render(request, "assignment_file_view.html", context)
def solution_create_view(request, pk, *args, **kwargs):
    """Let a student submit — or view an already-submitted — solution.

    If the student has no submission yet, GET shows the upload form and POST
    stores a Solution plus one SolutionFile per uploaded file; otherwise the
    existing submission's files are displayed read-only.
    """
    try:
        assignment = Assignment.objects.get(id=pk)
        student = Student.objects.get(user=request.user)
        sol = Solution.objects.get(assignment=assignment, student=student)
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; any lookup failure (missing assignment,
        # non-student user, or no existing solution) means "no submission yet"
        # NOTE(review): if the assignment lookup itself failed, `assignment`
        # is unbound and the render calls below raise NameError — preserved
        # from the original; confirm intended handling
        sol = None
    if sol is not None:
        solfiles = SolutionFile.objects.filter(submission=sol)
    else:
        solfiles = None
    if solfiles is None:
        count = 0
        form = SolutionCreateForm()
        if request.method == 'POST':
            form = SolutionCreateForm(request.POST, request.FILES)
            # multiple uploads arrive under the single 'solution_file' field
            files = request.FILES.getlist('solution_file')
            if form.is_valid():
                comment = form.cleaned_data['comment']
                solution_obj = Solution(
                    comment=comment, student=student, assignment=assignment)
                solution_obj.save()
                for f in files:
                    solution_file = SolutionFile(
                        file=f, submission=solution_obj)
                    solution_file.save()
                messages.success(
                    request, "Solution to assignment is successfully submitted")
                return redirect('customuser:homepage')
            else:
                messages.error(request, "Please enter valid info")
        return render(request, 'student_solution_view.html', {'form': form, 'assignment': assignment, 'count': count})
    else:
        # already submitted: show the stored files instead of the form
        count = 1
        return render(request, 'student_solution_view.html', {'solution_files': solfiles, 'count': count, 'assignment': assignment})
@user_passes_test(user_is_teacher_check, login_url='customuser:permission_denied')
def see_student_solution(request, slug_of_assignment, slug, *args, **kwargs):
    """Show a single student's solution (and its files) for an assignment."""
    solution = Solution.objects.get(slug=slug)
    context = {
        'solution_files': SolutionFile.objects.filter(submission=solution),
        'assignment': Assignment.objects.get(slug=slug_of_assignment),
        'student': solution.student,
    }
    return render(request, 'see_student_solution.html', context)
| piyushbhutani1999/heroku | assignment/views.py | views.py | py | 6,601 | python | en | code | 0 | github-code | 90 |
def factorization(n):
    """Return the prime factorisation of *n* as ``[[prime, exponent], ...]``.

    Factors are listed in increasing order; any ``n < 2`` yields ``[]``.

    >>> factorization(12)
    [[2, 2], [3, 1]]
    """
    if n < 2:
        return []
    factors = []
    remainder = n
    # trial division up to ceil(sqrt(n)): at most one prime factor of n
    # can exceed its square root
    for candidate in range(2, int(-(-n ** 0.5 // 1)) + 1):
        if remainder % candidate == 0:
            exponent = 0
            while remainder % candidate == 0:
                exponent += 1
                remainder //= candidate
            factors.append([candidate, exponent])
    if remainder != 1:
        # the leftover factor (> sqrt(n)) is prime
        factors.append([remainder, 1])
    # NOTE: the original also had `if arr == []: arr.append([n, 1])`,
    # which is unreachable for n >= 2 and has been removed
    return factors
# read the target and factorise it
n=int(input())
factor=factorization(n)
# for each prime exponent e, count the largest k with 1+2+...+k <= e:
# each step i "spends" i units of the exponent
cnt=0
for f in factor:
    c=f[1]
    for i in range(1,100):
        c-=i
        if c < 0:
            break
        cnt+=1
print(cnt)
| Aasthaengg/IBMdataset | Python_codes/p02660/s531871800.py | s531871800.py | py | 545 | python | en | code | 0 | github-code | 90 |
32325140636 | budget = float(input())
# `budget` was read on the preceding line; buy each series, applying a
# per-title discount, and report the remaining (or missing) money
tv_show_numbers = int(input())
for number in range(1, tv_show_numbers + 1):
    tv_show_name = input()
    tv_show_price = float(input())
    # discount multiplier: 1 means full price
    discount = 1
    if tv_show_name == "Thrones":
        discount = 0.5
    elif tv_show_name == "Lucifer":
        discount = 0.6
    elif tv_show_name == "Protector":
        discount = 0.7
    elif tv_show_name == "TotalDrama":
        discount = 0.8
    elif tv_show_name == "Area":
        discount = 0.9
    tv_show_price = tv_show_price * discount
    budget -= tv_show_price
if budget >= 0:
    print(f"You bought all the series and left with {budget:.2f} lv.")
else:
    # negative budget: report how much money is missing
    budget = abs(budget)
print(f"You need {budget:.2f} lv. more to buy the series!") | VelkovIv/Programing-Basic-July-2022-in-SoftUni | test_exam_tasks/05.series.py | 05.series.py | py | 730 | python | en | code | 1 | github-code | 90 |
19187457786 | import csv
import re
from collections.abc import Iterable
from io import TextIOWrapper
from pathlib import Path
from typing import Any
from zipfile import ZipFile
def to_positive_float(value: Any) -> float | None:
if isinstance(value, str):
value = re.sub(r"[^\d./]", "", value) if value else ""
try:
return float(value)
except (ValueError, TypeError):
return None
def term_data(
    csv_path: Path | Iterable[Path], field: str, type_=None
) -> dict[str, Any]:
    """Map each term's 'pattern' to the converted value of *field*.

    Rows whose *field* is missing or empty are skipped. Later files override
    earlier ones on duplicate patterns. *type_* converts the raw string value
    (defaults to ``str``).
    """
    paths = csv_path if isinstance(csv_path, Iterable) else [csv_path]
    convert = type_ if type_ else str
    data: dict[str, Any] = {}
    for path in paths:
        for term in read_terms(path):
            value = term.get(field)
            if value not in (None, ""):
                data[term["pattern"]] = convert(value)
    return data
def read_terms(csv_path: Path | Iterable[Path]) -> list[dict]:
paths = csv_path if isinstance(csv_path, Iterable) else [csv_path]
terms = []
for path in paths:
if path.suffix == ".zip":
with ZipFile(path) as zippy, zippy.open(f"{path.stem}.csv") as in_csv:
reader = csv.DictReader(TextIOWrapper(in_csv, "utf-8"))
terms += list(reader)
else:
with open(path) as term_file:
reader = csv.DictReader(term_file)
terms += list(reader)
return terms
| rafelafrance/parser_ensemble | reconcile/pylib/util.py | util.py | py | 1,413 | python | en | code | 1 | github-code | 90 |
72318539818 | import os
import json
import string
import re
import aiohttp
import logs
import stats
import jobs
from random_string import random_string
async def post(request):
    """Handle a LaTeX render request.

    Validates the input, runs the render job, and returns a JSON reply;
    on success the reply also carries the output filename.
    """
    # print(await request.text())
    # accept either a form-encoded or a JSON body
    req = (await request.post()) or (await request.json())
    code = req.get('code')
    # guard against a missing 'format' key (previously raised AttributeError)
    output_format = (req.get('format') or '').lower()
    client_name = req.get('client_name', 'unnamed')
    density = req.get('density', 200)
    quality = req.get('quality', 85)
    if False \
        or not isinstance(code, str) \
        or not isinstance(output_format, str) \
        or not isinstance(density, int) \
        or not isinstance(quality, int) \
        or not (1 <= density <= 2000) \
        or not (1 <= quality <= 100):
        # BUG FIX: the original `raise`d json_response, which is not an
        # exception (TypeError at runtime); the error reply must be returned
        return aiohttp.web.json_response({'error': 'bad input formats'})
    if output_format not in ('pdf', 'png', 'jpg'):
        return aiohttp.web.json_response({'error': 'invalid output format'})
    job_id = random_string(64)
    logs.info('Job {} started'.format(job_id))
    reply = await jobs.render_latex(job_id, output_format, code, density, quality)
    if reply['status'] == 'success':
        reply['filename'] = job_id + '.' + output_format
        logs.info('Job success : {}'.format(job_id))
        stats.track_event('api2', 'success', client=client_name)
    else:
        logs.info('Job failed : {}'.format(job_id))
        stats.track_event('api2', 'failure', client=client_name)
    return aiohttp.web.json_response(reply)
async def get(request):
    """Serve a previously rendered output file by its job filename."""
    filename = request.match_info['filename']
    # BUG FIX: the original pattern was (pdf|png|pdf) — 'pdf' listed twice and
    # 'jpg' missing — so jpg output produced by POST could never be fetched
    if not re.match(r'^[A-Za-z0-9]{64}\.(pdf|png|jpg)$', filename):
        logs.info('{} not found'.format(filename))
        raise aiohttp.web.HTTPBadRequest
    # jobs are stored as ./temp/<job_id>/a.<ext>
    path = './temp/' + filename.replace('.', '/a.')
    if not os.path.isfile(path):
        raise aiohttp.web.HTTPNotFound
    return aiohttp.web.FileResponse(path)
| DXsmiley/rtex | src/api2.py | api2.py | py | 1,888 | python | en | code | 14 | github-code | 90 |
3184647787 | import torch
import os
from transformers import BertTokenizer, BertForMaskedLM
from transformers import RobertaTokenizer, RobertaForMaskedLM
from transformers import XLMRobertaTokenizer, XLMRobertaForMaskedLM
def init_decoder_weight(dataset, ontology_file, save_dir, MLM_decoder, bert_size):
    """Build a type-classification head from averaged MLM-decoder rows.

    Each line of ``ontology_file`` names one type; the line is tokenized with
    the module-global ``tokenizer`` ('_' and '/' treated as spaces) and the
    head's row for that type becomes the mean of the MLM decoder rows of its
    sub-tokens.  The resulting nn.Linear state dict is saved to
    ``<save_dir>/<dataset>_fc.pth``.
    """
    # Tokenize every type name, one per line of the ontology file.
    with open(ontology_file, 'r') as f:
        token_ids_per_type = [
            tokenizer(line.strip().replace('_', ' ').replace('/', ' '),
                      add_special_tokens=False)['input_ids']
            for line in f
        ]
    num_types = len(token_ids_per_type)
    # Mixing matrix: each type row holds 1/len(ids) at its sub-token columns,
    # so multiplying by the decoder averages the corresponding rows.
    mixing = torch.zeros(num_types, tokenizer.vocab_size)
    for row, ids in enumerate(token_ids_per_type):
        share = 1 / len(ids)
        for token_id in ids:
            mixing[row, token_id] = share
    fc = torch.nn.Linear(bert_size, num_types)
    fc.weight.data = mixing @ MLM_decoder.weight.data
    fc.bias.data = mixing @ MLM_decoder.bias.data
    torch.save(fc.state_dict(), os.path.join(save_dir, f'{dataset}_fc.pth'))
if __name__ == '__main__':
    # Output root: ./save/<bert_name>/
    save_dir = './save'
    # bert_name = 'bert-base-cased'
    bert_name = 'bert-base-cased'
    save_dir = os.path.join(save_dir, bert_name)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    bert_path = './Bert/bert-base-cased'
    bert_size = 768  # hidden size of bert-base
    # init model params from MLMHead
    model = BertForMaskedLM.from_pretrained(bert_path)
    # NOTE: `tokenizer` is also read as a module-global by init_decoder_weight().
    tokenizer = BertTokenizer.from_pretrained(bert_path)
    # transform layer & LayerNorm
    dense = model.cls.predictions.transform.dense.state_dict()
    ln = model.cls.predictions.transform.LayerNorm.state_dict()
    MLM_decoder = model.get_output_embeddings()
    torch.save(dense, os.path.join(save_dir, 'dense.pth'))
    torch.save(ln, os.path.join(save_dir, 'ln.pth'))
    # decoder layer
    # One averaged decoder head per dataset ontology.
    init_decoder_weight('ultra', './datasets/original/ontology/ultra_types.txt', save_dir, MLM_decoder, bert_size)
    init_decoder_weight('onto', './datasets/original/ontology/onto_ontology.txt', save_dir, MLM_decoder, bert_size)
    init_decoder_weight('bbn', './datasets/original/ontology/bbn_types.txt', save_dir, MLM_decoder, bert_size)
    init_decoder_weight('figer', './datasets/original/ontology/figer_types.txt', save_dir, MLM_decoder, bert_size)
| mhtang1995/CPPT | param_init.py | param_init.py | py | 2,388 | python | en | code | 1 | github-code | 90 |
32619765508 | # # # # # Clash detection 2 # # # # #
import clr
import System
import math
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
from Autodesk.Revit.DB import *
from Autodesk.Revit.UI import *
from System.Collections.Generic import List
from pyrevit import forms
from rpw.ui.forms import TextInput
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
options = __revit__.Application.Create.NewGeometryOptions()
copyOptions = CopyPasteOptions()
class Bubble:
    """Looks up the Generic Model FamilySymbol whose family name equals bubble_name.

    After construction, ``familysymbol`` holds the matching FamilySymbol (or
    None) and ``presence`` tells whether a match was found.
    """
    # NOTE(review): these class attributes shadow the module-level globals of
    # the same names; kept for compatibility with existing code.
    doc = __revit__.ActiveUIDocument.Document
    uidoc = __revit__.ActiveUIDocument
    options = __revit__.Application.Create.NewGeometryOptions()

    def __init__(self, bubble_name):
        self.familysymbol = None
        self.presence = False
        gm_collector = FilteredElementCollector(doc)\
                        .OfClass(FamilySymbol)\
                        .OfCategory(BuiltInCategory.OST_GenericModel)
        for i in gm_collector:
            if i.FamilyName == str(bubble_name):
                self.familysymbol = i
                self.presence = True
                break
        # BUG FIX: __init__ previously ended with `return self.familysymbol`
        # and `return self.presence`; returning a non-None value from __init__
        # raises TypeError at instantiation time.  Results are exposed as
        # attributes instead.
def get_selected_elements(doc):
    """Return the elements currently selected in the active UI document."""
    selection = __revit__.ActiveUIDocument.Selection
    try:
        # Newer Revit API: the selection exposes element ids.
        return [doc.GetElement(element_id) for element_id in selection.GetElementIds()]
    except:
        # Older API: the selection exposes the elements directly.
        return list(selection.Elements)
def DocToOriginTransform(doc):
    """Transform from the document's project coordinates to the shared origin."""
    try:
        position = doc.ActiveProjectLocation.get_ProjectPosition(XYZ.Zero)
    except:
        position = doc.ActiveProjectLocation.GetProjectPosition(XYZ.Zero)
    shift = Transform.CreateTranslation(
        XYZ(position.EastWest, position.NorthSouth, position.Elevation))
    spin = Transform.CreateRotationAtPoint(XYZ.BasisZ, position.Angle, XYZ.Zero)
    # Translation applied after rotation, matching the project-position model.
    return shift.Multiply(spin)
def OriginToDocTransform(doc):
    """Inverse of DocToOriginTransform: shared origin back to document coordinates."""
    try:
        position = doc.ActiveProjectLocation.get_ProjectPosition(XYZ.Zero)
    except:
        position = doc.ActiveProjectLocation.GetProjectPosition(XYZ.Zero)
    shift = Transform.CreateTranslation(
        XYZ(-position.EastWest, -position.NorthSouth, -position.Elevation))
    spin = Transform.CreateRotationAtPoint(XYZ.BasisZ, -position.Angle, XYZ.Zero)
    # Composition order is reversed relative to DocToOriginTransform (inverse).
    return spin.Multiply(shift)
def DocToDocTransform(doc1, doc2):
    """Transform taking points from doc1's coordinate system into doc2's."""
    # Go through the shared origin: doc1 -> origin, then origin -> doc2.
    return OriginToDocTransform(doc2).Multiply(DocToOriginTransform(doc1))
def get_solid(element):
    """Collect the element's Solids, re-expressed in the active document's coordinates.

    Walks the element geometry, descending one level into GeometryInstances,
    and transforms every Solid from the element's host document into the
    active document via DocToDocTransform.
    """
    solid_list = []
    for i in element.get_Geometry(options):
        if i.ToString() == "Autodesk.Revit.DB.Solid":
            solid_list.append(SolidUtils.CreateTransformed(i, DocToDocTransform(element.Document, __revit__.ActiveUIDocument.Document)))
        elif i.ToString() == "Autodesk.Revit.DB.GeometryInstance":
            # Family instances wrap their solids in a GeometryInstance.
            for j in i.GetInstanceGeometry():
                if j.ToString() == "Autodesk.Revit.DB.Solid":
                    solid_list.append(SolidUtils.CreateTransformed(j, DocToDocTransform(element.Document, __revit__.ActiveUIDocument.Document)))
    return solid_list
def get_intersection(el1, el2):
    """Return an XYZ inside the overlap of two elements' solids, or None.

    Intersects every solid of el1 with every solid of el2 (both already in the
    active document's coordinates) and returns the min corner of the bounding
    box of the last non-empty intersection found — matching the historical
    behaviour of this routine.
    """
    # NOTE(review): these outlines are built but never used for filtering;
    # kept because Outline() construction can raise on degenerate boxes and
    # callers may rely on that side effect — confirm before removing.
    bb1 = el1.get_Geometry(options).GetBoundingBox()
    bb2 = el2.get_Geometry(options).GetBoundingBox()
    trans1 = bb1.Transform
    trans2 = bb2.Transform
    outline1 = Outline(trans1.OfPoint(bb1.Min), trans1.OfPoint(bb1.Max))
    outline2 = Outline(trans2.OfPoint(bb2.Min), trans2.OfPoint(bb2.Max))
    solid1_list = get_solid(el1)
    solid2_list = get_solid(el2)
    # BUG FIX: the result used to be detected through an unbound-local
    # NameError probe (`try: interPoint ... except: return None`); it is now
    # tracked explicitly and returned directly.
    interPoint = None
    for s1 in solid1_list:
        for s2 in solid2_list:
            try:
                inter = BooleanOperationsUtils.ExecuteBooleanOperation(s1, s2, BooleanOperationsType.Intersect)
                if inter.Volume != 0:
                    interBb = inter.GetBoundingBox()
                    interPoint = interBb.Transform.OfPoint(interBb.Min)
                    break  # next solid of el1; a later pair may overwrite (as before)
            except Exception:
                # Boolean operations can fail on tricky geometry; skip that pair.
                pass
    return interPoint
def get_elements_in_3Dview():
    """Collect MEP and structural elements visible in the active 3D view.

    Gathers pipes, ducts, cable trays, framing and their fittings from the
    host document and from every Revit link placed in the view, restricted to
    the view's section box.  Structural family instances that are not beams
    are filtered out.  Returns a flat list of elements.
    """
    cat_list = [BuiltInCategory.OST_PipeCurves, BuiltInCategory.OST_DuctCurves,\
                BuiltInCategory.OST_CableTray, BuiltInCategory.OST_StructuralFraming,\
                BuiltInCategory.OST_PipeFitting, BuiltInCategory.OST_DuctFitting,\
                BuiltInCategory.OST_CableTrayFitting, BuiltInCategory.OST_FlexPipeCurves]
    rvtlink_collector = FilteredElementCollector(doc, doc.ActiveView.Id)\
                        .OfCategory(BuiltInCategory.OST_RvtLinks)\
                        .WhereElementIsNotElementType()\
                        .ToElements()
    SectionBox = doc.ActiveView.GetSectionBox()
    trans = SectionBox.Transform
    bbmin = SectionBox.Min
    bbmax = SectionBox.Max
    # Search the host document plus every linked document.
    dl_list = []
    for rvtlink in rvtlink_collector:
        dl_list.append(rvtlink)
    dl_list.append(doc)
    element_list = []
    for cat in cat_list:
        for dl in dl_list:
            if dl.GetType().ToString() == "Autodesk.Revit.DB.Document":
                # Host document: clip by the section box in model coordinates.
                try:
                    outline = Outline(trans.OfPoint(bbmin),trans.OfPoint(bbmax))
                    bbFilter = BoundingBoxIntersectsFilter(outline)
                except:
                    # Outline() rejects inverted boxes; rebuild with sorted corners.
                    bbmin2 = trans.OfPoint(bbmin)
                    bbmax2 = trans.OfPoint(bbmax)
                    outmin = XYZ(min(bbmin2.X,bbmax2.X), min(bbmin2.Y,bbmax2.Y), min(bbmin2.Z,bbmax2.Z))
                    outmax = XYZ(max(bbmin2.X,bbmax2.X), max(bbmin2.Y,bbmax2.Y), max(bbmin2.Z,bbmax2.Z))
                    outline = Outline(outmin, outmax)
                    bbFilter = BoundingBoxIntersectsFilter(outline)
                element_collector = FilteredElementCollector(dl)\
                                    .OfCategory(cat)\
                                    .WherePasses(bbFilter)\
                                    .WhereElementIsNotElementType()\
                                    .ToElements()
                for e in element_collector:
                    # Keep everything except structural family instances that
                    # are not beams (-2001320 == OST_StructuralFraming).
                    if (e.GetType().ToString() == "Autodesk.Revit.DB.FamilyInstance") and (e.Category.Id.IntegerValue == -2001320) \
                    and (e.StructuralType.ToString() != "Beam"):
                        pass
                    else:
                        element_list.append(e)
            elif dl.GetType().ToString() == "Autodesk.Revit.DB.RevitLinkInstance":
                # Linked document: express the section box in the link's own
                # coordinate system before filtering.
                docToOriginTrans = DocToOriginTransform(doc)
                originToDocTrans = OriginToDocTransform(dl.GetLinkDocument())
                docToDocTrans = originToDocTrans.Multiply(docToOriginTrans)
                try:
                    outline = Outline(docToDocTrans.OfPoint(trans.OfPoint(bbmin)), docToDocTrans.OfPoint(trans.OfPoint(bbmax)))
                    bbFilter = BoundingBoxIntersectsFilter(outline)
                except:
                    # Same inverted-box fallback as for the host document.
                    bbmin2 = docToDocTrans.OfPoint(trans.OfPoint(bbmin))
                    bbmax2 = docToDocTrans.OfPoint(trans.OfPoint(bbmax))
                    outmin = XYZ(min(bbmin2.X,bbmax2.X), min(bbmin2.Y,bbmax2.Y), min(bbmin2.Z,bbmax2.Z))
                    outmax = XYZ(max(bbmin2.X,bbmax2.X), max(bbmin2.Y,bbmax2.Y), max(bbmin2.Z,bbmax2.Z))
                    outline = Outline(outmin, outmax)
                    # outline = Outline(trans.OfPoint(bbmin), trans.OfPoint(bbmax))
                    bbFilter = BoundingBoxIntersectsFilter(outline)
                element_collector = FilteredElementCollector(dl.GetLinkDocument())\
                                    .OfCategory(cat)\
                                    .WherePasses(bbFilter)\
                                    .WhereElementIsNotElementType()\
                                    .ToElements()
                for e in element_collector:
                    if (e.GetType().ToString() == "Autodesk.Revit.DB.FamilyInstance") and (e.Category.Id.IntegerValue == -2001320) \
                    and (e.StructuralType.ToString() != "Beam"):
                        pass
                    else:
                        element_list.append(e)
    return element_list
def get_docsolid(element, elementdoc, activedoc):
    """Extruded solid of the element's bounding box, in activedoc coordinates.

    The element's bounding box (from elementdoc) is mapped through the shared
    origin into activedoc's coordinate system, its bottom rectangle is traced,
    and the rectangle is extruded upwards to the box height.
    """
    docToOriginTrans = DocToOriginTransform(elementdoc)
    originToDocTrans = OriginToDocTransform(activedoc)
    bb = element.get_Geometry(options).GetBoundingBox()
    trans = bb.Transform
    bbmin = originToDocTrans.OfPoint(docToOriginTrans.OfPoint(trans.OfPoint(bb.Min)))
    bbmax = originToDocTrans.OfPoint(docToOriginTrans.OfPoint(trans.OfPoint(bb.Max)))
    # Bottom rectangle of the transformed box.
    pt0 = XYZ(bbmin.X, bbmin.Y, bbmin.Z)
    pt1 = XYZ(bbmax.X, bbmin.Y, bbmin.Z)
    pt2 = XYZ(bbmax.X, bbmax.Y, bbmin.Z)
    pt3 = XYZ(bbmin.X, bbmax.Y, bbmin.Z)
    edge0 = Line.CreateBound(pt0, pt1)
    edge1 = Line.CreateBound(pt1, pt2)
    edge2 = Line.CreateBound(pt2, pt3)
    edge3 = Line.CreateBound(pt3, pt0)
    iCurveCollection = List[Curve]()
    iCurveCollection.Add(edge0)
    iCurveCollection.Add(edge1)
    iCurveCollection.Add(edge2)
    iCurveCollection.Add(edge3)
    height = bbmax.Z - bbmin.Z
    baseLoop = CurveLoop.Create(iCurveCollection)
    iCurveLoopCollection = List[CurveLoop]()
    iCurveLoopCollection.Add(baseLoop)
    solid = GeometryCreationUtilities.CreateExtrusionGeometry(iCurveLoopCollection, XYZ.BasisZ, height)
    return solid
def get_docoutline(element, elementdoc, activedoc):
    """Bounding-box Outline of the element, expressed in activedoc coordinates."""
    to_origin = DocToOriginTransform(elementdoc)
    to_active = OriginToDocTransform(activedoc)
    box = element.get_Geometry(options).GetBoundingBox()
    placement = box.Transform
    low = to_active.OfPoint(to_origin.OfPoint(placement.OfPoint(box.Min)))
    high = to_active.OfPoint(to_origin.OfPoint(placement.OfPoint(box.Max)))
    return Outline(low, high)
# Main routine: find clashes between MEP/structural elements visible in the
# active 3D view and place an "OBSERVATIONS_2017" bubble family at each clash.
bname = "OBSERVATIONS_2017"
# bname = "Observation_Libelle SYA"
element_list = get_elements_in_3Dview()
print(element_list)
tuple_list = []  # string keys of already-recorded clash points (de-duplication)
point_list = []  # unique clash points where a bubble will be placed
k = 0
l = -1  # NOTE(review): never used below — leftover from an older version
for i in element_list:
    k = k + 1
    # Only test each unordered pair once (j starts after i).
    for j in range(k, len(element_list)):
        # Skip beam-vs-beam pairs (-2001320 == OST_StructuralFraming).
        if (i.Category.Id.IntegerValue == -2001320) and (element_list[j].Category.Id.IntegerValue == -2001320):
            "Beam intersection"
        else:
            interPoint = get_intersection(i, element_list[j])
            if interPoint is None:
                pass
            else:
                if interPoint.ToString() not in tuple_list:
                    point_list.append(interPoint)
                    tuple_list.append(interPoint.ToString())
t = Transaction(doc, 'Place bubbles')
t.Start()
for m in point_list:
    print("Bubble placed")
    # NOTE(review): Bubble(bname) is re-instantiated (and re-collected) for
    # every point; hoisting it out of the loop would avoid repeated lookups.
    instance = doc.Create.NewFamilyInstance(m, Bubble(bname).familysymbol, Structure.StructuralType.NonStructural)
t.Commit()
# bname = "OBSERVATIONS_2017"
# if Bubble(bname).presence is True:
# cat_list = [BuiltInCategory.OST_PipeCurves, BuiltInCategory.OST_DuctCurves,\
# BuiltInCategory.OST_CableTray, BuiltInCategory.OST_StructuralFraming,\
# BuiltInCategory.OST_PipeFitting, BuiltInCategory.OST_DuctFitting,\
# BuiltInCategory.OST_CableTrayFitting, BuiltInCategory.OST_FlexPipeCurves]
# rvtlink_collector = FilteredElementCollector(doc, doc.ActiveView.Id)\
# .OfCategory(BuiltInCategory.OST_RvtLinks)\
# .WhereElementIsNotElementType()\
# .ToElements()
# SectionBox = doc.ActiveView.GetSectionBox()
# trans = SectionBox.Transform
# bbmin = SectionBox.Min
# bbmax = SectionBox.Max
# # outline = Outline(trans.OfPoint(min),trans.OfPoint(max))
# dl_list = []
# for rvtlink in rvtlink_collector:
# dl_list.append(rvtlink)
# dl_list.append(doc)
# element_list = []
# for cat in cat_list:
# for dl in dl_list:
# if dl.GetType().ToString() == "Autodesk.Revit.DB.Document":
# outline = Outline(trans.OfPoint(bbmin),trans.OfPoint(bbmax))
# bbFilter = BoundingBoxIntersectsFilter(outline)
# element_collector = FilteredElementCollector(dl)\
# .OfCategory(cat)\
# .WherePasses(bbFilter)\
# .WhereElementIsNotElementType()\
# .ToElements()
# for e in element_collector:
# if (e.GetType().ToString() == "Autodesk.Revit.DB.FamilyInstance") and (e.StructuralType.ToString() != "Beam"):
# pass
# else:
# element_list.append(e)
# elif dl.GetType().ToString() == "Autodesk.Revit.DB.RevitLinkInstance":
# docToOriginTrans = DocToOriginTransform(doc)
# originToDocTrans = OriginToDocTransform(dl.GetLinkDocument())
# docToDocTrans = originToDocTrans.Multiply(docToOriginTrans)
# try:
# outline = Outline(docToDocTrans.OfPoint(trans.OfPoint(bbmin)), docToDocTrans.OfPoint(trans.OfPoint(bbmax)))
# bbFilter = BoundingBoxIntersectsFilter(outline)
# except:
# a = "Same base point"
# outline = Outline(trans.OfPoint(bbmin), trans.OfPoint(bbmax))
# bbFilter = BoundingBoxIntersectsFilter(outline)
# element_collector = FilteredElementCollector(dl.GetLinkDocument())\
# .OfCategory(cat)\
# .WherePasses(bbFilter)\
# .WhereElementIsNotElementType()\
# .ToElements()
# for e in element_collector:
# if (e.GetType().ToString() == "Autodesk.Revit.DB.FamilyInstance") and (e.StructuralType.ToString() != "Beam"):
# pass
# else:
# element_list.append(e)
# print(element_list)
# tuple_list = []
# point_list = []
# k = 0
# l = -1
# for i in element_list:
# l = l + 1
# k = k + 1
# for j in range(k, len(element_list)):
# if interpoint(i, element_list[j], element_list[l].Document, element_list[j].Document) is None:
# pass
# else:
# if interpoint(i, element_list[j], element_list[l].Document, element_list[j].Document).ToString() not in tuple_list:
# point_list.append(interpoint(i, element_list[j], element_list[l].Document, element_list[j].Document))
# tuple_list.append(interpoint(i, element_list[j], element_list[l].Document, element_list[j].Document).ToString())
# t = Transaction(doc, 'Place bubbles')
# t.Start()
# for m in point_list:
# print("Bubble placed")
# instance = doc.Create.NewFamilyInstance(m, Bubble(bname).familysymbol, Structure.StructuralType.NonStructural)
# t.Commit()
# # # # # Clash detection 2 # # # # #
# def get_selected_elements(doc):
# try:
# # # Revit 2016
# return [doc.GetElement(id)
# for id in __revit__.ActiveUIDocument.Selection.GetElementIds()]
# except:
# # # old method
# return list(__revit__.ActiveUIDocument.Selection.Elements)
# el = get_selected_elements(doc)[0]
# print("el:")
# print(el)
# print("dir(el)")
# print(dir(el))
# print("dir(el.location)")
# print(dir(el.Location))
# bb = el.get_Geometry(options).GetBoundingBox()
# print(bb)
# print("bbmin")
# print(bb.Min)
# print("bbmax")
# print(bb.Max)
# print("dir(bb)")
# print(dir(bb))
# print("category")
# print(el.Category.Name)
# print("el geometry object from reference")
# a = el.GetGeometryObjectFromReference(Reference(el))
# print(el.GetGeometryObjectFromReference(Reference(el)))
# print("dir(a)")
# print(dir(a))
# print(bb.Bounds)
# ceiling_collector = FilteredElementCollector(doc,doc.ActiveView.Id)\
# .OfCategory(BuiltInCategory.OST_Ceilings)\
# .WhereElementIsNotElementType()\
# .ToElements()
# hsfp_list = []
# for i in ceiling_collector:
# z = round(i.LookupParameter("D"+"\xe9"+"calage par rapport au niveau").AsDouble()/3.2808399, 2)
# if (z not in hsfp_list) and (i.LookupParameter("Sous-projet").AsValueString() == "BPS_FXP"):
# print(i.LookupParameter("Sous-projet").AsValueString())
# print(z)
# hsfp_list.append(z)
# for j in hsfp_list:
# print(j)
# import clr
# import System
# import math
# clr.AddReference('RevitAPI')
# clr.AddReference('RevitAPIUI')
# from Autodesk.Revit.DB import *
# from Autodesk.Revit.UI import *
# from System.Collections.Generic import List
# from pyrevit import forms
# from rpw.ui.forms import TextInput
# from rpw.ui.forms import (FlexForm, Label, ComboBox, TextBox, TextBox, Separator, Button)
# >>>>>>>>>>>>>>>
# beam_collector = FilteredElementCollector(doc,doc.ActiveView.Id)\
# .OfCategory(BuiltInCategory.OST_StructuralFraming)\
# .WhereElementIsNotElementType()\
# .ToElements()
# icollection = List[ElementId]()
# uidoc.Selection.SetElementIds(icollection)
# for beam in beam_collector:
# if beam.StructuralType.ToString() == "Beam":
# bb = beam.get_Geometry(options).GetBoundingBox()
# z = bb.Min.Z/3.2808399
# if z-114.72<2.79 :
# print(z)
# print(beam.Id)
# icollection.Add(beam.Id)
# # "FABRICATION_LEVEL_PARAM"
# # param = BuiltInParameter.STRUCTURAL_REFERENCE_LEVEL_ELEVATION
# # print(param)
# # zlevel = beam.get_Parameter(param).AsDouble()/3.2808399
# # print(zlevel)
# uidoc.Selection.SetElementIds(icollection)
# <<<<<<<<<<<<<<<<<<<<<
# doc = __revit__.ActiveUIDocument.Document
# uidoc = __revit__.ActiveUIDocument
# options = __revit__.Application.Create.NewGeometryOptions()
# SEBoptions = SpatialElementBoundaryOptions()
# roomcalculator = SpatialElementGeometryCalculator(doc)
# def Ungroup(group):
# group.UngroupMembers()
# def Regroup(groupname,groupmember):
# newgroup = doc.Create.NewGroup(groupmember)
# newgroup.GroupType.Name = str(groupname)
# def convertStr(s):
# """Convert string to either int or float."""
# try:
# ret = int(s)
# except ValueError:
# ret = 0
# return ret
# class FailureHandler(IFailuresPreprocessor):
# def __init__(self):
# self.ErrorMessage = ""
# self.ErrorSeverity = ""
# def PreprocessFailures(self, failuresAccessor):
# # failuresAccessor.DeleteAllWarning()
# # return FailureProcessingResult.Continue
# failures = failuresAccessor.GetFailureMessages()
# rslt = ""
# for f in failures:
# fseverity = failuresAccessor.GetSeverity()
# if fseverity == FailureSeverity.Warning:
# failuresAccessor.DeleteWarning(f)
# elif fseverity == FailureSeverity.Error:
# rslt = "Error"
# failuresAccessor.ResolveFailure(f)
# if rslt == "Error":
# return FailureProcessingResult.ProceedWithCommit
# # return FailureProcessingResult.ProceedWithRollBack
# else:
# return FailureProcessingResult.Continue
# td_button = TaskDialogCommonButtons.Yes | TaskDialogCommonButtons.No
# res = TaskDialog.Show("Importation from Excel","Attention :\n- Les ids des elements doivent etre en colonne 1\n- Les noms exacts (avec majuscules) des parametres partages doivent etre en ligne 1\n- Aucun accent ou caractere special dans le fichier Excel", td_button)
# if res == TaskDialogResult.Yes:
# # t = Transaction(doc, 'Read Excel spreadsheet.')
# # t.Start()
# #Accessing the Excel applications.
# xlApp = System.Runtime.InteropServices.Marshal.GetActiveObject('Excel.Application')
# count = 1
# dicWs = {}
# count = 1
# for i in xlApp.Worksheets:
# dicWs[i.Name] = i
# count += 1
# components = [Label('Enter the name of ID parameter:'),
# ComboBox('combobox', dicWs),
# Label('Enter the number of rows in Excel you want to integrate to Revit:'),
# TextBox('textbox', Text="600"),
# Label('Enter the number of colones in Excel you want to integrate to Revit:'),
# TextBox('textbox2', Text="20"),
# Separator(),
# Button('Select')]
# form = FlexForm('Title', components)
# form.show()
# worksheet = form.values['combobox']
# rowEnd = convertStr(form.values['textbox'])
# colEnd = convertStr(form.values['textbox2'])
# #Row, and Column parameters
# rowStart = 1
# column_id = 1
# colStart = 2
# # Using a loop to read a range of values and print them to the console.
# array = []
# param_names_excel = []
# data = {}
# for r in range(rowStart, rowEnd):
# data_id = worksheet.Cells(r, column_id).Text
# data_id_int = convertStr(data_id)
# if data_id_int != 0:
# data = {'id': data_id_int}
# for c in range(colStart, colEnd):
# data_param_value = worksheet.Cells(r, c).Text
# data_param_name = worksheet.Cells(1, c).Text
# if data_param_name != '':
# param_names_excel.append(data_param_name)
# if data_param_value != '':
# data[data_param_name] = data_param_value
# array.append(data)
# # t.Commit()
# #Recuperation des portes
# doors = FilteredElementCollector(doc)\
# .OfCategory(BuiltInCategory.OST_Doors)\
# .WhereElementIsNotElementType()\
# .ToElements()
# #Get parameters in the model
# params_door_set = doors[0].Parameters
# params_door_name = []
# for param_door in params_door_set:
# params_door_name.append(param_door.Definition.Name)
# unfounddoors = []
# # TROUVER ERREUR ICI ET SUPPRIMER PARAGRAPHE!
# # print(array)
# # for hash in array:
# # for param in param_names_excel:
# # print(param)
# # if (param in params_door_name) and (param in hash):
# # # door.LookupParameter(param).Set(hash[param])
# # print(hash[param])
# # TROUVER ERREUR ICI ET SUPPRIMER PARAGRAPHE!
# # t = Transaction(doc, 'Feed doors')
# # t.Start()
# tg = TransactionGroup(doc, 'Feed doors')
# tg.Start()
# for hash in array:
# idInt = int(hash['id'])
# try :
# door_id = ElementId(idInt)
# door = doc.GetElement(door_id)
# groupId = door.GroupId
# print("Door : " + str(idInt))
# print("Group id : " + str(groupId))
# if str(groupId) != "-1":
# t1 = Transaction(doc, 'Ungroup group')
# t1.Start()
# group = doc.GetElement(groupId)
# groupname = group.Name
# groupmember = group.GetMemberIds()
# Ungroup(group)
# t1.Commit()
# print(t1.GetStatus())
# # TROUVER ERREUR ICI ET SUPPRIMER PARAGRAPHE!
# # for param in param_names_excel:
# # if (param in params_door_name) and (param in hash):
# # door.LookupParameter(param).Set(hash[param])
# for param in param_names_excel:
# try:
# if (param in params_door_name) and (param in hash):
# door.LookupParameter(param).Set(hash[param])
# print(param + " : Done")
# except:
# print(param + " : Failed")
# # TROUVER ERREUR ICI ET SUPPRIMER PARAGRAPHE!
# if str(groupId) != "-1":
# try:
# t2 = Transaction(doc, 'Regroup group')
# t2.Start()
# print(t2.GetStatus())
# failureHandlingOptions = t2.GetFailureHandlingOptions()
# failureHandler = FailureHandler()
# failureHandlingOptions.SetFailuresPreprocessor(failureHandler)
# failureHandlingOptions.SetClearAfterRollback(True)
# t2.SetFailureHandlingOptions(failureHandlingOptions)
# Regroup(groupname,groupmember)
# t2.Commit()
# print(t2.GetStatus())
# except:
# t2.RollBack()
# print(t2.GetStatus())
# for i in groupmember:
# IdsInLine = IdsInLine + str(i.IntegerValue) + ", "
# IdsInLine = IdsInLine[:len(IdsInLine)-3]
# print("Regrouping failed on group : " + str(groupId) + " / " + str(groupname))
# print("Grouped element ids was : " + IdsInLine)
# print("Door " + str(idInt) + " : OK")
# except:
# print(str(idInt) + " not in REVIT doc")
# unfounddoors.append(idInt)
# print("Job done!")
# # t.Commit()
# tg.Assimilate()
# if len(unfounddoors) != 0:
# print(str(len(unfounddoors)) + " doors not found : ")
# print(unfounddoors)
# else:
# "A plus tard!" | ThomFgt/PyRevit | PyRevit/MyExtensions/AddIns.extension/AddIns.tab/tests.panel/Test2Pierre.pushbutton/script.py | script.py | py | 22,144 | python | en | code | 2 | github-code | 90 |
73173500455 | from PIL import Image
from OpenGL.GL import *
from src.layout_creation.rect import Rect
from src.utils.MathUtils import Vector2
class ImageSceneObject:
    """A textured quad that displays a PIL image inside a layout rect.

    The image is uploaded as an OpenGL texture on construction; UVs are
    computed so the image fills the rect without distortion (the overflowing
    axis is cropped) and can be panned with :meth:`add_uv_offset`.
    """

    def __init__(self, image: Image, rect: Rect, offset: Vector2):
        self.rect = rect
        width, height = image.size

        # Dirty hack for portrait textures, they seem to work with
        # POT width, otherwise texture looks bad
        if width < height or width % 2 != 0:
            new_height = height * (2048.0 / width)
            image = image.resize((2048, int(new_height)))
            width, height = image.size

        self.img_data = image.tobytes("raw", "RGB", 0, -1)
        texture = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D, texture)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, self.img_data)
        glEnable(GL_TEXTURE_2D)
        # BUG FIX: self.texture was assigned twice (before and after
        # glEnable); a single assignment is sufficient.
        self.texture = texture

        # Initialize UV
        self.uv_min = Vector2(0, 0)
        self.uv_max = Vector2(1, 1)
        self.uv_offset = offset

        # Shrink the UV range on the axis that would overflow so the image
        # fills the rect while preserving aspect ratio (excess is cropped).
        width_ratio = width / rect.w
        height_ratio = height / rect.h
        if width_ratio > height_ratio:
            self._lock_height_uv = True
            self.uv_max.x = height_ratio / width_ratio
        else:
            self._lock_height_uv = False
            self.uv_max.y = width_ratio / height_ratio

    def add_uv_offset(self, delta: Vector2):
        """Pan the visible window over the texture, clamped to [0, 1] UV space."""
        # Only the cropped axis can pan; the other axis already fits exactly.
        if self._lock_height_uv:
            self.uv_offset.x -= delta.x
        else:
            self.uv_offset.y += delta.y

        resulting_uv_min = self.uv_min + self.uv_offset
        resulting_uv_max = self.uv_max + self.uv_offset

        # Clamp so the visible UV window never leaves the texture.
        if resulting_uv_max.x > 1.0:
            self.uv_offset.x -= resulting_uv_max.x - 1.0
        elif resulting_uv_min.x < 0.0:
            self.uv_offset.x -= resulting_uv_min.x

        if resulting_uv_max.y > 1.0:
            self.uv_offset.y -= resulting_uv_max.y - 1.0
        elif resulting_uv_min.y < 0.0:
            self.uv_offset.y -= resulting_uv_min.y

    def draw(self):
        """Render the textured quad at the rect's corners."""
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, self.texture)

        corners = self.rect.get_corners()
        uv_min = Vector2(self.uv_min.x, self.uv_min.y) + self.uv_offset
        uv_max = Vector2(self.uv_max.x, self.uv_max.y) + self.uv_offset

        glBegin(GL_QUADS)
        glTexCoord2f(uv_min.x, uv_max.y)
        glVertex3f(corners[0].x, corners[0].y, 0.0)
        glTexCoord2f(uv_min.x, uv_min.y)
        glVertex3f(corners[1].x, corners[1].y, 0.0)
        glTexCoord2f(uv_max.x, uv_min.y)
        glVertex3f(corners[2].x, corners[2].y, 0.0)
        glTexCoord2f(uv_max.x, uv_max.y)
        glVertex3f(corners[3].x, corners[3].y, 0.0)
        glEnd()
        glDisable(GL_TEXTURE_2D)

    def dispose(self):
        """Free the GL texture.

        NOTE(review): PyOpenGL's glDeleteTextures commonly takes a sequence
        (glDeleteTextures([self.texture])); confirm the two-argument int form
        works at runtime.
        """
        glDeleteTextures(1, self.texture)
| jlol/photo-album | src/opengl/image_scene_object.py | image_scene_object.py | py | 3,313 | python | en | code | 0 | github-code | 90 |
18527318339 | #import sys
#import numpy as np
import math
#from fractions import Fraction
import itertools
from collections import deque
from collections import Counter
import heapq
from fractions import gcd
#input=sys.stdin.readline
#import bisect
# Grid repaint cost minimisation: cells are split into three classes by
# (row + col + 2) % 3; each class must be repainted with a single colour and
# the three chosen colours must be distinct.
n, m = map(int, input().split())
# d[a][b] = cost of repainting one cell of colour a+1 with colour b+1.
d = [list(map(int, input().split())) for _ in range(m)]
# One Counter per class: colour value -> number of cells of that colour.
cell_colors = [Counter(), Counter(), Counter()]
for i in range(n):
    row = list(map(int, input().split()))
    for j, color in enumerate(row):
        cell_colors[(i + j + 2) % 3][color] += 1
ans = 10**9
# Try every ordered assignment of three distinct colours to the classes.
for x, y, z in itertools.permutations(range(1, m + 1), 3):
    res = 0
    for group, paint in zip(cell_colors, (x, y, z)):
        for original, count in group.items():
            res += d[original - 1][paint - 1] * count
    ans = min(ans, res)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03330/s832138538.py | s832138538.py | py | 1,105 | python | en | code | 0 | github-code | 90 |
31174003482 | from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
import pandas as pd
def transform_csv(file, email_column_name):
    """Placeholder for the CSV transformation; not implemented yet."""
    pass
def transform_excel(file, email_column_name):
    """Parse an uploaded Excel file and print its sheet names.

    NOTE(review): the upload is opened in text mode ('r'); Excel files are
    binary, so this likely only works for in-memory uploads where the mode
    argument is effectively ignored — confirm with a temporary-file upload
    ('rb' would be safer).  `email_column_name` is currently unused.
    """
    excel = pd.ExcelFile(file.open('r'))
    print(excel.sheet_names)
    #print(file)
@api_view(['POST'])
def service(request):
    """Dispatch an uploaded spreadsheet to the CSV or Excel transformer.

    Expects a multipart POST with a 'file' upload, 'email_column_name' and,
    for CSV input, a 'delimiter'.  Returns 200 on success, 400 with the error
    message otherwise.
    """
    try:
        email_column_name = request.data.get('email_column_name')
        # BUG FIX: request.FILES.get is a method; it was being subscripted
        # (`.get['file']`), which raised TypeError on every request.
        file = request.FILES.get('file')
        if (file.name.split('.')[1]).lower() == 'csv':
            delimiter = request.data.get('delimiter')
            transform_csv(file, email_column_name)
        else:
            transform_excel(file, email_column_name)
        return Response({'h': 'h'}, status.HTTP_200_OK)
    except Exception as e:
        # BUG FIX: exception objects are not JSON-serializable; send the message.
        return Response({'Error': str(e)}, status.HTTP_400_BAD_REQUEST)
| DavidRoldan523/inbound_marketing_clean | API/inboud_transform/views/service_view.py | service_view.py | py | 901 | python | en | code | 0 | github-code | 90 |
73340136937 | import numpy as np
from bds_sampler import make_degree_sequence, sample
seq = np.array(
[
[1, 2],
[3, 2],
[4, 6],
[3, 3],
[5, 3],
[4, 4],
]
)
print(sample(in_seq=seq[:, 0], out_seq=seq[:, 1], N_samples=1))
# or generate a random degree sequence with pareto distributed in and out degrees
seq = make_degree_sequence(N_nodes=15)
print(sample(in_seq=seq[:, 0], out_seq=seq[:, 1], N_samples=1))
| ianhi/BDS-sampler | examples/basic.py | basic.py | py | 452 | python | en | code | 1 | github-code | 90 |
23800759218 | import sys
INF = int(1e9)
input = sys.stdin.readline

# BOJ 2096 "내려가기": per column, track the best (max and min) path sum seen
# so far; from column j on one row you may move to columns j-1, j or j+1.
N = int(input())
maxdp = list(map(int, input().split()))
mindp = maxdp[:]
for _ in range(N - 1):
    # ROBUSTNESS FIX: the original indexed every other *character* of the raw
    # line, which silently breaks on multi-digit values or irregular spacing;
    # split()-based parsing handles any whitespace and value width.
    row = list(map(int, input().split()))
    # Column j can be reached from columns j-1..j+1 of the previous row.
    maxdp = [row[j] + max(maxdp[max(0, j - 1):j + 2]) for j in range(3)]
    mindp = [row[j] + min(mindp[max(0, j - 1):j + 2]) for j in range(3)]
print(max(maxdp), min(mindp)) | 723poil/boj | 백준/Gold/2096. 내려가기/내려가기.py | 내려가기.py | py | 783 | python | en | code | 0 | github-code | 90 |
35413139669 | import re, os
from tqdm import tqdm #progress-bar library
import threading #threading support
#import our own modules: first register the working directory, then import
import sys
sys.path.append(".") #working-directory path
from utils.subConvert.sub_convert import sub_convert
from utils.subConvert import list_to_content
#source file with the subscription lists
source_sublist_path = './utils/collectTGsub/TGsources.yaml'
#nodes crawled from Telegram shares
crawlTGnodeAll = 'https://raw.githubusercontent.com/rxsweet/useProxies/main/sub/sources/crawlTGnode'
#output subscription file location
outputAllyaml_path = './sub/sources/TGnodeAll.yaml'
#outputUrlSub_path = './sub/sources/TGsubUrl'
#outputBase64Sub_path = './sub/sources/TGsubUrl64'
def get_sublist():
    """Read the clash and v2 subscription URL lists from the source YAML file."""
    urls = []
    for section in ('clash订阅', 'v2订阅'):
        urls.extend(list_to_content.get_yaml_list(source_sublist_path, section))
    return urls
def urllist_to_sub(new_url_list): # merge the URL subscription list and save it as a clash YAML file
    """Fetch every subscription in new_url_list, merge, de-duplicate and save as YAML."""
    allProxy = list_to_content.list_to_content(new_url_list)
    # join the list items into one newline-separated string
    allProxy = '\n'.join(allProxy)
    # first normalise the merged content into a YAML (clash) structure
    allyaml = sub_convert.format(allProxy)
    # de-duplicate
    if isinstance(allyaml, dict): # if format() returned a parse error (non-dict), skip makeup
        allyaml = sub_convert.makeup(allyaml, dup_rm_enabled=True, format_name_enabled=False)
    # write the YAML file
    print('write YAML file content!')
    list_to_content.write_file(outputAllyaml_path,allyaml)
    """
    #获取allyaml_path文件路径
    good_file_path = os.path.abspath(outputAllyaml_path)
    # 写入url 订阅文件
    print('write URL file content!')
    subContent = sub_convert.convert_remote(good_file_path,'url')
    list_to_content.write_file(outputUrlSub_path,subContent)
    # 写入base64 订阅文件
    print('write Base64 file content!')
    subContent = sub_convert.base64_encode(subContent)
    list_to_content.write_file(outputBase64Sub_path,subContent)
    """
if __name__ == '__main__':
    # Build the URL list, append the crawled Telegram-shared subscription,
    # then merge everything into the output YAML file.
    new_url_list = get_sublist()
    new_url_list.append(crawlTGnodeAll)# nodes crawled from Telegram shares
    #sub_convert.geoip_update()
    urllist_to_sub(new_url_list)
| rxsweet/codes | TGlist2Node/TGlist2Node.py | TGlist2Node.py | py | 2,328 | python | en | code | 7 | github-code | 90 |
22548346323 | #while loops.
#Items - each item of a collection or a list
items = ["crayon", "scissors", "paper", "glitter glue", "markers", "pens"]
for item in items:
    print(f"The item is: {item}")
#Numbers - list of numbers
# NOTE: the explicit list below is immediately replaced by range(10).
numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8]
numbers = range(10)
for number in range(10):
    print(number)
#range - using range.
for i in range(100, 200):
    print(i)
#range 2
for i in range(100, 200, 10):
    print(i)
#range 3 - for each outer value it prints the inner --10..--12 lines
for i in range(5):
    print(i)
    for j in range(10, 13):
        print(f"--{j}")
#letters - each letter of a string// prints each letter of the name on its own line
first_name = "Brigham"
for letter in first_name:
    print(f"The letter is: {letter}")
#letters 2 - prints each letter's position, one per line
word = "book"
number_of_letters = 4
for index in range(number_of_letters):
    letter = word[index]
    print(f"Index: {index} Letter: {letter}")
#dog - asks for the dog's name and counts how many letters it has
dog_name = input("What is your dog's name? ")
letter_count = len(dog_name)
print(f"Your dog's name has {letter_count} letters")
#book
word = "book"
number_of_letters = len(word) # Notice this can now work for any string
for index in range(number_of_letters):
    letter = word[index]
    print(f"Index: {index} Letter: {letter}")
# Notice by using enumerate, we specify both i and letter // prints each letter and its position
first_name = "Brigham"
for i, letter in enumerate(first_name):
print(f"The letter {letter} is at position {i}") | karolcastro/Pathway | 08/preparation_material.py | preparation_material.py | py | 1,624 | python | en | code | 0 | github-code | 90 |
35716445081 | import os
from secfsdstools.c_index.companyindexreading import CompanyIndexReader
from secfsdstools.c_index.indexdataaccess import IndexReport
CURRENT_DIR, _ = os.path.split(__file__)
PATH_TO_PARQUET = f'{CURRENT_DIR}/../_testdata/parquet/'
def test_get_latest_company_information_parquet(basicconf):
    """get_latest_company_filing() should return the metadata of the report
    located by the dbaccessor's find_latest_company_report lookup."""
    # Reader for Apple Inc. (CIK 320193).
    reader = CompanyIndexReader.get_company_index_reader(cik=320193, configuration=basicconf)
    # Stub the database lookup so the test only depends on the parquet test
    # data: always point at adsh 0001193125-10-012085 inside 2010q1.
    reader.dbaccessor.find_latest_company_report = lambda x: IndexReport(
        adsh='0001193125-10-012085',
        fullPath=f'{basicconf.parquet_dir}/quarter/2010q1.zip',
        cik=320193,
        name='',
        form='',
        filed=0,
        period=0,
        originFile='',
        originFileType='',
        url='',
    )
    result = reader.get_latest_company_filing()
    # The filing details must come from the stubbed report's quarter data.
    assert result is not None
    assert result['adsh'] == '0001193125-10-012085'
    assert result['cik'] == 320193
    assert result['name'] == 'APPLE INC'
    assert result['form'] == '10-Q'
    assert result['period'] == 20091231
| HansjoergW/sec-fincancial-statement-data-set | tests/c_index/test_companyindexreading.py | test_companyindexreading.py | py | 1,019 | python | en | code | 12 | github-code | 90 |
19017640775 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def flatten (self, root: Optional[TreeNode]) -> None:
        """Flatten the tree in-place into a right-skewed list in preorder,
        using O(1) extra space via Morris-style threading.

        For each node with a left child, the rightmost node of its left
        subtree (the preorder predecessor) is found; on the first visit a
        thread back to the node is planted there, and on the second visit
        (thread found) the left subtree is spliced in between the node and
        its original right subtree.
        """
        ptr = root
        while (ptr != None):
            # No left subtree: node is already in its final position.
            if (ptr.left == None): ptr = ptr.right
            else:
                # Walk to the rightmost node of the left subtree, stopping
                # early if we hit a previously planted thread back to ptr.
                temp = ptr.left
                while (temp.right != None and temp.right != ptr): temp = temp.right
                # First visit: plant the thread and descend into the left child.
                if (temp.right == None): temp.right, ptr = ptr, ptr.left
                # Second visit: replace the thread with the original right
                # subtree, hang the left subtree on the right, clear left,
                # and resume from the saved right subtree.
                if (temp.right == ptr):
                    act_right = ptr.right
                    temp.right, ptr.right, ptr.left = ptr.right, ptr.left, None
                    ptr = act_right
| Tejas07PSK/lb_dsa_cracker | Binary Search Trees/Flatten BST to sorted list/solution2.py | solution2.py | py | 771 | python | en | code | 2 | github-code | 90 |
18406241439 | from collections import deque
# Two-colour the vertices of a tree so that every odd-weight edge joins
# different colours and every even-weight edge joins equal colours
# (i.e. colour = parity of the distance from a fixed seed vertex).
n=int(input())
ch=0
# g: adjacency over even-weight edges; k: adjacency over odd-weight edges.
g=[[] for _ in range(n)]
k=[[] for _ in range(n)]
for i in range(n-1):
    u,v,w=map(int,input().split())
    if w%2==0:
        g[u-1].append(v-1)
        g[v-1].append(u-1)
    else:
        k[u-1].append(v-1)
        k[v-1].append(u-1)
        # Remember an endpoint of an odd edge to seed the BFS.
        # NOTE(review): ch2 is recorded but never used.
        ch1=u-1
        ch2=v-1
        ch=1
## 0:White 1:Black
ans=[-1 for _ in range(n)]
if ch>0:
    # BFS from ch1: keep the colour across even edges, flip it across odd ones.
    ans[ch1]=0
    d=[ch1]
    qu=deque(d)
    while qu:
        p=qu.popleft()
        for gr in g[p]:
            if ans[gr]==-1:
                if ans[p]==0:
                    ans[gr]=0
                    qu.append(gr)
                else:
                    ans[gr]=1
                    qu.append(gr)
        for kr in k[p]:
            if ans[kr]==-1:
                if ans[p]==0:
                    ans[kr]=1
                    qu.append(kr)
                else:
                    ans[kr]=0
                    qu.append(kr)
    for j in range(n):
        print(ans[j])
else:
    # No odd edges at all: a single colour works for every vertex.
    for j in range(n):
        print(0)
72671747176 | import shutil
from pathlib import Path
import sh
import pytest
@pytest.fixture
def workdir(tmpdir, request):
    """Copy the directory containing the test module into tmpdir/case and
    return the copy's path, so tests can mutate files freely."""
    dir_ = Path(request.fspath).parent
    tdir = Path(tmpdir)
    return shutil.copytree(dir_, tdir / "case")
@pytest.fixture
def run(workdir, request):
    """Return a callable that runs the test case's shell script.

    It looks for ``<name>.sh`` in the copied working directory (``name`` is
    the test module name without the ``test_`` prefix) and falls back to
    ``run.sh``.  A non-zero exit is not raised: the sh.ErrorReturnCode is
    returned as the result instead, so tests can assert on failures too.
    Decoded, stripped output is attached as ``stdout_`` / ``stderr_``.
    """
    def wrapper():
        test_name = Path(request.fspath).stem.replace("test_", "")
        try:
            path = Path(workdir) / f"{test_name}.sh"
            if path.is_file():
                res = sh.sh(str(path), _cwd=workdir)
            else:
                res = sh.sh(f"{workdir}/run.sh", _cwd=workdir)
        except sh.ErrorReturnCode as erc:
            print(erc)
            res = erc
        res.stdout_ = res.stdout.decode("utf-8").strip()
        res.stderr_ = res.stderr.decode("utf-8").strip()
        return res
    return wrapper
@pytest.fixture
def result(run):
    """Eagerly execute the case script once and expose the result object."""
    return run()
@pytest.fixture
def stdout(result):
    """Decoded, stripped stdout of the executed case script."""
    return result.stdout_
@pytest.fixture
def stderr(result):
    """Decoded, stripped stderr of the executed case script."""
    return result.stderr_
@pytest.fixture
def get_file(tmpdir):
    """Fixture returning a helper that reads a file from the copied test
    case directory (tmpdir/case) and returns its stripped text content."""
    case_dir = Path(tmpdir) / "case"

    def read_case_file(relative_path):
        return (case_dir / relative_path).read_text().strip()

    return read_case_file
| vantage-org/vantage | tests/conftest.py | conftest.py | py | 1,164 | python | en | code | 1 | github-code | 90 |
2033540297 | import re
class SearchParser(object):
    """Parses free-form search text like ``"name: Bolt, rarity:mythic"``
    into a dict of search criteria."""

    # A recognised field name, a colon, then a lazy value terminated by a
    # comma or end of string.  Compiled once at class level instead of on
    # every call.
    _PATTERN = re.compile(r'(name|cmc|rarity|text):(.*?)(,|$)', re.IGNORECASE)

    @staticmethod
    def get_dict(text):
        """Return a dict mapping each recognised field to its value.

        Bug fix: the pattern matches field names case-insensitively, but the
        old code stored the key verbatim (so "Name:x" produced a 'Name' key)
        and compared the rarity value case-sensitively.  Keys are now
        normalised to lowercase and "mythic" is recognised in any case.
        """
        d = {}
        for search_for, data, _ in SearchParser._PATTERN.findall(text):
            # Drop whitespace the lazy wildcard may have captured around the value.
            data = data.strip()
            key = search_for.lower()
            # For some reason the API calls mythic-rarity cards "special".
            if key == 'rarity' and data.lower() == 'mythic':
                data = 'special'
            d[key] = data
        return d
if __name__ == "__main__":
text = input("Input some text to search: ")
print(SearchParser.get_dict(text))
| ToxicGLaDOS/magic-collection-tracker | searchparser.py | searchparser.py | py | 896 | python | en | code | 0 | github-code | 90 |
8909384587 | '''
File name : objTracking.py
Description : Main file for object tracking
Author : Rahmad Sadli
Date created : 20/02/2020
Python Version : 3.7
'''
import cv2
from Detector import detect
from KalmanFilter import KalmanFilter
import math
import gc
def dist(x, y, x1, y1):
    """Euclidean distance between points (x, y) and (x1, y1)."""
    # math.hypot is the stdlib idiom for this and is numerically more
    # robust than the explicit sqrt-of-squares formula.
    return math.hypot(x - x1, y - y1)
def getClosestKF(x, y, predicted_x, predicted_y, distance_threshold):
    """Return the index of the predicted position closest to (x, y), or -1
    if no prediction lies within distance_threshold.

    predicted_x / predicted_y are parallel lists of coordinates, one pair
    per Kalman filter.
    """
    index = -1
    # Bug fix: the old sentinel was the hard-coded value 4444 ("length of
    # diagonal of 4k image"), which silently capped matches and coupled
    # this function to a maximum frame size.  math.inf removes that cap.
    minDist = math.inf
    for i, (px, py) in enumerate(zip(predicted_x, predicted_y)):
        currDistance = dist(x, y, px, py)
        if currDistance < minDist:
            minDist = currDistance
            index = i
    # Accept the nearest candidate only if it is within the threshold.
    return index if minDist <= distance_threshold else -1
def predictKFs(KFs):
    """Run predict() on every Kalman filter and collect the coordinates as
    two parallel lists (xs, ys), in the same order as KFs."""
    xs = []
    ys = []
    for kf in KFs:
        px, py = kf.predict()
        xs.append(px)
        ys.append(py)
    return xs, ys
def main():
    """Track circular objects in a video: detect centroids per frame,
    associate each with the nearest Kalman filter (spawning new filters for
    unmatched detections), draw measured/predicted/estimated positions, and
    write the annotated frames to trackedObjects.avi."""
    # Create opencv video capture object
    # VideoCap = cv2.VideoCapture('./video/randomball.avi')
    VideoCap = cv2.VideoCapture('./video/multi.mp4')
    # VideoCap = cv2.VideoCapture('./video/cars2.mp4')
    frame_width = int(VideoCap.get(3))
    frame_height = int(VideoCap.get(4))
    fps = VideoCap.get(cv2.CAP_PROP_FPS)
    outVid = cv2.VideoWriter('trackedObjects.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps,
                             (frame_width, frame_height))
    # Frame diagonal; used below to scale the association threshold.
    largestDistance = math.sqrt(frame_width**2+frame_height**2)
    # Variable used to control the speed of reading the video
    ControlSpeedVar = 100  # Lowest: 1 - Highest:100
    HiSpeed = 100
    # Create KalmanFilter object KF
    #KalmanFilter(dt, u_x, u_y, std_acc, x_std_meas, y_std_meas)
    # debugMode = 1
    debugMode = 0
    KFs = []        # active Kalman filters, one per tracked object
    KFs_used = []   # parallel flags: was each filter matched this frame?
    j = 0
    while(True):
        j += 1
        # Read frame
        ret, frame = VideoCap.read()
        if not ret and cv2.waitKey(2) & 0xFF == ord('q'):
            outVid.release()
            VideoCap.release()
            cv2.destroyAllWindows()
            break
        # Detect object
        try:
            centers = detect(frame, debugMode)
        except:
            # Any detection failure (e.g. end of stream) ends the run.
            outVid.release()
            VideoCap.release()
            cv2.destroyAllWindows()
            break
        # If centroids are detected then track them
        if (len(centers) > 0):
            # Drop Kalman filters that matched nothing last frame.
            temp = KFs.copy()
            KFs = []
            for i in range(len(temp)):
                if(KFs_used[i]):
                    KFs.append(temp[i])
            gc.collect()
            # Predict all filters once, then greedily match detections.
            predicted_x, predicted_y = predictKFs(KFs)
            KFs_used = [False]*len(KFs)
            for i in range(len(centers)):
                # Draw the detected circle
                cv2.circle(frame, (int(centers[i][0]), int(
                    centers[i][1])), 10, (0, 191, 255), 2)
                # Association threshold: 1/24 of the frame diagonal.
                curr_KF = getClosestKF(
                    centers[i][0], centers[i][1], predicted_x, predicted_y, largestDistance/24)
                if(curr_KF < 0):
                    # No filter close enough: start tracking a new object.
                    KF = KalmanFilter(1.0/fps, 1, 1, 1, 0.1, 0.1)
                    x, y = KF.predict()
                    KFs.append(KF)
                    KFs_used.append(True)
                else:
                    KF = KFs[curr_KF]
                    KFs_used[curr_KF] = True
                    x, y = predicted_x[curr_KF], predicted_y[curr_KF]
                # Draw a rectangle as the predicted object position
                # NOTE(review): (255, 0, ) is a 2-element tuple; the matching
                # label below uses (255, 0, 0), so this looks like a truncated
                # blue BGR triple -- confirm.
                x, y = int(x), int(y)
                cv2.rectangle(frame, (x - 15, y - 15),
                              (x + 15, y + 15), (255, 0, ), 2)
                # Correct the filter with the measured centroid.
                (x1, y1) = KF.update(centers[i])
                x1, y1 = int(x1), int(y1)
                # Draw a rectangle as the estimated object position
                cv2.rectangle(frame, (x1 - 15, y1 - 15),
                              (x1 + 15, y1 + 15), (0, 0, 255), 2)
                cv2.putText(frame, "Estimated Position"+str(i),
                            (x1 + 15, y1 + 10), 0, 0.5, (0, 0, 255), 2)
                cv2.putText(frame, "Predicted Position"+str(i),
                            (x + 15, y), 0, 0.5, (255, 0, 0), 2)
                cv2.putText(frame, "Measured Position"+str(i),
                            (int(centers[i][0] + 15), int(centers[i][1] - 15)), 0, 0.5, (0, 191, 255), 2)
        cv2.imshow('image', frame)
        outVid.write(frame)
        # Larger ControlSpeedVar -> shorter wait -> faster playback.
        cv2.waitKey(HiSpeed-ControlSpeedVar+1)
if __name__ == "__main__":
# execute main
main()
| hatemgahmed/Circular-Objects-Motion-Prediction | objTracking.py | objTracking.py | py | 4,700 | python | en | code | 1 | github-code | 90 |
38330326304 | from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Legacy Django 1.x URLconf: django.conf.urls.defaults and patterns() were
# deprecated in 1.4 and removed in 1.8; view callables are referenced by
# dotted string and resolved lazily.
urlpatterns = patterns('',
    url(r'^$', 'views.index'),
    url(r'^maquinas/$', 'ping.views.index'),
    url(r'^prendidas/$', 'ping.views.prendidas'),
    url(r'^todas/$', 'ping.views.todas'),
    url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
    #url(r'^login/$', 'ping.login.index'),
    #url(r'^ping/(?P<maquina_id>\d+)/$', 'ping.views.detail'),
    #url(r'^ping/(?P<maquina_id>\d+)/results/$', 'ping.views.results'),
    #url(r'^ping/(?P<maquina_id>\d+)/vote/$', 'ping.views.vote'),
    url(r'^admin/', include(admin.site.urls)),
)
| narval/mactenimiento | urls.py | urls.py | py | 702 | python | en | code | 0 | github-code | 90 |
74132079337 | def task():
inputText = open("day13.txt", "r").readlines()
timestamp = inputText[0]
buses = inputText[1]
timestamp = timestamp.strip()
buses = buses.strip()
timestamp = int(timestamp)
IDs = []
buses = buses.split(",")
for bus in buses:
if bus != "x":
IDs.append(int(bus))
else:
IDs.append(None)
modulos = []
t = -1
for el in IDs:
t += 1
if el:
modulos.append((el, el - (t % el) if (t % el) != 0 else 0))
modulos.sort()
modulos.reverse()
answer = modulos[0][1]
n = modulos[0][0]
for el in modulos[1:]:
divisor = el[0]
remainder = el[1]
while answer % divisor != remainder:
answer += n
n *= divisor
return answer
print(task())
| klukas17/AoC-2020 | day13-part2.py | day13-part2.py | py | 858 | python | en | code | 0 | github-code | 90 |
30392604218 | import discord
from discord.ext import commands
import random
description = '''An example bot to showcase the discord.ext.commands extension
module.
There are a number of utility commands being showcased here.'''
intents = discord.Intents.default()
intents.members = True
intents.message_content = True
bot = commands.Bot(command_prefix='$', description=description, intents=intents)
@bot.event
async def on_ready():
    """Log the bot's identity once the gateway connection is ready."""
    print(f'Logged in as {bot.user} (ID: {bot.user.id})')
    print('------')
@bot.command()
@commands.has_permissions(manage_messages=True)
async def clear(ctx, amount=20):
    """Bulk-delete up to *amount* recent messages from the invoking channel.

    Usage: ``$clear [amount]`` (defaults to 20).  The caller needs the
    Manage Messages permission.
    """
    await ctx.channel.purge(limit=amount)
bot.run('token')
| VXT08/M1Y4- | clear.py | clear.py | py | 659 | python | en | code | 0 | github-code | 90 |
71605598058 |
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from customLoader import loadUi
import os
import time
import sys
import ctypes
import numpy
rng = numpy.random.default_rng()
here = os.path.dirname(__file__)
def random_color():
    """Return a random RGB colour: three floats drawn uniformly from [0, 245)."""
    channels = rng.random((1, 3))
    return channels[0] * 245
def nudge_color(color):
    """Return *color* perturbed per-channel (normal, sigma=2), pushed back
    toward a total brightness between 150 and 600, and clipped to 0-255."""
    nudged = rng.normal(color, 2)
    total = nudged.sum()
    if total > 600:
        # NOTE(review): subtracting the overshoot from *every* channel drops
        # the sum by 3x the overshoot (and symmetrically below); presumably
        # intentional for the visual effect -- confirm.
        nudged = nudged - (total-600)
    elif total < 150:
        nudged = nudged + (150-total)
    return numpy.clip(nudged, 0, 255)
class SceneUpdateWorker(QThread):
    """Background thread that grows a coloured region across the Cell grid.

    Starting from ``start_item`` (or a random cell if None), it repeatedly
    colours uncoloured neighbours with slightly "nudged" colours, emitting
    ``status`` for every cell it touches and ``finished`` once no frontier
    remains.  time.sleep calls pace the animation so the GUI can repaint.
    """
    # status(cell, worker): a cell was (re)coloured.  finished(worker): done.
    status = Signal(object, object)
    finished = Signal(object)

    def __init__(self, parent=None):
        QThread.__init__(self, parent)
        self.parent = parent
        self.start_item = None   # optional seed cell; random cell if None
        self.my_cells = set()    # cells this worker currently claims
        self.my_edges = []

    def update_neighbors(self, source, depth=0):
        """Random-walk outward from *source*, colouring uncoloured cells
        until the walk has no uncoloured neighbours left (the depth argument
        only gates re-entry past 300 accumulated steps)."""
        if depth >= 300:
            return source
        depth += 1
        time.sleep(.02)
        self.my_cells.add(source)
        # Frontier: adjacent cells never coloured by any worker.
        neighbors = [n for n in source.neighbors if n.updated < 1]
        if not neighbors:
            neighbors = self.spread()
        while neighbors:
            time.sleep(.03)
            depth += 1
            source = rng.choice(neighbors)
            if source not in self.my_cells:
                # Cell claimed by another worker: blend from surroundings first.
                source.set_color(source.bleed(force=True))
            source.set_color(nudge_color(source.color))
            self.status.emit(source, self)
            self.my_cells.add(source)
            # Periodically expand along the whole frontier, not just the walk.
            if depth % 30 == 0:
                self.spread()
            neighbors = [n for n in source.neighbors if n.updated < 1]
            if not neighbors:
                neighbors = self.spread()
        return source

    def edges(self):
        # Claimed cells that still border at least one uncoloured cell.
        return [c for c in self.my_cells.copy() if c.is_edge()]

    def spread(self):
        """Expand from a random sample of the frontier: colour one uncoloured
        neighbour per sampled edge cell and return the newly coloured cells."""
        neighbors = self.edges()
        # Shrink the claim set to just the frontier.
        self.my_cells = set(neighbors)
        rng.shuffle(neighbors)
        expanded = []
        # Sample at most 10% of the frontier, capped at 100 cells.
        for n in neighbors[:min(int(len(neighbors)/10), 100)]:
            fringes = [f for f in n.neighbors if f.updated < 1]
            if not fringes:
                continue
            bleeder = rng.choice(fringes)
            # A coloured neighbour outside our claim means another region
            # borders here: blend rather than nudge to smooth the seam.
            stranger = [f for f in bleeder.neighbors if f.updated and f not in self.my_cells]
            if stranger:
                bleeder.set_color(bleeder.average_color(bleeder.get_surrounding_color()))
            else:
                bleeder.set_color(nudge_color(n.color))
            self.my_cells.add(bleeder)
            self.status.emit(bleeder, self)
            expanded.append(bleeder)
            #time.sleep(.03)
        return expanded

    def run(self):
        """Thread body: seed (given or random cell), then walk/spread until
        the frontier is exhausted, finally emitting ``finished``."""
        neighbors = True
        if self.start_item:
            self.my_cells.add(self.start_item)
        else:
            # Pick a random cell; give it a random colour if still black.
            row = rng.choice(list(range(len(self.parent.data))))
            items = self.parent.data[row]
            item = rng.choice(items)
            if not item.color.sum():
                item.set_color(random_color())
            self.my_cells.add(item)
            self.status.emit(item, self)
        while neighbors:
            neighbors = self.edges()
            if neighbors:
                last = self.update_neighbors(rng.choice(neighbors))
                # Smooth the last visited cell into its surroundings.
                last.set_color(last.average_color(last.get_surrounding_color()))
                self.status.emit(last, self)
        self.finished.emit(self)

    def stop(self):
        # Hard-stop the thread (no cooperative cancellation in this design).
        self.terminate()
class Cell(QGraphicsRectItem):
    """One 2x2-pixel cell of the colour-diffusion grid.

    ``location`` is [column, row] into the parent's square ``data`` grid;
    ``color`` is an RGB float array (all zeros = not yet coloured);
    ``updated`` counts how many times a worker has coloured the cell.
    """

    # Neighbour offsets as (d_col, d_row), in the same order the previous
    # hand-unrolled code appended them: W, NW, SW, E, NE, SE, N, S.
    _NEIGHBOR_OFFSETS = (
        (-1, 0), (-1, -1), (-1, 1),
        (1, 0), (1, -1), (1, 1),
        (0, -1), (0, 1),
    )

    def __init__(self, location, parent=None):
        super(Cell, self).__init__(None)
        self.parent = parent
        self.location = location
        self.setRect(QRect(0, 0, 2, 2))
        # Grid coordinates -> scene pixels: 2 px per cell plus a 2 px margin.
        self.setX(self.location[0] * 2 + 2)
        self.setY(self.location[1] * 2 + 2)
        self.chance = 3
        self.color = numpy.array([0.0, 0.0, 0.0])
        self.neighbors = []
        self.updated = 0
        self.edge = True
        self.setPen(Qt.NoPen)
        self.setBrush(QColor(*self.color.tolist()))

    def discover_neighbors(self):
        """Populate ``self.neighbors`` with the up-to-8 adjacent cells.

        Replaces the previous eight hand-unrolled bounds-checked branches
        with a single offset table; the neighbour ordering is unchanged.
        """
        maxlen = len(self.parent.data) - 1
        self.neighbors = []
        col, row = self.location
        for d_col, d_row in self._NEIGHBOR_OFFSETS:
            c, r = col + d_col, row + d_row
            if 0 <= c <= maxlen and 0 <= r <= maxlen:
                self.neighbors.append(self.parent.data[r][c])

    def get_surrounding_color(self):
        """Colours of all neighbours that have already been coloured."""
        return [n.color for n in self.neighbors if n.color.sum()]

    def average_color(self, colors):
        """Component-wise mean of *colors*; black if the list is empty."""
        if not len(colors):
            return numpy.array([0.0, 0.0, 0.0])
        return numpy.add.reduce(colors) / len(colors)

    def is_edge(self):
        """True while at least one neighbour is still uncoloured.

        Once every neighbour is coloured the flag latches False (a cell
        never becomes an edge again).
        """
        if self.edge:
            if len(self.get_surrounding_color()) == len(self.neighbors):
                self.edge = False
        return self.edge

    def bleed(self, force=False):
        """Adopt the average neighbour colour if uncoloured (or if *force*);
        return the resulting colour."""
        if not self.color.sum() or force:
            surrounding = self.get_surrounding_color()
            self.color = self.average_color(surrounding)
        return self.color

    def boundingRect(self):
        # Fixed 2x2 px footprint keeps scene-index updates cheap.
        return QRect(0, 0, 2, 2)

    def set_color(self, color):
        """Set both the painted brush and the stored RGB array."""
        self.setBrush(QColor(*color.tolist()))
        self.color = color
class MyMainWindow(QMainWindow):
    """Main window: hosts the QGraphicsScene grid of Cells and manages the
    pool of SceneUpdateWorker threads that colour it."""

    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        loadUi(r"%s\active_scene.ui" % here, self)
        self.setWindowIcon(QIcon(r"%s\app_icon.png" % here))
        # Give the process its own taskbar identity on Windows.
        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID('active_scene')
        self.active_scene = QGraphicsScene(self)
        self.graphix_view.setScene(self.active_scene)
        self.scene_updaters = []      # live worker threads
        self.known = set()            # cells already handled via status
        self.items_to_update = set()  # cells never coloured by any worker
        self.data = []                # 2D grid of Cell items [row][col]
        self.timer = QTimer(self)

    def closeEvent(self, event):
        # Kill any still-running workers so the app exits cleanly.
        for updater in self.scene_updaters:
            if updater.isRunning():
                updater.stop()

    def wheelEvent(self, event):
        # zoomIn factor
        factor = 1.15
        # zoomOut factor
        if event.angleDelta().y() < 0:
            factor = 1.0 / factor
        self.graphix_view.scale(factor, factor)
        #self.graphix_view.update()
        event.accept()

    def generate(self):
        """Build the 500x500 Cell grid, add it to the scene, and launch the
        initial three colouring workers."""
        self.data = []
        for r in list(range(500)):
            row = []
            for c in list(range(500)):
                cell = Cell([c, r], parent=self)
                row.append(cell)
                self.items_to_update.add(cell)
            self.data.append(row)
        # Wire up neighbours only after the whole grid exists.
        for row in list(range(len(self.data))):
            items = self.data[row]
            for c in list(range(len(items))):
                item = items[c]
                item.discover_neighbors()
                self.active_scene.addItem(item)
        self.active_scene.setSceneRect(0, 0, (10 + 2*len(self.data)), (10 + 2*len(self.data)))
        self.graphix_view.setSceneRect(self.active_scene.sceneRect())
        self.graphix_view.fitInView(self.active_scene.sceneRect(), Qt.KeepAspectRatio)
        #QCoreApplication.processEvents()
        for i in list(range(3)):
            scene_updater = SceneUpdateWorker(parent=self)
            scene_updater.status.connect(self.update_activity)
            scene_updater.finished.connect(self.finish_activity)
            scene_updater.start()
            self.scene_updaters.append(scene_updater)
            # Stagger the workers so they seed different regions.
            time.sleep(.1)
        print('active threads: %d' % len(self.scene_updaters))
        #self.timer.timeout.connect(self.do_updates)
        #self.timer.start(750)

    def do_updates(self):
        # Timer-driven full-scene repaint (currently disabled in generate()).
        self.active_scene.update()
        #self.timer.setInterval(350*len(self.scene_updaters))

    def update_activity(self, item, thread):
        """status-signal handler: mark *item* as coloured and, rarely,
        spawn an extra worker (up to 5 total)."""
        if item in self.known:
            return
        self.known.add(item)
        item.update()
        if item in self.items_to_update:
            self.items_to_update.remove(item)
        item.updated = item.updated + 1
        if len(self.scene_updaters) < 5:
            # ~2-in-100000 chance per new cell to spawn another worker.
            chance = rng.integers(100000, size=1)[0]
            if chance < 2:
                # chance==0 seeds from this worker's frontier,
                # chance==1 from any still-uncoloured cell.
                possible = thread.edges()
                if chance == 1:
                    possible = list(self.items_to_update)
                if len(possible):
                    new_thread = SceneUpdateWorker(parent=self)
                    new_thread.my_cells = thread.my_cells
                    new_thread.start_item = rng.choice(possible)
                    new_thread.status.connect(self.update_activity)
                    new_thread.finished.connect(self.finish_activity)
                    new_thread.start()
                    self.scene_updaters.append(new_thread)
                    print('active threads: %d' % len(self.scene_updaters))

    def finish_activity(self, thread):
        """finished-signal handler: retire *thread*; when the last worker
        ends, bleed any remaining uncoloured cells from their neighbours."""
        if thread in self.scene_updaters:
            self.scene_updaters.remove(thread)
        if thread.isRunning():
            thread.stop()
            thread.quit()
        print('active threads: %d' % len(self.scene_updaters))
        if len(self.scene_updaters) < 1:
            for row in list(range(len(self.data))):
                items = self.data[row]
                for c in list(range(len(items))):
                    items[c].bleed()
                    items[c].update()
def launch_it():
    """Create the Qt application, show the main window, build the grid,
    and enter the event loop until the app exits."""
    qt_app = QApplication([])
    main_window = MyMainWindow()
    main_window.show()
    main_window.generate()
    sys.exit(qt_app.exec_())
if __name__ == "__main__":
launch_it()
| Axident/active_scene | active_scene.py | active_scene.py | py | 10,494 | python | en | code | 0 | github-code | 90 |
22428291776 | """converts binary to decimal and vice versa"""
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Convert numbers from binary to decimal and vice versa')
    parser.add_argument('number')
    # NOTE(review): the help string is missing its closing parenthesis.
    parser.add_argument('-bin', '--bin', action='store_true', help='change from binary to decimal (default: from decimal to binary')
    args = parser.parse_args()
    if args.bin:
        # Interpret the argument as base-2 and print its base-10 value.
        print(int(args.number, 2))
    else:
        # Interpret the argument as base-10 and print its base-2 form.
        print("{0:b}".format(int(args.number)))
| wondermike221/megaProjectIdeas | numbers/b2d.py | b2d.py | py | 510 | python | en | code | 0 | github-code | 90 |
24471101988 | import transformers
import json
model = 'Helsinki-NLP/opus-mt-en-de'
# Read files
with open('transcript.srt', 'r', encoding='utf-8') as f:
transcript_srt = f.read()
with open('transcript.json', 'r') as f:
transcript = json.load(f)
# Prepare marked english text
english_text = transcript_srt.split('\n')
timestamps = english_text[1::4]
text_chunks = english_text[2::4]
english_text_marked = ''
for i in range(len(text_chunks)):
english_text_marked += f' (T1M3STMP_{i + 1}) '
english_text_marked += text_chunks[i]
english_text_marked = english_text_marked.split('. ')
for i in range(len(english_text_marked) - 1):
english_text_marked[i] += '.'
english_text_marked[0] = english_text_marked[0][1:]
# Translate text
translator = transformers.pipeline('translation', model='Helsinki-NLP/opus-mt-en-de', device='cuda')
translation = translator(english_text_marked)
# Post process translated text
german_text_marked = ''
for i in range(len(translation)):
german_text_marked += ' ' + translation[i]['translation_text']
german_text_marked = german_text_marked[1:]
german_text = german_text_marked.split('(T1M3STMP_')
german_text.pop(0)
if len(german_text) == len(timestamps):
print(f'OK: Number of text chunks ({len(german_text)}) == number of timestamps ({len(timestamps)})\n'
f'Manual postprocessing of translated transcript is not required.')
else:
print(f'WARNING: Number of text chunks ({len(german_text)}) != number of timestamps ({len(timestamps)})\n'
f'Manual postprocessing of translated transcript is required.')
# Add SRT timestamps to transcript.json
transcript['timestamps_srt'] = timestamps
with open('transcript.json', 'w') as f:
json.dump(transcript, f)
# Export TXT files
with open('translation_raw.txt', 'w', encoding='utf-8') as f:
f.write('\n'.join(german_text))
with open('transcript.txt', 'w', encoding='utf-8') as f:
f.write('\n'.join(text_chunks))
| lazlo-bleker/subtitle-smith | 3_translate_srt.py | 3_translate_srt.py | py | 1,933 | python | en | code | 0 | github-code | 90 |
18330972549 | N = int(input())
S = input()
S = list(S)
d = []
for n in range(1, N):
if S[n-1] == S[n]:
d.append(n)
cnt = 0
for i in d:
i -= cnt
del S[i]
cnt += 1
print(len(S)) | Aasthaengg/IBMdataset | Python_codes/p02887/s526794800.py | s526794800.py | py | 192 | python | en | code | 0 | github-code | 90 |
13735976420 | import cv2
img = cv2.imread('20220811/white.jpg', cv2.IMREAD_COLOR)
print(img.shape)
for i in range(0, 280, 10):
img[:, :i]=[255, 255, 0]
cv2.imshow('white', img)
a = cv2.waitKey(100)
print(a)
cv2.imwrite('white_copy.jpg', img)
img = cv2.imread('20220811/white.jpg', cv2.IMREAD_COLOR)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print(img_gray.shape)
cv2.imshow('white', img_gray)
cv2.waitKey(0)
cv2.imwrite('white_gray.jpg', img_gray) | bbangdoyoon/dip | bigdata_image_work/20220811/ex01.py | ex01.py | py | 459 | python | en | code | 0 | github-code | 90 |
42487721231 | #!/usr/bin/env python3
import sys
from enum import Enum
import numpy as np
import attr
import tqdm
def read(path):
    """Read the puzzle input file at *path* and return its content as an int."""
    with open(path, 'r') as input_file:
        contents = input_file.read()
    return int(contents.strip())
@attr.s
class Spiral():
    """Square number spiral (AoC 2017 day 3) stored in a 2D numpy array.

    ``until(n)`` numbers cells sequentially (part 1); ``until(n,
    neighbours=True)`` fills each cell with the sum of its already-filled
    neighbours (part 2).
    """
    arr = attr.ib(default=np.array(1, ndmin=2))

    # Walk order of the spiral: right, up, left, down.
    Dir = Enum('Dir', 'R U L D', start=0)

    def _neighbourhood(self, index):
        """Sum of the values in the 8 cells around *index*.

        NOTE(review): positions past the upper array edge raise IndexError
        and are skipped, but *negative* indices wrap around in numpy rather
        than raising -- confirm the cursor path never makes that matter.
        """
        l = [list(index) for _ in range(8)]
        # E
        l[0][1] = l[0][1] + 1
        # N
        l[1][0] = l[1][0] - 1
        # W
        l[2][1] = l[2][1] - 1
        # S
        l[3][0] = l[3][0] + 1
        # NE
        l[4][0] = l[4][0] - 1
        l[4][1] = l[4][1] + 1
        # NW
        l[5][0] = l[5][0] - 1
        l[5][1] = l[5][1] - 1
        # SW
        l[6][0] = l[6][0] + 1
        l[6][1] = l[6][1] - 1
        # SE
        l[7][0] = l[7][0] + 1
        l[7][1] = l[7][1] + 1
        acc = 0
        for i in l:
            try:
                acc += self.arr[tuple(i)]
            except IndexError:
                pass
        return acc

    @classmethod
    def _directions(cls):
        """Yield the infinite spiral direction sequence R, U, LL, DD, RRR,
        UUU, ...: the run length grows by one every two turns."""
        index = 0
        d = cls.Dir.R
        while True:
            index += 1
            steps = int(np.ceil(index / 2))
            for _ in range(steps):
                yield d
            d = cls.Dir((d.value + 1) % 4)

    @classmethod
    def _movecursor(cls, cursor, direction):
        """Return *cursor* moved one cell in *direction* ((row, col) order)."""
        new_cursor = list(cursor)
        if direction == cls.Dir.R:
            new_cursor[1] += 1
        elif direction == cls.Dir.U:
            new_cursor[0] -= 1
        elif direction == cls.Dir.L:
            new_cursor[1] -= 1
        elif direction == cls.Dir.D:
            new_cursor[0] += 1
        return tuple(new_cursor)

    @classmethod
    def until(cls, n, neighbours=False):
        """Build a spiral, stopping once a cell value reaches *n*."""
        size = int(np.ceil(np.sqrt(n)))
        blank = np.zeros((size, size), dtype=int)
        spiral = cls(blank)
        cursor = (0, 0)
        spiral.arr[cursor] = 1
        directions = spiral._directions()
        for i, d in enumerate(directions):
            cursor = spiral._movecursor(cursor, d)
            if not neighbours:
                spiral.arr[cursor] = i + 2
            else:
                spiral.arr[cursor] = spiral._neighbourhood(cursor)
            if spiral.arr[cursor] >= n:
                break
        return spiral

    def max(self):
        """Largest value stored in the spiral."""
        return np.max(self.arr)

    def _remap(self, index):
        """Convert an array index to centre-relative coordinates by
        interpreting offsets past the midpoint as negative (wrap-around)."""
        coord = list(index)
        for d, c in enumerate(index):
            if c > self.arr.shape[d] // 2:
                coord[d] = c - self.arr.shape[d]
        return tuple(coord)

    def argmax(self):
        """Centre-relative coordinates of the maximum value."""
        index = np.where(self.arr == self.max())
        index = (index[0][0], index[1][0])
        index = self._remap(index)
        return index

    def dist(self, start=None, end=None):
        """Manhattan distance between *start* (default: argmax position)
        and *end* (default: the spiral origin)."""
        if start is None:
            start_idx = self.argmax()
        else:
            # Bug fix: the original left start_idx unassigned when a start
            # argument was supplied, raising NameError.
            start_idx = start
        if end is None:
            end_idx = (0, 0)
        else:
            end_idx = end
        # Bug fix: Manhattan distance is the sum of absolute per-axis
        # deltas; the original took abs() of the *sum*, which is wrong
        # whenever the two components have opposite signs.
        return abs(end_idx[0] - start_idx[0]) + abs(end_idx[1] - start_idx[1])
def main(filename):
    """Print both puzzle answers for the input file *filename*."""
    index = read(filename)
    # Part 1: Manhattan distance from the cell holding the input value.
    spiral = Spiral.until(index)
    print(spiral.dist())
    # Part 2: first neighbour-sum value that reaches the input.
    spiral2 = spiral.until(index, neighbours=True)
    print(spiral2.max())
if __name__ == "__main__":
main(sys.argv[1])
| treuherz/AdventOfCode | 17/3/solution.py | solution.py | py | 3,220 | python | en | code | 0 | github-code | 90 |
8941998147 |
from MoveZeroes import Solution
import pytest
from copy import deepcopy
SOL = Solution()
TEST_SUITS = [
([0,1,0,3,12], [1,3,12,0,0]),
([2,1], [2,1]),
([4,2,4,0,0,3,0,5,1,0], [4,2,4,3,5,1,0,0,0,0]),
]
@pytest.mark.parametrize(
"nums, ans",
deepcopy(TEST_SUITS)
)
def test(nums, ans):
SOL.moveZeroes(nums)
assert nums == ans
| hongtw/coding-life | leetcode/0283.Move-Zeroes/MoveZeroes_test.py | MoveZeroes_test.py | py | 361 | python | en | code | 1 | github-code | 90 |
18313732339 | # -*- coding: utf-8 -*-
import sys
import math
import os
import itertools
import string
import heapq
import _collections
from collections import Counter
from collections import defaultdict
from collections import deque
from functools import lru_cache
import bisect
import re
import queue
import decimal
class Scanner():
    """Thin static wrappers around sys.stdin for competitive-programming input."""
    @staticmethod
    def int():
        # One line -> int.
        return int(sys.stdin.readline().rstrip())
    @staticmethod
    def string():
        # One line with trailing whitespace stripped.
        return sys.stdin.readline().rstrip()
    @staticmethod
    def map_int():
        # One line of space-separated ints -> list of ints.
        return [int(x) for x in Scanner.string().split()]
    @staticmethod
    def string_list(n):
        # n lines -> list of strings.
        return [Scanner.string() for i in range(n)]
    @staticmethod
    def int_list_list(n):
        # n lines of space-separated ints -> list of int lists.
        return [Scanner.map_int() for i in range(n)]
    @staticmethod
    def int_cols_list(n):
        # n lines, one int each -> list of ints.
        return [Scanner.int() for i in range(n)]
class Math():
    """Small number-theory helpers for competitive-programming solutions."""

    @staticmethod
    def gcd(a, b):
        """Greatest common divisor via the iterative Euclidean algorithm."""
        while b != 0:
            a, b = b, a % b
        return a

    @staticmethod
    def lcm(a, b):
        """Least common multiple, computed through the gcd."""
        return (a * b) // Math.gcd(a, b)

    @staticmethod
    def divisor(n):
        """All divisors of n (unsorted), found in O(sqrt(n)) via pairs."""
        res = []
        for small in range(1, int(n ** 0.5) + 1):
            if n % small != 0:
                continue
            res.append(small)
            large = n // small
            if large != small:
                res.append(large)
        return res

    @staticmethod
    def round_up(a, b):
        """Ceiling division of a by b (negated floor-division trick)."""
        return -(-a // b)

    @staticmethod
    def is_prime(n):
        """Primality test by trial division over odd candidates up to sqrt(n)."""
        if n < 2:
            return False
        if n == 2:
            return True
        if n % 2 == 0:
            return False
        limit = int(n ** 0.5) + 1
        for candidate in range(3, limit + 1, 2):
            if n % candidate == 0:
                return False
        return True

    @staticmethod
    def fact(N):
        """Prime factorisation of N as {prime: exponent} (fact(1) == {1: 1})."""
        res = {}
        remainder = N
        for prime in range(2, int(N ** 0.5 + 1) + 1):
            exponent = 0
            while remainder % prime == 0:
                exponent += 1
                remainder //= prime
            if exponent > 0:
                res[prime] = exponent
        # Whatever is left after dividing out all small primes is prime itself.
        if remainder != 1:
            res[remainder] = 1
        if res == {}:
            res[N] = 1
        return res
def pop_count(x):
    """Return the number of set bits in the low 64 bits of x (SWAR popcount).

    Classic parallel bit count: adjacent bit pairs are summed, then nibble
    and byte sums, and the byte totals are folded down by shifting; the
    final mask extracts the 0-64 result from the low bits.
    """
    x = x - ((x >> 1) & 0x5555555555555555)                         # 2-bit sums
    x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)  # 4-bit sums
    x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f                         # 8-bit sums
    x = x + (x >> 8)
    x = x + (x >> 16)
    x = x + (x >> 32)
    return x & 0x0000007f  # total lives in the low 7 bits
MOD = int(1e09) + 7
INF = int(1e15)
class Edge:
    """Directed adjacency entry: target vertex ``to`` and edge index ``id``."""
    def __init__(self, to_, id_):
        self.to, self.id = to_, id_
ans = None
G = None
K = None
def dfs(to, c):
    """Colour the yet-uncoloured edges below vertex *to*.

    *c* is the colour of the edge entering *to*; each child edge receives
    the next colour cycling through 1..K, starting just after c, so edges
    meeting at a vertex never share a colour.  Reads the global adjacency
    list ``G`` and colour count ``K``; writes into the global ``ans``
    (colour per edge id, -1 = unassigned).
    """
    global G
    global ans
    # First candidate colour: the one after c, wrapping within 1..K.
    nc = c % K + 1
    for g in G[to]:
        if ans[g.id] != -1:
            continue
        ans[g.id] = nc
        dfs(g.to, nc)
        nc = nc % K + 1
def solve():
    """Read a tree from stdin and colour its edges with K = max degree
    colours so that edges sharing a vertex get distinct colours.

    Output: K on the first line, then one colour per edge in input order.
    """
    global G
    global ans
    global K
    N = Scanner.int()
    G = [[] for _ in range(N)]
    for i in range(N - 1):
        x, y = Scanner.map_int()
        x -= 1
        y -= 1
        G[x].append(Edge(y, i))
        G[y].append(Edge(x, i))
    # The chromatic index of a tree equals its maximum degree.
    K = 0
    for i in range(N):
        K = max(K, len(G[i]))
    ans = [-1 for _ in range(N - 1)]
    dfs(0, 0)
    print(K)
    print(*ans, sep='\n')
def main():
    """Entry point: raise the recursion limit (dfs may recurse N deep on a
    path-shaped tree) and run the single-test-case solver."""
    sys.setrecursionlimit(1000000)
    # sys.stdin = open("sample.txt")
    # T = Scanner.int()
    # for _ in range(T):
    #     solve()
    # print('YNeos'[not solve()::2])
    solve()
if __name__ == "__main__":
main()
| Aasthaengg/IBMdataset | Python_codes/p02850/s170382565.py | s170382565.py | py | 3,400 | python | en | code | 0 | github-code | 90 |
27921053891 | # -*- coding: utf-8 -*-
#
# PySceneDetect: Python-Based Video Scene Detector
# ---------------------------------------------------------------
# [ Site: http://www.bcastell.com/projects/PySceneDetect/ ]
# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]
# [ Documentation: http://pyscenedetect.readthedocs.org/ ]
#
# Copyright (C) 2014-2020 Brandon Castellano <http://www.bcastell.com>.
#
# PySceneDetect is licensed under the BSD 3-Clause License; see the included
# LICENSE file, or visit one of the following pages for details:
# - https://github.com/Breakthrough/PySceneDetect/
# - http://www.bcastell.com/projects/PySceneDetect/
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
""" ``scenedetect.frame_timecode`` Module
This module contains the :py:class:`FrameTimecode` object, which is used as a way for
PySceneDetect to store frame-accurate timestamps of each cut. This is done by also
specifying the video framerate with the timecode, allowing a frame number to be
converted to/from a floating-point number of seconds, or string in the form
`"HH:MM:SS[.nnn]"` where the `[.nnn]` part is optional.
See the following examples, or the :py:class:`FrameTimecode constructor <FrameTimecode>`.
Unit tests for the FrameTimecode object can be found in `tests/test_timecode.py`.
"""
# Standard Library Imports
import math
# PySceneDetect Library Imports
from scenedetect.platform import STRING_TYPE
MINIMUM_FRAMES_PER_SECOND_FLOAT = 1.0 / 1000.0
MINIMUM_FRAMES_PER_SECOND_DELTA_FLOAT = 1.0 / 100000
class FrameTimecode(object):
    """ Object for frame-based timecodes, using the video framerate
    to compute back and forth between frame number and second/timecode formats.
    The timecode argument is valid only if it complies with one of the following
    three types/formats:
    1) string: standard timecode HH:MM:SS[.nnn]:
    `str` in form 'HH:MM:SS' or 'HH:MM:SS.nnn', or
    `list`/`tuple` in form [HH, MM, SS] or [HH, MM, SS.nnn]
    2) float: number of seconds S[.SSS], where S >= 0.0:
    `float` in form S.SSS, or
    `str` in form 'Ss' or 'S.SSSs' (e.g. '5s', '1.234s')
    3) int: Exact number of frames N, where N >= 0:
    `int` in form `N`, or
    `str` in form 'N'
    Arguments:
    timecode (str, float, int, or FrameTimecode):  A timecode or frame
    number, given in any of the above valid formats/types.  This
    argument is always required.
    fps (float, or FrameTimecode, conditionally required): The framerate
    to base all frame to time arithmetic on (if FrameTimecode, copied
    from the passed framerate), to allow frame-accurate arithmetic. The
    framerate must be the same when combining FrameTimecode objects
    in operations. This argument is always required, unless **timecode**
    is a FrameTimecode.
    Raises:
    TypeError: Thrown if timecode is wrong type/format, or if fps is None
    or a type other than int or float.
    ValueError: Thrown when specifying a negative timecode or framerate.
    """
    def __init__(self, timecode=None, fps=None):
        # type: (Union[int, float, str, FrameTimecode], float,
        #        Union[int, float, str, FrameTimecode])
        # The following two properties are what is used to keep track of time
        # in a frame-specific manner.  Note that once the framerate is set,
        # the value should never be modified (only read if required).
        self.framerate = None
        self.frame_num = None
        # Copy constructor.  Only the timecode argument is used in this case.
        if isinstance(timecode, FrameTimecode):
            self.framerate = timecode.framerate
            self.frame_num = timecode.frame_num
            if fps is not None:
                raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')
        else:
            # Ensure other arguments are consistent with API.
            if fps is None:
                raise TypeError('Framerate (fps) is a required argument.')
            if isinstance(fps, FrameTimecode):
                fps = fps.framerate
            # Process the given framerate, if it was not already set.
            if not isinstance(fps, (int, float)):
                raise TypeError('Framerate must be of type int/float.')
            elif (isinstance(fps, int) and not fps > 0) or (
                    isinstance(fps, float) and not fps >= MINIMUM_FRAMES_PER_SECOND_FLOAT):
                raise ValueError('Framerate must be positive and greater than zero.')
            self.framerate = float(fps)
        # Process the timecode value, storing it as an exact number of frames.
        # Strings go through full HH:MM:SS[.nnn] / 'Ns' / 'N' parsing; other
        # types are handled as raw frame counts or seconds.
        if isinstance(timecode, (str, STRING_TYPE)):
            self.frame_num = self._parse_timecode_string(timecode)
        else:
            self.frame_num = self._parse_timecode_number(timecode)
        # Alternative formats under consideration (require unit tests before adding):
        # Standard timecode in list format [HH, MM, SS.nnn]
        #elif isinstance(timecode, (list, tuple)) and len(timecode) == 3:
        #    if any(not isinstance(x, (int, float)) for x in timecode):
        #        raise ValueError('Timecode components must be of type int/float.')
        #    hrs, mins, secs = timecode
        #    if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60
        #            and secs < 60):
        #        raise ValueError('Timecode components must be positive.')
        #    secs += (((hrs * 60.0) + mins) * 60.0)
        #    self.frame_num = int(secs * self.framerate)
    def get_frames(self):
        # type: () -> int
        """ Get the current time/position in number of frames. This is the
        equivalent of accessing the self.frame_num property (which, along
        with the specified framerate, forms the base for all of the other
        time measurement calculations, e.g. the :py:meth:`get_seconds` method).
        If using to compare a :py:class:`FrameTimecode` with a frame number,
        you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 10``).
        Returns:
        int: The current time in frames (the current frame number).
        """
        return int(self.frame_num)
    def get_framerate(self):
        # type: () -> float
        """ Get Framerate: Returns the framerate used by the FrameTimecode object.
        Returns:
        float: Framerate of the current FrameTimecode object, in frames per second.
        """
        return self.framerate
    def equal_framerate(self, fps):
        # type: (float) -> bool
        """ Equal Framerate: Determines if the passed framerate is equal to that of the
        FrameTimecode object.
        Arguments:
        fps:    Framerate (float) to compare against within the precision constant
        MINIMUM_FRAMES_PER_SECOND_DELTA_FLOAT defined in this module.
        Returns:
        bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.
        """
        return math.fabs(self.framerate - fps) < MINIMUM_FRAMES_PER_SECOND_DELTA_FLOAT
    def get_seconds(self):
        # type: () -> float
        """ Get the frame's position in number of seconds.
        If using to compare a :py:class:`FrameTimecode` with a frame number,
        you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 1.0``).
        Returns:
        float: The current time/position in seconds.
        """
        return float(self.frame_num) / self.framerate
    def get_timecode(self, precision=3, use_rounding=True):
        # type: (int, bool) -> str
        """ Get a formatted timecode string of the form HH:MM:SS[.nnn].
        Args:
        precision:     The number of decimal places to include in the output ``[.nnn]``.
        use_rounding:  True (default) to round the output to the desired precision.
        Returns:
        str: The current time in the form ``"HH:MM:SS[.nnn]"``.
        """
        # Compute hours and minutes based off of seconds, and update seconds.
        secs = self.get_seconds()
        base = 60.0 * 60.0
        hrs = int(secs / base)
        secs -= (hrs * base)
        base = 60.0
        mins = int(secs / base)
        secs -= (mins * base)
        # Convert seconds into string based on required precision.
        # NOTE(review): a value of secs that rounds up to exactly 60.0 will
        # produce a seconds field of '60' here rather than carrying into the
        # minutes component -- confirm whether upstream callers can hit this.
        if precision > 0:
            if use_rounding:
                secs = round(secs, precision)
                #secs = math.ceil(secs * (10**precision)) / float(10**precision)
            msec = format(secs, '.%df' % precision)[-precision:]
            secs = '%02d.%s' % (int(secs), msec)
        else:
            secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)
        # Return hours, minutes, and seconds as a formatted timecode string.
        return '%02d:%02d:%s' % (hrs, mins, secs)
    def previous_frame(self):
        # type: () -> FrameTimecode
        """
        Returns a new FrameTimecode for the frame before this one.
        :return: New FrameTimeCode object, one frame earlier
        """
        new_timecode = FrameTimecode(self)
        new_timecode.frame_num -= 1
        return new_timecode
    def _seconds_to_frames(self, seconds):
        # type: (float) -> int
        """ Converts the passed value seconds to the nearest number of frames using
        the current FrameTimecode object's FPS (self.framerate).
        Returns:
        Integer number of frames the passed number of seconds represents using
        the current FrameTimecode's framerate property.
        """
        # Truncates (does not round) the fractional frame, matching int().
        return int(seconds * self.framerate)
    def _parse_timecode_number(self, timecode):
        # type: (Union[int, float]) -> int
        """ Parses a timecode number, storing it as the exact number of frames.
        Can be passed as frame number (int), seconds (float)
        Raises:
        TypeError, ValueError
        """
        # Process the timecode value, storing it as an exact number of frames.
        # Exact number of frames N
        if isinstance(timecode, int):
            if timecode < 0:
                raise ValueError('Timecode frame number must be positive and greater than zero.')
            return timecode
        # Number of seconds S
        elif isinstance(timecode, float):
            if timecode < 0.0:
                raise ValueError('Timecode value must be positive and greater than zero.')
            return self._seconds_to_frames(timecode)
        # FrameTimecode
        elif isinstance(timecode, FrameTimecode):
            return timecode.frame_num
        elif timecode is None:
            raise TypeError('Timecode/frame number must be specified!')
        else:
            raise TypeError('Timecode format/type unrecognized.')
    def _parse_timecode_string(self, timecode_string):
        # type: (str) -> int
        """ Parses a string based on the three possible forms (in timecode format,
        as an integer number of frames, or floating-point seconds, ending with 's').
        Requires that the framerate property is set before calling this method.
        Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',
        '9000', '300s', and '300.0s' are all possible valid values, all representing
        a period of time equal to 5 minutes, 300 seconds, or 9000 frames (at 30 FPS).
        Raises:
        TypeError, ValueError
        """
        if self.framerate is None:
            raise TypeError('self.framerate must be set before calling _parse_timecode_string.')
        # Number of seconds S
        if timecode_string.endswith('s'):
            secs = timecode_string[:-1]
            if not secs.replace('.', '').isdigit():
                raise ValueError('All characters in timecode seconds string must be digits.')
            secs = float(secs)
            if secs < 0.0:
                raise ValueError('Timecode seconds value must be positive.')
            return int(secs * self.framerate)
        # Exact number of frames N
        elif timecode_string.isdigit():
            timecode = int(timecode_string)
            if timecode < 0:
                raise ValueError('Timecode frame number must be positive.')
            return timecode
        # Standard timecode in string format 'HH:MM:SS[.nnn]'
        else:
            tc_val = timecode_string.split(':')
            if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()
                    and tc_val[2].replace('.', '').isdigit()):
                raise ValueError('Unrecognized or improperly formatted timecode string.')
            hrs, mins = int(tc_val[0]), int(tc_val[1])
            secs = float(tc_val[2]) if '.' in tc_val[2] else int(tc_val[2])
            if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):
                raise ValueError('Invalid timecode range (values outside allowed range).')
            secs += (((hrs * 60.0) + mins) * 60.0)
            return int(secs * self.framerate)
    # Arithmetic: int operands are frames, float operands are seconds,
    # FrameTimecode operands must share this object's framerate.
    def __iadd__(self, other):
        # type: (Union[int, float, str, FrameTimecode]) -> FrameTimecode
        if isinstance(other, int):
            self.frame_num += other
        elif isinstance(other, FrameTimecode):
            if self.equal_framerate(other.framerate):
                self.frame_num += other.frame_num
            else:
                raise ValueError('FrameTimecode instances require equal framerate for addition.')
        # Check if value to add is in number of seconds.
        elif isinstance(other, float):
            self.frame_num += self._seconds_to_frames(other)
        else:
            raise TypeError('Unsupported type for performing addition with FrameTimecode.')
        if self.frame_num < 0:     # Required to allow adding negative seconds/frames.
            self.frame_num = 0
        return self
    def __add__(self, other):
        # type: (Union[int, float, str, FrameTimecode]) -> FrameTimecode
        to_return = FrameTimecode(timecode=self)
        to_return += other
        return to_return
    def __isub__(self, other):
        # type: (Union[int, float, str, FrameTimecode]) -> FrameTimecode
        if isinstance(other, int):
            self.frame_num -= other
        elif isinstance(other, FrameTimecode):
            if self.equal_framerate(other.framerate):
                self.frame_num -= other.frame_num
            else:
                raise ValueError('FrameTimecode instances require equal framerate for subtraction.')
        # Check if value to add is in number of seconds.
        elif isinstance(other, float):
            self.frame_num -= self._seconds_to_frames(other)
        else:
            raise TypeError('Unsupported type for performing subtraction with FrameTimecode.')
        # Subtraction clamps at frame 0 rather than going negative.
        if self.frame_num < 0:
            self.frame_num = 0
        return self
    def __sub__(self, other):
        # type: (Union[int, float, str, FrameTimecode]) -> FrameTimecode
        to_return = FrameTimecode(timecode=self)
        to_return -= other
        return to_return
    # Comparisons follow the same conventions as arithmetic; note that ==/!=
    # treat None as unequal, while mismatched framerates raise TypeError.
    def __eq__(self, other):
        # type: (Union[int, float, str, FrameTimecode]) -> bool
        if isinstance(other, int):
            return self.frame_num == other
        elif isinstance(other, float):
            return self.get_seconds() == other
        elif isinstance(other, str):
            return self.frame_num == self._parse_timecode_string(other)
        elif isinstance(other, FrameTimecode):
            if self.equal_framerate(other.framerate):
                return self.frame_num == other.frame_num
            else:
                raise TypeError(
                    'FrameTimecode objects must have the same framerate to be compared.')
        elif other is None:
            return False
        else:
            raise TypeError('Unsupported type for performing == with FrameTimecode.')
    def __ne__(self, other):
        # type: (Union[int, float, str, FrameTimecode]) -> bool
        return not self == other
    def __lt__(self, other):
        # type: (Union[int, float, str, FrameTimecode]) -> bool
        if isinstance(other, int):
            return self.frame_num < other
        elif isinstance(other, float):
            return self.get_seconds() < other
        elif isinstance(other, str):
            return self.frame_num < self._parse_timecode_string(other)
        elif isinstance(other, FrameTimecode):
            if self.equal_framerate(other.framerate):
                return self.frame_num < other.frame_num
            else:
                raise TypeError(
                    'FrameTimecode objects must have the same framerate to be compared.')
        #elif other is None:
        #    return False
        else:
            raise TypeError('Unsupported type for performing < with FrameTimecode.')
    def __le__(self, other):
        # type: (Union[int, float, str, FrameTimecode]) -> bool
        if isinstance(other, int):
            return self.frame_num <= other
        elif isinstance(other, float):
            return self.get_seconds() <= other
        elif isinstance(other, str):
            return self.frame_num <= self._parse_timecode_string(other)
        elif isinstance(other, FrameTimecode):
            if self.equal_framerate(other.framerate):
                return self.frame_num <= other.frame_num
            else:
                raise TypeError(
                    'FrameTimecode objects must have the same framerate to be compared.')
        #elif other is None:
        #    return False
        else:
            raise TypeError('Unsupported type for performing <= with FrameTimecode.')
    def __gt__(self, other):
        # type: (Union[int, float, str, FrameTimecode]) -> bool
        if isinstance(other, int):
            return self.frame_num > other
        elif isinstance(other, float):
            return self.get_seconds() > other
        elif isinstance(other, str):
            return self.frame_num > self._parse_timecode_string(other)
        elif isinstance(other, FrameTimecode):
            if self.equal_framerate(other.framerate):
                return self.frame_num > other.frame_num
            else:
                raise TypeError(
                    'FrameTimecode objects must have the same framerate to be compared.')
        #elif other is None:
        #    return False
        else:
            raise TypeError('Unsupported type (%s) for performing > with FrameTimecode.' %
                            type(other).__name__)
    def __ge__(self, other):
        # type: (Union[int, float, str, FrameTimecode]) -> bool
        if isinstance(other, int):
            return self.frame_num >= other
        elif isinstance(other, float):
            return self.get_seconds() >= other
        elif isinstance(other, str):
            return self.frame_num >= self._parse_timecode_string(other)
        elif isinstance(other, FrameTimecode):
            if self.equal_framerate(other.framerate):
                return self.frame_num >= other.frame_num
            else:
                raise TypeError(
                    'FrameTimecode objects must have the same framerate to be compared.')
        #elif other is None:
        #    return False
        else:
            raise TypeError('Unsupported type for performing >= with FrameTimecode.')
    def __int__(self):
        return self.frame_num
    def __float__(self):
        return self.get_seconds()
    def __str__(self):
        return self.get_timecode()
    def __repr__(self):
        return 'FrameTimecode(frame=%d, fps=%f)' % (self.frame_num, self.framerate)
| sibozhang/Text2Video | venv_vid2vid/lib/python3.7/site-packages/scenedetect/frame_timecode.py | frame_timecode.py | py | 20,620 | python | en | code | 381 | github-code | 90 |
6663385380 | from pathlib import Path
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, FileSystemEvent
import time
from datetime import datetime
class StoredEventHandler(FileSystemEventHandler):
    """Watchdog handler that records every event it receives, in order."""

    def __init__(self) -> None:
        super().__init__()
        # All FileSystemEvent objects observed so far (chronological).
        self.events = []

    def on_any_event(self, event):
        # Remember the event, then defer to the base-class dispatcher.
        self.events.append(event)
        return super().on_any_event(event)
class QuickObserver:
    """Context manager that records filesystem events under a directory.

    Example::

        with QuickObserver(".") as ob:
            os.mkdir("test_dir")
            with open("test_dir/a.txt", "w") as f:
                pass
        print(ob.events)

    The captured events are exposed through ``ob.events`` only after the
    context has exited.  ``stop_delay`` pauses on ``__exit__`` for a few
    seconds so the observer thread can notice the final fs changes.
    """

    def __init__(self, monitor_path, stop_delay=2):
        super().__init__()
        self._captured = None
        self._finished = False
        self._root = monitor_path
        self._delay = stop_delay

    @property
    def events(self):
        # Guard against reading results while the observer is still running.
        if not self._finished:
            raise Exception("`event` can only be accessed after leaving context!")
        return self._captured

    def __enter__(self):
        self._handler = StoredEventHandler()
        self._watcher = Observer()
        self._watcher.schedule(self._handler, self._root, recursive=True)
        self._finished = False
        self._watcher.start()
        return self

    def __exit__(self, exc, value, tb):
        # Give the observer thread time to flush pending events.
        time.sleep(self._delay)
        self._finished = True
        self._captured = self._handler.events
        self._watcher.stop()
        self._watcher.join()
def timestamp_now():
    """Return the current local time as a 'YYYYmmdd_HHMMSS_microseconds' string."""
    stamp_format = "%Y%m%d_%H%M%S_%f"
    return datetime.now().strftime(stamp_format)
# Aligned, human-friendly verbs for the known watchdog event types.
PREFIX_MAP = {
    "created": "Create",
    "modified": "Modify",
    "deleted": "Delete",
    "moved": "Move ",
    "closed": "Close "
}
def event_to_log(event: FileSystemEvent, relative_to: Path = None):
    """Render one watchdog event as a single human-readable log line."""
    # Unknown event types fall back to the raw event_type string.
    action = PREFIX_MAP.get(event.event_type, event.event_type)
    item = "dir " if event.is_directory else "file"
    src = Path(event.src_path)
    if relative_to:
        src = src.relative_to(relative_to)
    if event.event_type != "moved":
        return f"{action} {item}: {src}"
    # Moves carry a destination path as well.
    dest = Path(event.dest_path)
    if relative_to:
        dest = dest.relative_to(relative_to)
    return f"{action} {item}: {src} -> {dest}"
def generate_report(
    title: str, events: list[FileSystemEvent], relative_to: Path = None
):
    """Build a multi-line report: timestamp line, title line, one line per event.

    Args:
        title: Heading placed directly after the timestamp line.
        events: Watchdog events; each is rendered via ``event_to_log``.
        relative_to: Optional base path; when given, event paths are shown
            relative to it.

    Returns:
        The full report as one newline-joined string.
    """
    # event_to_log already treats a falsy relative_to as "no rebasing", so the
    # original duplicated if/else branches collapse into a single call.
    lines = [timestamp_now(), title]
    lines += [event_to_log(e, relative_to) for e in events]
    return "\n".join(lines)
| jamesWalker55/watchdog-test-cases | utility.py | utility.py | py | 3,117 | python | en | code | 0 | github-code | 90 |
5838104057 | import numpy as np
from scipy.optimize import leastsq
import pylab as plt
# Synthetic test signal: amplitude-3 sine with a tiny phase offset, a 0.5 DC
# offset, and unit-variance Gaussian noise.
N = 1000 # number of data points
t = np.linspace(0, 4*np.pi, N)
data = 3.0*np.sin(t+0.001) + 0.5 + np.random.randn(N) # create artificial data with noise ndarray
# NOTE(review): get_sin_model unpacks guess as [std, freq, phase, mean], but the
# values below place the data mean in slot 0 and the RMS-derived amplitude in
# slot 1 (the "freq" slot) -- confirm whether this ordering is intentional.
data_guess = [np.mean(data), 3*np.std(data)/(2**0.5), 0, 1] # std, freq, phase, mean
def get_sin_model(guess, t, data):
    """Least-squares fit of a damped-sine model to ``data`` over ``t``.

    Args:
        guess: Initial parameters [std, freq, phase, mean].
        t: Sample positions (1-D array).
        data: Noisy observations at ``t``.

    Returns:
        [est_std, est_freq, est_phase, est_mean] from scipy.optimize.leastsq.
        Also plots the data, the first guess, and the fitted curve (blocks on
        plt.show()).
    """
    # This functions models
    guess_std = guess[0]
    guess_freq = guess[1]
    guess_phase = guess[2]
    guess_mean = guess[3]
    # # we'll use this to plot our first estimate. This might already be good enough for you
    # data_first_guess = guess_std * np.sin(guess_freq * t + guess_phase) + guess_mean
    # optimize_func = lambda x: x[0] * np.sin(x[1] * t + x[2]) + x[3] - data
    # est_std, est_freq, est_phase, est_mean = leastsq(optimize_func, [guess_std, guess_freq, guess_phase, guess_mean])[0]
    # recreate the fitted curve using the optimized parameters
    # data_fit = est_std * np.sin(est_freq * t + est_phase) + est_mean
    # we'll use this to plot our first estimate. This might already be good enough for you
    data_first_guess = guess_std * np.exp(-t) * np.sin(guess_freq * np.exp(-t) * t + guess_phase) + guess_mean
    # Define the function to optimize, in this case, we want to minimize the difference
    # between the actual data and our "guessed" parameters
    optimize_func = lambda x: x[0] * np.exp(-t) * np.sin(x[1] * np.exp(-t) * t + x[2]) + x[3] - data
    est_std, est_freq, est_phase, est_mean = leastsq(optimize_func, [guess_std, guess_freq, guess_phase, guess_mean])[0]
    # recreate the fitted curve using the optimized parameters
    # NOTE(review): the optimizer fits a damped model (np.exp(-t) factors) but
    # the curve below is reconstructed with a plain undamped sine -- the plotted
    # "after fitting" line does not match the fitted model; confirm intent.
    data_fit = est_std * np.sin(est_freq * t + est_phase) + est_mean
    # NOTE(review): the label order here ("mean, std, phase, freq") does not
    # match the argument order (est_std, est_freq, est_phase, est_mean).
    print('fitted mean, std, phase, freq are %f %f %f %f' % (est_std, est_freq, est_phase, est_mean))
    estimation = [est_std, est_freq, est_phase, est_mean]
    # plot results
    plt.plot(data, '.')
    plt.plot(data_fit, label='after fitting')
    plt.plot(data_first_guess, label='first guess')
    plt.legend()
    plt.show()
    return estimation
get_sin_model(data_guess, t, data) | salomeow/fyp_server_py | leastsq.py | leastsq.py | py | 2,105 | python | en | code | 0 | github-code | 90 |
21893541145 | import folium
import pandas as pd
import matplotlib.pyplot as plt
# Debug convenience: print full, untruncated DataFrames (no row/column/width limits).
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
# TODO: - allow user to change the date of the depicted data through the web page
#       - add choropleth to the map to help depict which countries are faring the worst with C19
#
# Develops the map in folium with markers on the country displaying information within the data frame
def holder():
    """Build a world COVID-19 folium map for a fixed date and save it to test1.html.

    Reads covid.csv (per-country COVID data) and CountryCodes1.txt (ISO alpha-3
    codes with latitude/longitude), joins coordinates onto the COVID frame,
    filters to one hard-coded date, and renders a GeoJson choropleth plus one
    marker per country.
    """
    # Covid 19 data for each country
    # world = pd.read_json('world.json')
    # world.to_csv()
    # print(world)
    data = pd.read_csv('covid.csv')
    # Country codes to map each country to folium
    ccode = pd.read_csv('CountryCodes1.txt')
    df1 = pd.DataFrame(data)
    df2 = pd.DataFrame(ccode).set_index('alpha3')
    long = []
    lat = []
    # adds each countries latitude and longtitude coordinates to the list
    # NOTE(review): the bare except silently skips iso codes missing from the
    # code table, which can desynchronise lat/long from df1's rows below.
    for x in df1['iso_code']:
        try:
            lat.append((df2.loc[x, 'latitude']))
            long.append((df2.loc[x, 'longitude']))
        except:
            continue
    # Adds the longitude, and latitude data to the dataframe consisting of the covid 19 data of each country
    df1.loc[:, 'Latitude'] = pd.Series(lat)
    df1.loc[:, 'Longitude'] = pd.Series(long)
    # date = input('Enter date 1/1/2020 format')
    # pick which date you want to be depicted
    test = df1.loc[df1['date'] == '7/14/2020'].fillna(0)
    print(test)
    test2 = test.set_index('iso_code', drop=True)
    #test.reset_index(inplace=True)
    #print(test2.index)
    map1 = folium.Map(location=[0, 0],max_bounds=True, zoom_start=3, min_zoom=2, tiles='OpenStreetMap')
    fg = folium.FeatureGroup(name='Current spread of Corona Virus')
    # test2 = test1.set_index('iso_code', drop=True)
    # NOTE(review): both ternary branches below call color(test2, ...) -- the
    # conditional is redundant; also the open() handle for world.json is never
    # explicitly closed.
    fg.add_child(folium.GeoJson(data=open('world.json', 'r', encoding='utf-8-sig').read(), style_function=lambda z: {'fillColor': color(test2, z['properties']['ISO3']) if functown(test2, z['properties']['ISO3']) != 0 else color(test2, z['properties']['ISO3'])}))
    #test2[z['properties']['ISO3'],'total_cases_per_million']
    for i in test.index:
        fg.add_child(folium.Marker(location=[(test.loc[i, 'Latitude']), (test.loc[i, 'Longitude'])], popup= graph(test,i), icon=folium.Icon(color='red')))
    #fg.add_child(folium.GeoJson(data=open('world.json', 'r', encoding='utf-8-sig').read()))
    map1.add_child(fg)
    map1.save('test1.html')
def graph(gdata, i):
    """Build the HTML popup body for the marker of row ``i`` of ``gdata``."""
    row = gdata.loc[i]
    parts = [
        ('<b><font color=red> Date:</font></b> ', row['date']),
        ('<b><font color=red>Country:</font></b> ', row['location']),
        ('<b><font color=red>cases:</font></b> ', row['total_cases']),
        ('<b><font color=red>deaths:</font></b> ', row['total_deaths']),
    ]
    return '<br>'.join(label + str(value) for label, value in parts)
def functown(pdata, z):
    """Return 'total_cases_per_million' for country code ``z`` as int, or 0 if absent."""
    if z not in pdata.index:
        return 0
    return int(pdata.loc[z, 'total_cases_per_million'])
def color(pdata, z):
    """Map a country code to a fill colour based on its cases-per-million.

    Thresholds: 0 -> 'White'; (0, 200] -> 'Yellow'; (200, 1000) -> '#fc4503'
    (orange-red); >= 1000 -> '#fc03eb' (magenta).

    Fix: the original implicitly returned None for country codes missing from
    ``pdata`` (and for NaN values); folium's style_function expects a colour
    string, so unknown/NaN cases now fall back to 'White'.
    """
    if z not in pdata.index:
        return 'White'
    value = pdata.loc[z, 'total_cases_per_million']
    if value == 0:
        return 'White'
    if value <= 200:
        return 'Yellow'
    if 200 < value < 1000:
        return '#fc4503'
    if value >= 1000:
        return '#fc03eb'
    # NaN (or otherwise incomparable) values reach here.
    return 'White'
# test function to read a file
def testing():
file = open('CountryCodes1','r+')
for i in file:
print(i)
file.close()
holder()
| zouvier/COVID-19-HEAT-MAP | webmap.py | webmap.py | py | 3,395 | python | en | code | 0 | github-code | 90 |
# a: price of an A-ticket, b: price of a B-ticket, c: price of one C-ticket
# (two C-tickets can replace one A- and one B-ticket); x, y: tickets needed.
a, b, c, x, y = map(int, input().split())
best = float('inf')
# Try every possible number of C-ticket pairs and keep the cheapest total.
for pairs in range(10 ** 5 + 1):
    cost = 2 * c * pairs
    cost += a * max(0, x - pairs)
    cost += b * max(0, y - pairs)
    best = min(best, cost)
print(best)
| Aasthaengg/IBMdataset | Python_codes/p03371/s885414560.py | s885414560.py | py | 237 | python | en | code | 0 | github-code | 90 |
45112768483 |
# The following algorithm draws one random student from a list of ten names.
from random import choice

lista = [str(input("Digite o nome do aluno [{}]: ".format(n))) for n in range(10)]
print("A pessoa escolhida foi: {} ".format(choice(lista)))
42367435111 | import sys
import numpy as np
def read_data(filename):
    """Parse a whitespace-separated numeric text file.

    Numeric lines become rows of the returned array; lines whose fields fail
    float conversion (headers/titles) are collected separately, in order.

    Args:
        filename: Path of the text file to read.

    Returns:
        (data, title): ``data`` is an ``np.ndarray`` of floats and ``title``
        is the list of non-numeric lines (stripped of spaces and line ends).

    Fixes vs original: removed the unused ``i = 0`` counter, narrowed the
    bare ``except`` to ``ValueError`` so real bugs are no longer swallowed.
    """
    title = []
    data = []
    with open(filename) as f:
        for line in f:
            fields = line.strip(' \r\n').split(' ')
            try:
                data.append([float(x) for x in fields])
            except ValueError:
                # Non-numeric line: treat it as part of the header/title.
                title.append(line.strip(' \r\n'))
    return np.asarray(data), title
def save(filename, data, title):
    """Write ``title`` lines and numeric ``data`` rows in read_data's format.

    Title lines are '\\r\\n'-joined and terminated by '\\r\\n'; each data value
    is formatted with 6 decimal places, values space-separated, and rows
    joined with ' \\r\\n' (trailing space matches the original output).

    Fix vs original: the file handle was opened with ``open`` and never
    closed; a ``with`` block guarantees it is flushed and closed.
    """
    with open(filename, 'w') as f:
        f.write('\r\n'.join(title) + '\r\n')
        rows = [' '.join('{:.6f}'.format(x) for x in row) for row in data]
        f.write(' \r\n'.join(rows))
def confine(data, maxs=1.1):
    """Cap values in ``data`` at ``maxs`` (in place) and return the same array."""
    over = data > maxs
    data[over] = maxs
    return data
# Pipeline: read the numeric file, clamp values at the default maximum (1.1),
# and write the result next to the input with a '2' suffix.
filename = 'pure_s_sr.txt'
data,title = read_data(filename)
data = confine(data)
save(filename+'2',data,title)
| Moirai7/environment | EnvironmentalData/range.py | range.py | py | 972 | python | en | code | 0 | github-code | 90 |
7245501302 | #!/usr/bin/python
'''
Data loading and pre-processing functions
'''
import os
import glob
import tempfile
import numpy as np
import pandas as pd
import re
from Bio import SeqIO
import pyranges as pr
import matplotlib as mpl
import matplotlib.pyplot as plt
import sklearn
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
import mlxtend
from mlxtend.plotting import plot_confusion_matrix
def string_to_array(my_string):
    """Lower-case a DNA string, replace every non-ACGT character with 'z',
    and return it as a 1-D numpy array of single characters.
    """
    cleaned = re.sub('[^acgt]', 'z', my_string.lower())
    return np.array(list(cleaned))
def one_hot_encoder(my_array):
    """ One-hot encoding for sequence input data"""
    # Fixed 5-letter alphabet; LabelEncoder sorts classes, so the mapping is
    # a=0, c=1, g=2, t=3, z=4 regardless of the order passed to fit().
    label_encoder = LabelEncoder()
    label_encoder.fit(np.array(['z','a','c','g','t']))
    integer_encoded = label_encoder.transform(my_array)
    # NOTE(review): `sparse=False` was renamed to `sparse_output` in
    # scikit-learn 1.2 and removed in 1.4 -- confirm the pinned sklearn version.
    onehot_encoder = OneHotEncoder(sparse=False, dtype=int, categories=[([0,1,2,3,4])])
    integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
    onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
    return onehot_encoded
def load_sequence_data(inputPath, sequenceFile):
    """ Load, encode, and parse sequence and label data """
    # Expects a CSV with at least 'seq' and 'label' columns -- TODO confirm schema.
    sequences= pd.read_csv(os.path.join(inputPath,sequenceFile))
    # examine class imbalance
    neg, pos = np.bincount(sequences['label'])
    total = neg + pos
    print('dataset Info:\n    Total samples: {}\n    Positive Tiles: {}({:.2f}% of total)\n'.format(
        total, pos, 100 * pos / total))
    # NOTE(review): the common sequence length is taken from row index 1
    # (sequences['seq'][1]) -- assumes all sequences share one length and that
    # the frame has a default RangeIndex; confirm.
    onehot = np.empty([sequences.shape[0],len(sequences['seq'][1]),5]) #dimension=[no.sequences,len.sequence,onehotencoding]
    for row in range(sequences.shape[0]):
        onehot[row] = one_hot_encoder(string_to_array(sequences.seq[row]))
    # Add a trailing channel dimension for the downstream 2-D conv input.
    seq_data= onehot.reshape(onehot.shape[0],len(sequences['seq'][1]),5,1)
    seq_labels = sequences['label']
    return seq_data, seq_labels
def prepare_training_data(datapath,validation_split=0.1):
    """ Wrapper function to load the training dataset """
    # Each feature file is a tab-separated matrix; reshape appends a channel
    # dimension (rows, cols, 1) for the downstream model input.
    print("Loading and encoding the read-depth data")
    depth_train = np.array(pd.read_csv(os.path.join(datapath,'train_depth.txt'),sep="\t", header = None))
    depth_train = depth_train.reshape(depth_train.shape[0],depth_train.shape[1], 1)
    print("Finished loading and encoding the read-depth data")
    print("Loading and encoding the gene-expression data")
    exp_train = np.array(pd.read_csv(os.path.join(datapath,'train_expression.txt'),sep="\t", header = None))
    exp_train = exp_train.reshape(exp_train.shape[0], exp_train.shape[1],1)
    print("Finished loading and encoding the gene-expression data")
    print("Loading and encoding the reference time-point data")
    time_train = np.array(pd.read_csv(os.path.join(datapath,'train_ref.txt'),sep="\t", header = None))
    time_train = time_train.reshape(time_train.shape[0], time_train.shape[1], 1)
    print("Finished loading and encoding the reference time-point data")
    print("Loading foldchange data")
    foldchange_train = np.array(pd.read_csv(os.path.join(datapath,'train_foldchange.txt'),sep="\t", header = None))
    foldchange_train = foldchange_train.reshape(foldchange_train.shape[0], foldchange_train.shape[1], 1)
    print("Finished loading and encoding the foldchange data")
    print("Loading and one-hot encoding the sequence data")
    # Sample weights are the elementwise product of reference time-point and
    # fold-change matrices.
    weight_train = time_train*foldchange_train
    seq_train, y_train = load_sequence_data(datapath, 'train_sequences.csv')
    print("The number of positive and negative tiles in the train dataset is:")
    print(y_train.value_counts())
    train_bed= pr.read_bed(os.path.join(datapath,"train_tiles.bed"),
                          as_df=True)
    print('Splitting data into: {}% training and {}% validation\n'.format(
        (1- validation_split)*100, validation_split *100))
    # Single train_test_split call keeps all six arrays (and the bed frame)
    # aligned row-for-row; random_state fixed for reproducibility.
    (depth_train,depth_val,seq_train,seq_val,exp_train,exp_val,y_train,y_val,weight_train,
     weight_val, train_bed, val_bed) = train_test_split(depth_train,seq_train,exp_train,y_train,weight_train,train_bed,
                                                       test_size = validation_split, random_state = 50)
    print('Training labels shape:', y_train.shape)
    print('Validation labels shape:', y_val.shape)
    print('Training features shape:', depth_train.shape, seq_train.shape, exp_train.shape, weight_train.shape)
    print('Validation features shape:', depth_val.shape, seq_val.shape, exp_val.shape, weight_val.shape)
    return depth_train, depth_val, exp_train, exp_val, weight_train, weight_val, seq_train, seq_val, y_train, y_val,train_bed, val_bed
def prepare_test_data(datapath):
    """ Wrapper function to load the test dataset """
    # Mirrors prepare_training_data's loading/reshaping but for test_* files,
    # with no train/validation split.
    print("Loading and encoding the test dataset")
    depth_test = np.array(pd.read_csv(os.path.join(datapath,'test_depth.txt'),sep="\t", header = None))
    depth_test = depth_test.reshape(depth_test.shape[0],depth_test.shape[1], 1)
    exp_test = np.array(pd.read_csv(os.path.join(datapath,'test_expression.txt'),sep="\t", header = None))
    exp_test = exp_test.reshape(exp_test.shape[0], exp_test.shape[1],1)
    time_test = np.array(pd.read_csv(os.path.join(datapath,'test_ref.txt'),sep="\t", header = None))
    time_test = time_test.reshape(time_test.shape[0], time_test.shape[1], 1)
    foldchange_test = np.array(pd.read_csv(os.path.join(datapath,'test_foldchange.txt'),sep="\t", header = None))
    foldchange_test = foldchange_test.reshape(foldchange_test.shape[0], foldchange_test.shape[1], 1)
    # Sample weights: elementwise product of reference time-point and fold change.
    weight_test = time_test*foldchange_test
    seq_test, y_test = load_sequence_data(datapath, 'test_sequences.csv')
    test_bed= pr.read_bed(os.path.join(datapath,"test_tiles.bed"),
                          as_df=True)
    print('Test labels shape:', y_test.shape)
    print('Test features shape:', depth_test.shape, seq_test.shape, exp_test.shape, weight_test.shape)
    return depth_test, exp_test, weight_test, seq_test, y_test, test_bed
def plot_roc(name, labels, predictions, **kwargs):
    """Draw one ROC curve (labelled ``name``) on the current matplotlib axes."""
    false_pos, true_pos, _ = sklearn.metrics.roc_curve(labels, predictions)
    plt.plot(false_pos, true_pos, label=name, linewidth=1, **kwargs)
    # Axis cosmetics: labels, tight limits, grid, square aspect.
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.xlim([-0.005,1.02])
    plt.ylim([-0.005,1.02])
    plt.grid(True)
    plt.gca().set_aspect('equal')
def plot_prc(name, labels, predictions, **kwargs):
    """Draw one precision-recall curve (in percent) on the current axes."""
    precision, recall, _ = sklearn.metrics.precision_recall_curve(labels, predictions)
    plt.plot(100*recall, 100*precision, label=name, linewidth=1, **kwargs)
    # Axis cosmetics: percent labels, tight limits, legend, grid, square aspect.
    plt.xlabel('Recall [%]')
    plt.ylabel('Precision [%]')
    plt.xlim([-0.5,102])
    plt.ylim([-0.5,102])
    plt.legend(loc='lower left')
    plt.grid(True)
    plt.gca().set_aspect('equal')
def plot_cm(labels, predictions, p=0.5):
    """ confusion matrix plotting function """
    # Threshold continuous predictions at p before building the matrix.
    cm = confusion_matrix(labels, predictions > p)
    fig, ax = plot_confusion_matrix(conf_mat=cm,
                                    figsize=(8,8),
                                    class_names=['False','True'],
                                    show_normed=True)
    plt.tight_layout()
    plt.title('Confusion matrix @{:.2f}'.format(p))
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')
    # cm layout: [[TN, FP], [FN, TP]] (sklearn convention).
    print('Negative Tiles Correctly Classified (True Negatives): ', cm[0][0])
    print('Negative Tiles Incorrectly Classified (False Positives): ', cm[0][1])
    print('Positive Tiles Correctly Classified (True Positives): ', cm[1][1])
    print('Positive Tiles Incorrectly Classified (False Negatives): ', cm[1][0])
    print('Total Positive Tiles: ', np.sum(cm[1]))
| pkhoueiry/TempoMAGE | load_data.py | load_data.py | py | 7,923 | python | en | code | 1 | github-code | 90 |
25043539212 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
* find all markdown files in specified root
* convert files into html
* translate via API
* all files are sent in one request per target language
* write results back, as html
"""
import requests
import datetime
import werkzeug
import json
import time
import os
import codecs
# enforce quotas (https://cloud.google.com/translate/quotas) (very naive implementation)
quota_char = 0        # characters sent since the last quota pause
quota_limit = 100000  # max characters per quota window
quota_wait = 100      # seconds to sleep once the limit is reached
# Load config.json from the directory containing this script.
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.json")
if not os.path.exists(config_path):
    raise Exception("%s not found" % config_path)
with codecs.open(config_path, "r", "utf-8") as fd:
    config = json.load(fd)
if "auth_type" not in config:
    # Fix: the original message left the %s placeholder unformatted.
    raise Exception("configuration file (%s) must specify 'auth_type': 'apikey'" % config_path)
def get_temp_dir():
    """Create (if needed) and return a per-run temp dir under /tmp/translate.

    The directory name embeds the current timestamp so separate runs get
    distinct directories.

    Fix vs original: ``os.makedirs(..., exist_ok=True)`` avoids the
    check-then-create race of ``os.path.exists`` followed by ``makedirs``.
    """
    tdir = os.path.join(
        "/tmp", "translate", datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    )
    os.makedirs(tdir, exist_ok=True)
    return tdir
def oauth_init(headers):
    """(Unimplemented) Populate ``headers`` with an OAuth bearer token.

    Intended authorization-code flow (per the Google OAuth playground):
      1) open the consent URL in a browser
         (accounts.google.com/o/oauth2/v2/auth with the cloud-translation scope),
      2) receive the authorization code on the redirect URI callback,
      3) POST the code to the token endpoint to exchange it for a token,
      4) set ``headers["Authorization"] = "Bearer <token>"`` and return headers.

    Raises:
        NotImplementedError: always, until the flow above is implemented.
        (Still a subclass of Exception, so existing handlers keep working;
        the unreachable placeholder code after the raise was removed.)
    """
    # TODO: implement the authorization-code flow described above.
    raise NotImplementedError("oauth is not implemented")
def query_string(data):
    """Render *data* as a URL query string ("?k=v&...").

    Returns "" when *data* is not a non-empty dict, so the result can be
    appended to a URL unconditionally.
    """
    # Local import keeps the change self-contained.  urllib.parse.urlencode
    # replaces werkzeug.urls.url_encode, which was removed in Werkzeug 2.1;
    # output shape ("k=v" pairs joined by "&", spaces as "+") is the same.
    import urllib.parse
    if not isinstance(data, dict) or not data:
        return ""
    return "?" + urllib.parse.urlencode(data)
def translate(tdict, source_lang, language):
    """Translate every entry of *tdict* into *language*, one key at a time,
    by delegating each single-entry dict to translate_single()."""
    return {
        key: translate_single({key: value}, source_lang, language)
        for key, value in tdict.items()
    }
def translate_single(tdict, source_lang, language):
    """Translate the values of *tdict* into *language* with the Google
    Translate v2 REST API and write the results back into *tdict*.

    Keys are sent in sorted order so the positional answers can be matched
    back to their keys.  Quota handling is naive: sleep before a predicted
    overrun, and on an HTTP 403 wait and retry recursively.
    """
    global quota_char
    # dont translate source language
    if language == source_lang:
        return tdict
    # Sorted order fixes each value's position in the request; the response
    # items are re-associated by that same position below.
    keys = sorted(tdict.keys())
    o_text_list = []
    for key in keys:
        o_text_list.append(tdict[key])
    data = {
        "q": o_text_list,
        "target": language,
        "format": "html",
        "source": source_lang,
        "model": "nmt",
    }
    query_data = {}
    headers = {"Content-Type": "application/json; charset: utf-8"}
    if config["auth_type"] == "apikey":
        query_data["key"] = config["api_key"]
    elif config["auth_type"] == "oauth":
        headers = oauth_init(headers)
    else:
        raise Exception("auth_type '%s' not implemented" % config["auth_type"])
    URL = "https://translation.googleapis.com/language/translate/v2" + query_string(
        query_data
    )
    # Pre-emptive quota accounting: len(str(data)) approximates the request's
    # character count against the (very naive) module-level quota.
    quota_char += len(str(data))
    if quota_char >= quota_limit:
        print("Would hit rate limit - waiting %s seconds" % (quota_wait + 5))
        time.sleep(quota_wait + 5)
        print("Resuming after rate limit")
        quota_char = 0
    print("  translating %s -> %s: %s" % (source_lang, language, keys))
    req = requests.post(URL, headers=headers, json=data)
    response = req.json()
    # 403 = quota exceeded anyway: wait, reset the counter, retry.
    # NOTE(review): recursion has no depth bound -- assumes the quota recovers.
    if (
        "error" in response
        and "code" in response["error"]
        and response["error"]["code"] == 403
    ):
        print("Rate limit hit - waiting %s seconds" % (quota_wait + 5))
        time.sleep(quota_wait + 5)
        quota_char = 0
        print("Resuming after rate limit")
        return translate_single(tdict, source_lang, language)
    # Translations come back in request order; pair them with the sorted keys.
    cnt = 0
    for t in response["data"]["translations"]:
        tdict[keys[cnt]] = t["translatedText"]
        cnt += 1
    return tdict
| hotmaps-docker/gollum | wikitranslate/translate.py | translate.py | py | 4,501 | python | en | code | 1 | github-code | 90 |
30731754108 | from StringIO import StringIO
from flask import Response
from sqlalchemy import text
from ohm.extensions import db
def _copy_sql(sql, params, buf):
    """Stream the result of *sql* (with *params*) into *buf* as CSV.

    Drops to the raw DB-API connection so the driver's ``mogrify`` /
    ``copy_expert`` (psycopg2-style) are available; the server renders the
    CSV via ``COPY (...) TO STDOUT``.
    """
    conn = db.engine.raw_connection()
    c = conn.cursor()
    # COPY cannot take bound parameters, so interpolate them safely first.
    sql = c.mogrify(sql, params)
    sql = "COPY ({}) TO STDOUT WITH CSV HEADER".format(sql)
    c.copy_expert(sql, buf)
    # NOTE(review): neither cursor nor raw connection is closed here --
    # presumably reclaimed by the pool/GC; confirm against engine settings.
def make_csv_response(sql, params):
    """
    Build an HTTP 200 ``text/csv`` response for a query.

    `sql` is a SQLAlchemy parameterized string. Compile it to the postgres dialect so we can use the
    raw connection and copy_expert.

    :param sql: SQLAlchemy-style parameterized SQL text
    :param params: bind parameters for the query
    :return: flask Response whose body is the whole CSV (buffered in memory)
    """
    sql = str(text(sql).compile(dialect=db.session.bind.dialect))
    buf = StringIO()
    _copy_sql(sql, params, buf)
    return Response(buf.getvalue(), status=200, mimetype='text/csv')
| billyfung/flask-template | app/api/utils.py | utils.py | py | 715 | python | en | code | 0 | github-code | 90 |
1987492875 | import sys
import argparse
def parseFasta(filename):
    """Parse a FASTA file into {record_id: sequence}.

    The record id is the first whitespace-separated token after '>';
    multi-line sequences are concatenated into one string.
    """
    chunks = {}
    current = None
    with open(filename, 'r') as handle:
        for line in handle:
            if line.startswith('>'):
                current = line[1:].rstrip().split()[0]
                chunks[current] = []
            else:
                chunks[current].append(line.rstrip())
    return {rec: ''.join(parts) for rec, parts in chunks.items()}
def fasta2phylip(args):
    """Convert the FASTA file *args.input* into a sequential PHYLIP file
    *args.output*.

    All sequences must share the same length (an alignment); otherwise an
    error is printed and the process exits.
    """
    fas = parseFasta(args.input)
    outfile = args.output
    # Preserve the record order of the parsed FASTA file.
    sequence_list = list(fas)  # To keep order of sequence
    sequence_dict = dict(fas)
    # Test length of the alignment:
    alignment_length = 0
    for gene in sequence_dict:
        if alignment_length and len(sequence_dict[gene]) != alignment_length:
            print("Error in alignment length, exit on error !!!")
            sys.exit()
        alignment_length = len(sequence_dict[gene])
    number_of_seq = len(sequence_dict)
    # Widest id decides the padding of the name column (default="" makes an
    # empty input produce a "0 0" header instead of crashing).
    longest_id = max(sequence_dict, key=len, default="")
    # Write alignment in Phylip format.  "with" guarantees the handle is
    # closed even if a write fails (the original leaked it on errors).
    with open(outfile, "w") as phyfile:
        phyfile.write(str(number_of_seq) + " " + str(alignment_length) + "\n")
        for gene in sequence_list:
            phyfile.write(gene.ljust(len(longest_id), ' ') + " " + sequence_dict[gene] + "\n")
if __name__=='__main__':
    # CLI entry point: --input FASTA in, --output PHYLIP out.
    parser = argparse.ArgumentParser(description = 'preparing fasta files for paml!!')
    parser.add_argument('--input',
                        dest = 'input',
                        help = 'input fasta file')
    parser.add_argument('--output',
                        dest = 'output',
                        help = 'output phylip file for paml')
    # With no arguments at all, show usage instead of failing later.
    if len(sys.argv) <= 1:
        parser.print_help()
        sys.exit()
    args = parser.parse_args()
    fasta2phylip(args)
| Github-Yilei/genome-assembly | Python/fasta2phylip.py | fasta2phylip.py | py | 2,061 | python | en | code | 2 | github-code | 90 |
18341398989 | import sys
# Rebind input to readline for speed.  NOTE: readline keeps the trailing
# newline, so do not use this trick for raw string input!
input=sys.stdin.readline
import heapq
# n values follow, and m halving operations are applied.
n,m=map(int,input().split())
a=[]
# heapq is a min-heap, so values are stored negated to pop the maximum first.
for i in input().split():
    heapq.heappush(a,-int(i))
# m times: take the current maximum and halve it (integer floor).
for i in range(m):
    p=-heapq.heappop(a)
    p=p//2
    heapq.heappush(a,-p)
# Entries are negated, so negate the sum back before printing.
print(-sum(a))
| Aasthaengg/IBMdataset | Python_codes/p02912/s445042670.py | s445042670.py | py | 269 | python | en | code | 0 | github-code | 90 |
18206256079 | N,S = list(map(int,input().split()))
# N values A[0..N-1]; target sum S read on the previous line.
A = list(map(int,input().split()))
# dp[i][j]: weighted count (mod p) of ways to reach partial sum i using the
# first j elements; unused elements contribute a factor of 2 each.
dp = [[0]*(N+1) for i in range(S+1)]
p = 998244353
# twos[i] = 2**i mod p, precomputed.
twos = [1]*N
for i in range(1,N):
    twos[i] = (twos[i-1]*2)%p
for j in range(1,N+1):
    # flag becomes 1 once i has passed A[j-1], i.e. once dp[i-A[j-1]] is a
    # valid (non-negative) index for the "take element j" transition.
    flag = 0
    for i in range(1,S+1):
        if flag==0:
            dp[i][j] = (dp[i][j-1]*2)%p
        else:
            dp[i][j] = (dp[i][j-1]*2+dp[i-A[j-1]][j-1])%p
        if i==A[j-1]:
            # Element j alone reaches sum i; earlier elements are free (2**(j-1)).
            dp[i][j] += twos[j-1]
            flag = 1
print(dp[-1][-1]%p)
72774512298 | # -*- coding: utf-8 -*-
"""
LandSurfaceClustering.py
Landon Halloran
07.03.2019
www.ljsh.ca
Land use clustering using multi-band remote sensing data.
This is a rough first version. Modifications will need to be made in order to properly
treat other datasets.
Demo data is Sentinel-2 data (bands 2, 3, 4, 5, 6, 7, 8, 11 & 12) at 10m resolution
(some bands are upsampled) in PNG format, exported from L1C_T32TLT_A007284_20180729T103019.
"""
# import these modules:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import imageio
import glob
import seaborn as sns; sns.set(style="ticks", color_codes=True)
########## USER MUST DEFINE THESE ###########
image_folder_name = 'example_png' # subfolder where images are located
image_format = 'png' # format of image files (the exact suffix of the filenames)
band_names = ['B02','B03','B04','B05','B06','B07','B08','B11','B12'] # names of bands (in file names). should all have some length
Nsamples = 20000 # number of random samples used to "train" k-means here (for faster execution)
NUMBER_OF_CLUSTERS = 4 # the number of independent clusters for k-means
colour_map = 'terrain' # cmap, see matplotlib.org/examples/color/colormaps_reference.html
#############################################
# Load each band image into a dict keyed by band name.
# NOTE(review): the [18:21] slice assumes the band name sits at exactly those
# character positions of the glob path ('example_png/...'); verify for other
# folder/file naming schemes.
images = dict();
for image_path in glob.glob(image_folder_name+'/*.'+image_format):
    print('reading ',image_path)
    temp = imageio.imread(image_path)
    temp = temp[:,:,0].squeeze()
    images[image_path[18:21]] = temp # FOR DIFFERENT FILE NAMES, ADJUST THIS!
print('images have ', np.size(temp),' pixels each')
# Stack the bands into a (rows, cols, n_bands) cube...
imagecube = np.zeros([images['B02'].shape[0],images['B02'].shape[1],np.size(band_names)])
for j in np.arange(np.size(band_names)):
    imagecube[:,:,j] = images[band_names[j]] #
imagecube=imagecube/256 # scaling to between 0 and 1
# display an RGB or false colour image (disabled)
thefigsize = (10,8)# set figure size
#plt.figure(figsize=thefigsize)
#plt.imshow(imagecube[:,:,0:3])
# Sample Nsamples random pixels (band vectors) to fit k-means cheaply.
imagesamples = []
for i in range(Nsamples):
    xr=np.random.randint(0,imagecube.shape[1]-1)
    yr=np.random.randint(0,imagecube.shape[0]-1)
    imagesamples.append(imagecube[yr,xr,:])
# convert to pandas dataframe
imagessamplesDF=pd.DataFrame(imagesamples,columns = band_names)
# make pairs plot (each band vs. each band)
seaborn_params_p = {'alpha': 0.15, 's': 20, 'edgecolor': 'k'}
#pp1=sns.pairplot(imagessamplesDF, plot_kws = seaborn_params_p)#, hist_kws=seaborn_params_h)
# Fit k-means on the sampled pixels only; full image is labelled later.
from sklearn.cluster import KMeans
KMmodel = KMeans(n_clusters=NUMBER_OF_CLUSTERS)
KMmodel.fit(imagessamplesDF)
KM_train = list(KMmodel.predict(imagessamplesDF))
# Stringify the labels so seaborn treats them as categories, not numbers.
i=0
for k in KM_train:
    KM_train[i] = str(k)
    i=i+1
# NOTE(review): DF2 is an alias, not a copy -- the 'group' column is added to
# imagessamplesDF as well (relied on by the pairplot below).
imagessamplesDF2=imagessamplesDF
imagessamplesDF2['group'] = KM_train
# pair plots with clusters coloured:
pp2=sns.pairplot(imagessamplesDF,vars=band_names, hue='group',plot_kws = seaborn_params_p)
pp2._legend.remove()
# Predict a cluster label for every pixel, one image row at a time.
imageclustered=np.empty((imagecube.shape[0],imagecube.shape[1]))
i=0
for row in imagecube:
    temp = KMmodel.predict(row)
    imageclustered[i,:]=temp
    i=i+1
# plot the map of the clustered data
plt.figure(figsize=thefigsize)
plt.imshow(imageclustered, cmap=colour_map)
| lhalloran/LandSurfaceClustering | LandSurfaceClustering.py | LandSurfaceClustering.py | py | 3,320 | python | en | code | 28 | github-code | 90 |
24345185621 | """ Setup info for Products.MaildropHost
$Id: setup.py 1657 2008-11-01 12:10:57Z jens $
"""
import os
from setuptools import find_packages
from setuptools import setup
NAME = 'MaildropHost'
here = os.path.abspath(os.path.dirname(__file__))
package = os.path.join(here, 'Products', NAME)
def _read(name):
    """Return the text contents of file *name* inside the package directory.

    Uses a context manager so the handle is closed promptly instead of
    leaking until garbage collection (the original never closed it).
    """
    with open(os.path.join(package, name)) as f:
        return f.read()
_boundary = '\n' + ('-' * 60) + '\n\n'
setup(name='Products.%s' % NAME,
version=_read('VERSION.txt').strip(),
description="Asynchronous transaction-aware MailHost replacement for Zope 2",
long_description=( _read('README.txt')
+ _boundary
+ _read('INSTALL.txt')
+ _boundary
+ _read('CHANGES.txt')
+ _boundary
+ 'Download\n========'
),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Zope2",
"Intended Audience :: Developers",
"License :: OSI Approved :: Zope Public License",
"Operating System :: POSIX",
"Programming Language :: Python",
"Topic :: Communications :: Email",
"Topic :: Software Development",
],
keywords='web zope zope2 mail smtp',
author='Jens Vagelpohl',
author_email='jens@dataflake.org',
url='http://pypi.python.org/pypi/Products.MaildropHost',
license="ZPL 2.1 (http://www.zope.org/Resources/License/ZPL-2.1)",
packages=find_packages(),
namespace_packages=['Products'],
include_package_data=True,
zip_safe=False,
install_requires=[
# Zope >= 2.8
'setuptools',
],
entry_points="""
[zope2.initialize]
Products.%s = Products.%s:initialize
""" % (NAME, NAME),
)
| pedroardaglio/Products.MaildropHost | setup.py | setup.py | py | 1,851 | python | en | code | 1 | github-code | 90 |
72615679977 | # -*- coding: utf-8 -*-
"""
@author: Sergio GARCIA-VEGA
sergio.garcia-vega@postgrad.manchester.ac.uk
The University of Manchester, Manchester, UK
BigDataFinance, Work Package 1, Research Project 1
Id: Main_VECM.py
"""
import os
import pickle
import numpy as np
import pandas as pd
from os import listdir
import sklearn.metrics as metrics
from statsmodels.tsa.api import VECM
from sklearn.metrics import mean_squared_error
#==============================================================================
# Importing Traning and Testing samples
#==============================================================================
# Load every pickled train/test split from ./Data, keyed by stock ticker.
crr_wd = os.getcwd()
data_dir_trte = os.path.join(crr_wd,'Data')
TrainTest = {}
for trte_stock in listdir(data_dir_trte):
    key_df = trte_stock.split('.')[0].split('Stock_')[1]
    # NOTE(review): Windows-only path separator, and the file handle from
    # open() is never closed.
    TrainTest[key_df] = pickle.load(open('Data\\' + str(trte_stock), 'rb'))
keys_stocks = list(TrainTest.keys())
# Training panel: one column per stock.
data = {}
for stock in keys_stocks:
    data[stock] = TrainTest[stock]['Train']['Set']
data = pd.DataFrame(data)
data = np.array(data)
#==============================================================================
#              Vector Error Correction Model (VECM)
#==============================================================================
# Hard-coded dimensions: 280 test windows x 10 lags x 24 stocks.
data_test = np.zeros([280,10,24])
count = 0
for stock in keys_stocks:
    #======================================================================
    #                          Data Embedding
    #======================================================================
    X_test = TrainTest[stock]['Test']['X_te']
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
    for i in range(280):
        data_test[i, :, count] = X_test[i, :, 0]
    count = count + 1
    #======================================================================
    #                            Predictions
    #======================================================================
    # One-step-ahead VECM forecast per test window; refit for every window.
    pred = np.zeros([280, 1, 24])
    for i in range(280):
        vecm = VECM(endog = np.concatenate((data, data_test[i, :, :])), k_ar_diff = 3, coint_rank = 0, deterministic = 'ci')
        res = vecm.fit()
        pred[i, :, :] = res.predict(steps=1)
    #==========================================================================
    #                           Saving Results
    #==========================================================================
    # NOTE(review): this save loop sits INSIDE the outer stock loop, rebinds
    # the outer loop variable `stock`, and resets `count` to 0 then counts it
    # up to 24 -- on the next outer iteration data_test[..., count] would
    # index out of bounds.  Verify against the intended (likely dedented)
    # structure before reuse.
    count = 0
    for stock in keys_stocks:
        y_test = TrainTest[stock]['Test']['y_te']
        y_pred = pred[:, 0, count][:, np.newaxis]
        mse = mean_squared_error(y_test, y_pred)
        mae_test = metrics.mean_absolute_error(y_test, y_pred)
        Results_VECM = {}
        Results_VECM['Regression'] = {}
        Results_VECM['Regression']['Desired'] = y_test
        Results_VECM['Regression']['Prediction'] = y_pred
        Results_VECM['Measures'] = {}
        Results_VECM['Measures']['MSE'] = mse
        Results_VECM['Measures']['MAE'] = mae_test
        count = count + 1
        pickle.dump(Results_VECM, open('Results\\VECM\\Results_VECM_' + stock + '.pkl', 'wb'))
| xiaogaogaoxiao/PRL-2020 | Main_VECM.py | Main_VECM.py | py | 3,318 | python | en | code | 0 | github-code | 90 |
36517172159 | s=input()
# Look only at the last whitespace-separated word of the input line.
l=s.split()
l1=l[-1]
# Partition the word's characters: l2 = uppercase, l3 = everything else.
l2=[]
l3=[]
for i in l1:
    if i.isupper():
        l2.append(i)
    else:
        l3.append(i)
# If the smallest non-uppercase character also appears in uppercase form,
# print it; otherwise print the smallest character of the whole word
# (uppercase letters sort before lowercase in ASCII).
# NOTE(review): min(l3) raises ValueError when the word is all-uppercase --
# presumably the inputs guarantee a lowercase letter; verify.
if (min(l3).upper()) in l2:
    print(min(l3))
else:
    print(min(l1))
18155234849 | import numpy as np
# import math
# import copy
# from collections import deque
import sys
input = sys.stdin.readline
# sys.setrecursionlimit(10000)
from numba import njit,i8
@njit(i8[:](i8,i8[:],i8[:],i8,i8,i8))
def SerchLoop(M,A,cnt,start,end,temp):
    """Extend the sequence A[i] = A[i-1]**2 mod M until a value repeats.

    cnt[v] remembers the index where value v first appeared (0 = unseen,
    with the caller pre-marking the seed).  On a repeat, `start` is the
    first index of the cycle and `end` the last index before it re-enters.
    Returns A with start and end appended (numba needs one array result).
    """
    for i in range(1,M+2):
        temp = A[i-1] ** 2
        temp %= M
        if cnt[temp] != 0:
            start = cnt[temp]
            end = i - 1
            break
        A[i] = temp
        cnt[temp] = i
    return np.append(np.append(A,start),end)
def main():
    """Sum the first N terms of x, x^2 mod M, (x^2)^2 mod M, ... using
    cycle detection: add the pre-cycle prefix, then whole cycles, then the
    leftover partial cycle."""
    N,X,M = map(int,input().split())
    A = [-1 for i in range(M+2)]
    A[0] = X
    A = np.array(A)
    start = -1
    end = -1
    # cnt[v] = first index of value v (0 doubles as "unseen"; the seed is
    # marked at index 0, which is why start == 0 and start == -1 are
    # handled as separate cases below).
    cnt = [0 for i in range(M)]
    cnt[X] = 0
    cnt = np.array(cnt)
    C = SerchLoop(M,A,cnt,start,end,0)
    A = C[:-2]
    start = C[-2]
    end = C[-1]
    L = end - start + 1
    if start == -1:
        # No cycle detected: plain prefix sum of the first N terms.
        straight = [0 for i in range(N)]
        for i in range(N):
            straight[i] = straight[i-1] + A[i]
        res = straight[-1]
    elif start == 0:
        # Sequence is periodic from the very first term.
        loop = [0 for i in range(L)]
        for i in range(L):
            loop[i] = loop[i-1] + A[start+i]
        res = (N // L) * loop[-1]
        if N % L != 0:
            res += loop[N % L - 1]
    else:
        # Prefix of `start` terms, then repetitions of the length-L cycle.
        straight = [0 for i in range(start)]
        for i in range(start):
            straight[i] = straight[i-1] + A[i]
        loop = [0 for i in range(L)]
        for i in range(L):
            loop[i] = loop[i-1] + A[start+i]
        if N >= start:
            res = straight[-1]
            N -= start
        else:
            res = straight[N-1]
            N = 0
        res += (N // L) * loop[-1]
        if N % L != 0:
            res += loop[N % L - 1]
    print(res)
main()
| Aasthaengg/IBMdataset | Python_codes/p02550/s742978683.py | s742978683.py | py | 1,731 | python | en | code | 0 | github-code | 90 |
31875859527 | """
Every three lines are a single group.
Get an intersection character of the lines.
Get the sum as in part no. one.
"""
# Read the rucksack lines and strip the trailing newlines.
with open("data/3.txt", "r") as f:
    data = f.readlines()
list1 = [x.replace("\n", "") for x in data]
# Group the lines in threes (one elf group per chunk).
list2 = [list1[x:x + 3] for x in range(0, len(list1), 3)]
# Per group: the set of characters of each of the three lines.
list3 = []
for x in list2:
    set_a = set()
    set_b = set()
    set_c = set()
    for letter in x[0]:
        set_a.add(letter)
    for letter in x[1]:
        set_b.add(letter)
    for letter in x[2]:
        set_c.add(letter)
    list3.append([set_a, set_b, set_c])
# The badge is the single character common to all three lines.
list4 = [x[0].intersection(x[1], x[2]) for x in list3]
list5 = [x.pop() for x in list4]
# Priority: a-z -> 1..26 (ord-96), A-Z -> 27..52 (ord-38).
list6 = [ord(x)-96 if x.islower() else ord(x)-38 for x in list5]
print(f"The sum is: {sum(list6)}")
| xSilence8x/advent_of_code_2022 | 3-2nd_part.py | 3-2nd_part.py | py | 751 | python | en | code | 0 | github-code | 90 |
73388153896 | import sqlite3
from sqlite3 import Error
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
DB_FILE = config['DEFAULT']['DB_FILE']
CREATE_PLAYLIST_TABLE = '''
CREATE TABLE IF NOT EXISTS playlistKeys (
id integer PRIMARY KEY,
playlist_name NOT NULL,
rfid text,
added_date text
);
'''
INSERT_PLAYLIST = '''
INSERT INTO playlistKeys(playlist_name,rfid,added_date) VALUES (?,?,?)
'''
LIST_PLAYLISTS = '''
SELECT id,playlist_name,rfid,added_date FROM playlistKeys ORDER BY playlist_name
'''
SELECT_PLAYLIST = '''
SELECT playlist_name FROM playlistKeys WHERE id = ?
'''
SELECT_PLAYLIST_BY_RFID = '''
SELECT playlist_name FROM playlistKeys WHERE rfid = ?
'''
SELECT_PLAYLIST_BY_QUERY = '''
SELECT id,playlist_name,rfid,added_date FROM playlistKeys WHERE playlist_name like ?
'''
SET_RFID = '''
UPDATE playlistKeys SET rfid = ? WHERE id = ?
'''
def create_connection():
    """
    Create a database connection to the SQLite database.
    :return: An OPEN Connection object, or None if connecting failed.
             The caller is responsible for closing it.
    """
    conn = None
    try:
        conn = sqlite3.connect(DB_FILE)
    except Error as e:
        print(e)
    # Bug fix: the original unconditionally closed the connection right here
    # and then returned the closed handle, so every caller's execute() raised
    # "Cannot operate on a closed database".  (On a failed connect, conn is
    # simply None -- nothing to close.)
    return conn
def create_table(conn):
    """
    Create the playlistKeys table if it does not already exist.
    Connection errors are printed, not raised.
    :param conn: Connection object
    """
    try:
        c = conn.cursor()
        c.execute(CREATE_PLAYLIST_TABLE)
    except Error as e:
        print(e)
def store_playlists(playlists, conn):
    """
    Store all playlists in the database.
    Each playlist dict is mutated in place: its "rfid" key is set to the
    sentinel "-1" (meaning "no tag assigned yet") before insertion.
    :param playlists: The List of playlist dicts
    :param conn: The database connection
    """
    for playlist in playlists:
        playlist["rfid"] = "-1"
        print(playlist)
        add_playlist(conn, playlist)
def add_playlist(conn, playlist):
    """
    Insert one playlist row (name, rfid, added date) and commit.
    :param conn: The database connection
    :param playlist: dict with keys 'playlist', 'rfid', 'last-modified'
    :return: the database id of the inserted row
    """
    cur = conn.cursor()
    cur.execute(INSERT_PLAYLIST, (playlist['playlist'], playlist['rfid'], playlist['last-modified']))
    conn.commit()
    return cur.lastrowid
def get_all_playlists():
    """
    Load all playlists from the database, ordered by name.
    Opens and closes its own connection.
    :return: list of (id, playlist_name, rfid, added_date) rows
    """
    conn = create_connection()
    playlists = conn.execute(LIST_PLAYLISTS).fetchall()
    conn.close()
    return playlists
def get_playlist(playlist_id):
    """
    Get the playlist name identified by the database id.
    NOTE(review): fetchone() returns None for an unknown id, so the [0]
    subscript raises TypeError -- callers appear to assume valid ids.
    :param playlist_id: The database id of the playlist
    :return: The playlist name
    """
    conn = create_connection()
    playlist = conn.execute(SELECT_PLAYLIST, (playlist_id,)).fetchone()[0]
    conn.close()
    return playlist
def get_playlist_by_rfid(rfid):
    """
    Get the playlist name associated with an RFID tag value.
    NOTE(review): like get_playlist(), an unmatched rfid makes fetchone()
    return None and the [0] subscript raise TypeError.
    :param rfid: The stored rfid of the playlist
    :return: The playlist name
    """
    conn = create_connection()
    playlist = conn.execute(SELECT_PLAYLIST_BY_RFID, (rfid,)).fetchone()[0]
    conn.close()
    return playlist
def search_playlist(query):
    """
    Search for playlists whose name contains the (trimmed) query string.
    :param query: The search string
    :return: list of matching (id, playlist_name, rfid, added_date) rows
    """
    conn = create_connection()
    real_query = "%" + query.strip() + "%"
    # Bug fix: fetchall() returns every match.  The original used fetchmany()
    # with no size, which yields only Cursor.arraysize rows (default 1) and
    # silently truncated the search results.
    playlist = conn.execute(SELECT_PLAYLIST_BY_QUERY, (real_query,)).fetchall()
    conn.close()
    return playlist
def set_rfid_to_playlist(rfid, playlist_id):
    """
    Add or update the rfid value of a playlist entry, then commit.
    Opens and closes its own connection; an unknown id updates zero rows
    silently.
    :param rfid: The rfid value to set
    :param playlist_id: The database id of the playlist
    """
    conn = create_connection()
    cur = conn.cursor()
    cur.execute(SET_RFID, (rfid, playlist_id))
    conn.commit()
    conn.close()
| jefure/mpd_rfid | repository.py | repository.py | py | 3,685 | python | en | code | 0 | github-code | 90 |
73562520298 | import pygame
from color import BLACK
from animation import Animation
pygame.mixer.init()
pygame.mixer.pre_init(44100, -16, 2, 512)
class Player:
    # Side-scrolling player sprite: animation, gravity, keyboard movement
    # and the pick-up / put-down book mechanic.  Exactly one of the pose
    # flags (crawl / crawl_book / move) is normally True at a time.
    def __init__(self, x, y):
        """Create the player at pixel position (x, y) in the crawling idle pose."""
        # base img
        self.animation = Animation()
        self.animation_frames = self.animation.animation_frames
        self.animation_database = self.animation.animation_database
        self.img = self.animation_frames['idle_0']
        self.rect = self.img.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.vel_y = 0  # vertical velocity; gravity accumulates here
        self.crawl= True  # pose flag: crawling, hands free
        self.crawl_book = False  # pose flag: crawling while carrying a book
        self.move = False  # pose flag: walking upright
        self.movement = [0, 0]  # (dx, dy) computed by handle_key_pressed()
        self.frame = 0  # index into the current animation sequence
        self.action = 'idle'  # current animation name
        self.flip = True  # True = facing right (sprites are flipped)
        self.book = None  # book object currently carried, if any
    def draw(self, dis):
        """Advance the current animation by one frame and blit the sprite."""
        if self.action == 'idle':
            self.change_idle_img()
        if self.action != 'idle':
            # Cycle through the frames of the active animation.
            self.frame += 1
            if self.frame >= len(self.animation_database[self.action]):
                self.frame = 0
            img_id = self.animation_database[self.action][self.frame]
            self.img = self.animation_frames[img_id]
            self.img = pygame.transform.flip(self.img, self.flip, False)
        dis.blit(self.img, (self.rect.x, self.rect.y))
    def draw_rect(self, dis):
        """Debug helper: outline the collision rect in black."""
        pygame.draw.rect(dis, BLACK, self.rect, 1)
    def handle_key_pressed(self):
        """Read the held keys and store the frame's (dx, dy) in self.movement."""
        dx = 0
        dy = 0
        # Simple gravity, capped at a terminal velocity of 2 px/frame.
        self.vel_y += 0.2
        if self.vel_y > 2:
            self.vel_y = 2
        dy += self.vel_y
        keys = pygame.key.get_pressed()
        if keys[pygame.K_d] or keys[pygame.K_RIGHT]:
            self.check_action()
            self.flip = True
            dx += 1
        if keys[pygame.K_a] or keys[pygame.K_LEFT]:
            self.check_action()
            self.flip = False
            dx -= 1
        if dx == 0:
            # No horizontal input: fall back to the idle animation.
            self.action = 'idle'
            self.frame = 0
        self.movement = [dx, dy]
    def pick_up(self, book):
        """Grab *book* on J/X while crawling empty-handed; True on success."""
        if self.book == None:
            keys = pygame.key.get_pressed()
            if (keys[pygame.K_j] or keys[pygame.K_x]) and self.crawl:
                self.book = book
                self.change_action("crawl", "crawl_book")
                return True
        return False
    def put_down(self):
        """Drop the carried book on J/X; returns True on success."""
        if self.book != None:
            keys = pygame.key.get_pressed()
            if (keys[pygame.K_j] or keys[pygame.K_x]) and self.crawl_book:
                self.book = None
                self.change_action("crawl_book", "crawl")
                return True
        return False
    def change_action(self, old_action, new_action):
        """Switch pose flags from *old_action* to *new_action* and adjust the
        rect position/size to match the new sprite."""
        if old_action == "crawl": self.crawl = False
        elif old_action == "crawl_book": self.crawl_book = False
        elif old_action == "walk": self.move = False
        if new_action == "crawl": self.crawl = True
        elif new_action == "crawl_book": self.crawl_book = True
        elif new_action == "walk": self.move = True
        # Pixel offsets compensate for the differing sprite dimensions so the
        # player does not clip into the floor when standing up.
        if new_action == "walk":
            if self.flip:
                self.rect.x += 3
            else:
                self.rect.x -= 3
            self.rect.y -= 8
        if new_action == "crawl_book":
            self.rect.y -= 3
        self.change_idle_img()
        self.rect.width = self.img.get_width()
        self.rect.height = self.img.get_height()
    def check_action(self):
        """Set self.action from the pose flags (walk wins over crawl over book)."""
        if self.crawl_book:
            self.action = 'crawl_book'
        if self.crawl:
            self.action = 'crawl'
        if self.move:
            self.action = 'walk'
    def change_idle_img(self):
        """Pick the idle sprite that matches the current pose flag."""
        if self.crawl:
            self.img = self.animation_frames['idle_0']
        elif self.move:
            self.img = self.animation_frames['idle_1']
        elif self.crawl_book:
            self.img = self.animation_frames['idle_2']
    def reset(self, x, y):
        """Restore the freshly-spawned state at position (x, y) (mirrors __init__)."""
        self.img = self.animation_frames['idle_0']
        self.rect = self.img.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.vel_y = 0
        self.crawl= True
        self.crawl_book = False
        self.move = False
        self.movement = [0, 0]
        self.frame = 0
        self.action = 'idle'
        self.flip = True
        self.book = None
29161057727 | import curses
import curses.ascii
import sys
from . import kernel
from . import setting
def ctrl(c):
    """Map a one-character str/bytes to its control-key code, as an int."""
    code = ord(c)
    return curses.ascii.ctrl(code)
# isspace(3) isgraph(3) isprint(3)
# 0x09 '\t' True False False
# 0x0A '\n' True False False
# 0x0B '\v' True False False
# 0x0C '\f' True False False
# 0x0D '\r' True False False
# 0x20 ' ' True False True
def isspace(c):
    # Thin wrapper with isspace(3) semantics; accepts an int or a 1-char str.
    return curses.ascii.isspace(c)
def isgraph(c):
    # Thin wrapper with isgraph(3) semantics (printable, excluding space).
    return curses.ascii.isgraph(c)
def isprint(c):
    # return True if isgraph(3) or 0x20 (plain space)
    # this isn't same as isgraph(3) + isspace(3) -- see the table above:
    # tab/newline/etc. are isspace(3) but NOT printable.
    return curses.ascii.isprint(c)
def isprints(l):
    """True when *l* is non-empty and every element is printable ASCII."""
    if not len(l):
        return False
    return all(curses.ascii.isprint(x) for x in l)
def to_chr_repr(c):
    """Render *c* (int code or 1-char str) as a printable character,
    substituting '.' for anything unprintable (hexdump style)."""
    if not curses.ascii.isprint(c):
        return '.'
    return chr(c) if isinstance(c, int) else c
def iter_kbd_name():
    """Yield the symbolic key names, in the same order as get_code()'s tuple.

    BACKSPACE2 exists because FreeBSD-style terminals report a different
    backspace code (FIX_ME note inherited from the original).
    """
    names = (
        "TAB",
        "ENTER",
        "ESCAPE",
        "SPACE",
        "DOWN",
        "UP",
        "LEFT",
        "RIGHT",
        "BACKSPACE",
        "BACKSPACE2",
        "DELETE",
        "RESIZE",
    )
    yield from names
def get_code(term, use_stdout):
    """Return the default key codes, one per name from iter_kbd_name(), for
    the given backend.  Slots with no real code on a backend are filled with
    a KEY_DEAD sentinel so they can never match actual input (e.g. RESIZE
    for the stdout backend, BACKSPACE2 for vt* terminals)."""
    if use_stdout:
        return curses.ascii.TAB, \
            curses.ascii.LF, \
            curses.ascii.ESC, \
            curses.ascii.SP, \
            curses.KEY_DOWN, \
            curses.KEY_UP, \
            curses.KEY_LEFT, \
            curses.KEY_RIGHT, \
            curses.KEY_BACKSPACE, \
            curses.ascii.DEL, \
            curses.KEY_DC, \
            KEY_DEAD(0x100),
    elif term.startswith("vt"):
        # vt* terminals: BS for backspace, DEL for delete, no BACKSPACE2.
        return curses.ascii.TAB, \
            curses.ascii.LF, \
            curses.ascii.ESC, \
            curses.ascii.SP, \
            curses.KEY_DOWN, \
            curses.KEY_UP, \
            curses.KEY_LEFT, \
            curses.KEY_RIGHT, \
            curses.ascii.BS, \
            KEY_DEAD(0x100), \
            curses.ascii.DEL, \
            curses.KEY_RESIZE,
    else:
        return curses.ascii.TAB, \
            curses.ascii.LF, \
            curses.ascii.ESC, \
            curses.ascii.SP, \
            curses.KEY_DOWN, \
            curses.KEY_UP, \
            curses.KEY_LEFT, \
            curses.KEY_RIGHT, \
            curses.KEY_BACKSPACE, \
            curses.ascii.DEL, \
            curses.KEY_DC, \
            curses.KEY_RESIZE,
def KEY_DEAD(x):
    # Pack *x* above the 0xDEAD marker to make a sentinel code that cannot
    # collide with any real curses key.  (DEAD is defined just below this
    # function; it is looked up at call time, so the forward use is fine.)
    return DEAD | (x << 16)
ERROR = curses.ERR
DEAD = 0xDEAD
CONTINUE = KEY_DEAD(0)
INTERRUPT = KEY_DEAD(1)
QUIT = KEY_DEAD(2)
TAB = KEY_DEAD(3)
ENTER = KEY_DEAD(4)
ESCAPE = KEY_DEAD(5)
SPACE = KEY_DEAD(6)
DOWN = KEY_DEAD(7)
UP = KEY_DEAD(8)
LEFT = KEY_DEAD(9)
RIGHT = KEY_DEAD(10)
BACKSPACE = KEY_DEAD(11)
BACKSPACE2 = KEY_DEAD(12)
DELETE = KEY_DEAD(13)
RESIZE = KEY_DEAD(14)
def get_backspaces():
    # Both backspace variants (module attributes, possibly rebound by init()).
    return BACKSPACE, BACKSPACE2
def get_arrows():
    # The four arrow-key codes (module attributes, possibly rebound by init()).
    return DOWN, UP, LEFT, RIGHT
def init(term):
    """Bind the module-level key-name attributes (TAB, ENTER, ...) for the
    given terminal: a user override from setting.key_<name> wins, otherwise
    the backend default from get_code() is used."""
    l = get_code(term, setting.use_stdout)
    for i, s in enumerate(iter_kbd_name()):
        config = getattr(setting, "key_" + s.lower())
        if config is not None:
            setattr(this, s, config)
        else:
            setattr(this, s, l[i])
this = sys.modules[__name__]
init(kernel.get_term_info())
| woutershep/fileobj | src/kbd.py | kbd.py | py | 3,512 | python | en | code | null | github-code | 90 |
31836566619 | # PERSEGI
def hitung_luas_persegi():
    """Read two side lengths from stdin and return the rectangle area.

    float() is used so fractional inputs are accepted.
    """
    panjang = float(input("Masukkan panjang persegi: "))
    lebar = float(input("Masukkan lebar persegi: "))
    return panjang * lebar
luas_persegi = hitung_luas_persegi() # memebuat tempat untuk hasil dari fungsi
print("Luas persegi adalah:", luas_persegi) # mencetak hasil
# LINGKARAN
def hitung_luas_lingkaran():
    """Read a radius from stdin and return the circle area (pi ~= 3.14)."""
    jari_jari = float(input("Masukkan jari jari lingkaran: "))
    return 3.14 * jari_jari * jari_jari
luas_lingkaran = hitung_luas_lingkaran()
print("luas lingkaran adalah:", luas_lingkaran)
# SEGITIGA
def hitung_luas_segitiga():
    """Read base and height from stdin and return the triangle area."""
    alas = float(input("masukan alas segitiga:"))
    tinggi = float(input("masukan tinggi segitiga: "))
    return 0.5 * alas * tinggi
luas_segitiga = hitung_luas_segitiga()
print('luas segitiga adalah', luas_segitiga)
# BELAH KETUPAT
def hitung_luas_ketupat():
    """Read the two diagonals from stdin and return the rhombus area."""
    diagonal_1 = float(input("masukan d1:"))
    diagonal_2 = float(input("masukan d2: "))
    return 0.5 * diagonal_1 * diagonal_2
luas_ketupat = hitung_luas_ketupat()
# Bug fix: the label said 'luas segitiga' (triangle area), copy-pasted from
# the previous block, even though this prints the rhombus (belah ketupat) area.
print('luas belah ketupat adalah', luas_ketupat)
# The data type can be adjusted as needed; float is used here so fractional
# inputs are accepted and the results are more accurate.
18497927399 | H,W = map(int,input().split())
# H rows of W integers; H and W were read on the previous line.
a =[list(map(int,input().split())) for i in range(H)]
serching_pair = False
pair_count = 0
ans = []
# Walk the grid in serpentine (boustrophedon) order; every time a cell with
# an odd value is seen, either start a new path or close the current one.
# Each path records every cell visited between its two odd endpoints.
for i in range(H):
    if i%2 == 0:  # even rows: left to right
        for j in range(0,W,1):
            if a[i][j]%2==1:
                if serching_pair:
                    # Second odd cell: close the path.
                    ans[pair_count].append([i+1,j+1])
                    pair_count+=1
                    serching_pair=False
                else:
                    # First odd cell: open a new path.
                    ans.append([[i+1,j+1]])
                    serching_pair=True
            else:
                if serching_pair:
                    # Even cell along an open path: record it as a waypoint.
                    ans[pair_count].append([i+1,j+1])
    else:  # odd rows: right to left
        for j in range(W-1,-1,-1):
            if a[i][j]%2==1:
                if serching_pair:
                    ans[pair_count].append([i+1,j+1])
                    pair_count+=1
                    serching_pair=False
                else:
                    ans.append([[i+1,j+1]])
                    serching_pair=True
            else:
                if serching_pair:
                    ans[pair_count].append([i+1,j+1])
# Total number of moves = sum over paths of (cells - 1).
ans_size=0
for i in range(0,pair_count,1):
    ans_size+=len(ans[i])-1
print(ans_size)
# Emit each move as "r1 c1 r2 c2" between consecutive cells of each path.
for i in range(0,pair_count,1):
    for j in range(1,len(ans[i])):
        print(" ".join(list(map(str,ans[i][j-1]+ans[i][j]))))
| Aasthaengg/IBMdataset | Python_codes/p03263/s394612166.py | s394612166.py | py | 1,364 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.