blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0bfc60a9230cbb555e1d836fbbbe0a5073ce8d3a | 6e3e56c6be905d0e2d833332dd9bcd4b7b86f642 | /opticspy/ray_tracing/tests/test9_matrix.py | e4636739874a8500847d02c8f728c55856dbea6f | [
"MIT"
] | permissive | carmelom/opticspy | 91bc66ba5d858fef906050910753f2c573ec6db6 | f6fe254d0e84551a7c3379bbbe357db2469b26fe | refs/heads/master | 2020-04-07T14:50:43.460533 | 2018-11-28T01:19:35 | 2018-11-28T01:19:35 | 158,463,569 | 1 | 0 | MIT | 2018-11-20T23:20:15 | 2018-11-20T23:20:15 | null | UTF-8 | Python | false | false | 1,247 | py |
import lens, trace, glass
New_Lens = lens.Lens(lens_name='triplet',creator='XF')
New_Lens.lens_info()
New_Lens.add_wavelength(wl = 656.30)
New_Lens.add_wavelength(wl = 546.10)
New_Lens.add_wavelength(wl = 486.10)
New_Lens.list_wavelengths()
New_Lens.add_field(angle=0)
New_Lens.add_field(angle=7)
New_Lens.add_field(angle=10)
New_Lens.list_fields()
New_Lens.add_surface(number=1,radius=10000000,thickness=1000000,glass='air')
New_Lens.add_surface(number=2,radius=41.15909,thickness=6.097555 ,glass='BSM18_OHARA')
New_Lens.add_surface(number=3,radius=-957.83146,thickness=9.349584,glass='air')
New_Lens.add_surface(number=4,radius=-51.32104,thickness=2.032518,glass='PBM22_OHARA')
New_Lens.add_surface(number=5,radius=42.37768 ,thickness=5.995929 ,glass='air')
New_Lens.add_surface(number=6,radius=10000000,thickness=4.065037,glass='air',STO=True)
New_Lens.add_surface(number=7,radius=247.44562,thickness=6.097555,glass='BSM18_OHARA')
New_Lens.add_surface(number=8,radius=-40.04016,thickness=85.593426,glass='air')
New_Lens.add_surface(number=9,radius=10000000,thickness=0,glass='air')
#trace.trace_sys(New_Lens)
New_Lens.EFL()
New_Lens.BFL()
New_Lens.OAL(2,9)
New_Lens.image_position()
New_Lens.EP()
New_Lens.EX()
print New_Lens.EP_position | [
"alex.fanxing@gmail.com"
] | alex.fanxing@gmail.com |
f88d29fa7dbc679c1e2ad1d8c39b16ea74e26308 | 69692e30da9cf48e51011a330cb9a4523e14be7c | /flappy.py | 99cbac4490859d363122c0c70aa72390bda4a3ae | [] | no_license | pranav-ml/Flappy_AI_NEAT | 39396cc244ee3bb1eb719ff0aecec58dacde8b95 | 2c27f011dc604d8f1d5b9e693a7126878e384cbf | refs/heads/main | 2023-06-17T00:06:13.627857 | 2021-07-08T07:15:59 | 2021-07-08T07:15:59 | 359,089,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,656 | py | import neat
import pygame
import random
import os
pygame.init()
win_width = 500
win_height = 700
birdimg = [pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird1.png"))),
pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird2.png"))),
pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird3.png")))]
pipeimg = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "pipe.png")))
baseimg = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "base.png")))
bgimg = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bg.png")))
win = pygame.display.set_mode((win_width, win_height))
sfont = pygame.font.SysFont("comicsans", 50, True)
gen = 0
class Bird:
    """The flappy-bird sprite: gravity/jump physics, tilt animation and drawing.

    Relies on the module-level ``birdimg`` frame list and ``win`` surface.
    """
    max_rotation = 25  # upward tilt (degrees) while rising
    animation = 5      # NOTE(review): unused in this file — candidate for removal; confirm
    rot_vel = 20       # degrees of downward rotation applied per frame while falling
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.tilt = 0        # current rotation in degrees
        self.vel = 0         # vertical velocity; negative is upward
        self.tick_count = 0  # frames since the last jump
        self.img = birdimg[0]
        self.img_count = 0   # animation counter, incremented externally by main()
        self.d = 0           # displacement applied on the last move()
        # NOTE(review): self.height is only assigned in jump(); reading it
        # before the first jump would raise AttributeError.
    def jump(self):
        # Reset the physics clock so displacement is computed from the jump.
        self.vel = -10.5
        self.tick_count = 0
        self.height = self.y
    def move(self):
        """Advance one physics frame: apply velocity + gravity, update tilt."""
        self.tick_count += 1
        # Kinematics: d = v*t + 1.5*t^2 (the 1.5 term acts as gravity).
        self.d = self.vel * self.tick_count + 1.5 * self.tick_count ** 2
        if self.d >= 16:
            self.d = 16  # terminal downward velocity
        self.y += self.d
        if self.d < 0:
            # Rising: snap to the maximum upward tilt.
            self.tilt = self.max_rotation
        else:
            # Falling: rotate progressively nose-down, capped at -90 degrees.
            if self.tilt > -90:
                self.tilt -= self.rot_vel
            else:
                self.tilt = -90
    def draw(self):
        """Blit the current animation frame, rotated by the current tilt."""
        if self.img_count >= 12:
            self.img_count = 0
        if self.d < 0:
            # Rising: alternate between frames 0 and 1 (img_count in 0..11).
            self.img = birdimg[self.img_count // 6]
        else:
            # Falling: hold the level-wings frame.
            self.img = birdimg[1]
        rotated_img = pygame.transform.rotate(self.img, self.tilt)
        # Rotate about the sprite's center so the bird doesn't wobble.
        new_rect = rotated_img.get_rect(center=self.img.get_rect(topleft=(int(self.x), int(self.y))).center)
        win.blit(rotated_img, new_rect.topleft)
    def get_mask(self):
        """Return a pixel mask of the current frame for collision tests."""
        return pygame.mask.from_surface(self.img)
class Base:
    """Endlessly scrolling ground: two copies of the base image leapfrog left."""

    VEL = 5
    WIDTH = baseimg.get_width()
    IMG = baseimg

    def __init__(self, y):
        self.y = y
        # Start the two copies side by side, covering the whole screen width.
        self.x1 = 0
        self.x2 = self.WIDTH

    def move(self):
        """Scroll both copies left; recycle whichever fully leaves the screen."""
        self.x1, self.x2 = self.x1 - self.VEL, self.x2 - self.VEL
        if self.x1 + self.WIDTH < 0:
            self.x1 = self.x2 + self.WIDTH
        if self.x2 + self.WIDTH < 0:
            self.x2 = self.x1 + self.WIDTH

    def draw(self):
        """Blit both ground copies at their current positions."""
        for ground_x in (self.x1, self.x2):
            win.blit(self.IMG, (ground_x, self.y))
class Pipe():
    """A top/bottom pipe pair with a fixed vertical gap, scrolling leftwards."""
    def __init__(self, x):
        self.x = x
        self.vel = 5     # horizontal scroll speed (pixels per frame)
        self.gap = 200   # vertical gap between the two pipes
        self.height = 0  # y of the bottom edge of the top pipe (set below)
        # Top pipe is the bottom image flipped vertically.
        self.ptop = pygame.transform.flip(pipeimg, False, True)
        self.pbottom = pipeimg
        self.top = 0     # y where the top pipe image is blitted
        self.bottom = 0  # y where the bottom pipe image is blitted
        self.passed = False  # NOTE(review): never set elsewhere in this file
        self.set_height()
    def set_height(self):
        """Randomize the gap position and derive both pipes' blit positions."""
        self.height = random.randrange(50, 430)
        # Anchor the top image so its bottom edge sits at ``height``.
        self.top = self.height - self.ptop.get_height()
        self.bottom = self.height + self.gap
    def move(self):
        # Scroll left one frame.
        self.x -= self.vel
    def draw(self):
        win.blit(self.ptop, (self.x, self.top))
        win.blit(self.pbottom, (self.x, self.bottom))
    def collide(self, bird):
        """Return True if *bird* pixel-overlaps either pipe of this pair."""
        bird_mask = bird.get_mask()
        top_mask = pygame.mask.from_surface(self.ptop)
        bottom_mask = pygame.mask.from_surface(self.pbottom)
        # Offsets are pipe position relative to the bird's top-left corner.
        top_offset = (self.x - bird.x, self.top - round(bird.y))
        bottom_offset = (self.x - bird.x, self.bottom - round(bird.y))
        b_point = bird_mask.overlap(bottom_mask, bottom_offset)
        t_point = bird_mask.overlap(top_mask, top_offset)
        if b_point or t_point:
            return True
        else:
            return False
def redrawWin(pipes, birds, base):
    """Redraw one frame: background, pipes, birds, ground and the HUD.

    Reads the module globals ``win``, ``bgimg``, ``sfont``, ``score`` and
    ``gen``; flips the display at the end.
    """
    win.blit(bgimg, (0, -100))
    base.draw()
    for pipe in pipes:
        pipe.draw()
    for bird in birds:
        bird.draw()
    # HUD: score (top-left), generation (top-right), live-bird count.
    text = sfont.render("Score:" + str(score), 1, (255, 255, 255))
    win.blit(text, (20, 20))
    text = sfont.render("Gen:" + str(gen), 1, (255, 255, 255))
    win.blit(text, (350, 20))
    text = sfont.render("Alive:" + str(len(birds)), 1, (255, 255, 255))
    win.blit(text, (20, 60))
    pygame.display.update()
def main(genomes, config):
    """NEAT fitness function: run one generation of birds through the game.

    Each genome drives one bird via a feed-forward net whose inputs are the
    bird's height and its vertical distances to the next pipe gap's edges.
    Fitness: +0.1 per surviving frame, +10 per pipe cleared, -5 per collision.
    """
    clock = pygame.time.Clock()
    global score
    score = 0
    global gen
    gen += 1
    base = Base(630)
    pipes = [Pipe(700)]
    run = True
    # Three parallel lists: birds[i] is controlled by nets[i] / scored by ge[i].
    birds = []
    nets = []
    ge = []
    for _, g in genomes:
        net = neat.nn.FeedForwardNetwork.create(g, config)
        nets.append(net)
        birds.append(Bird(100, 200))
        g.fitness = 0
        ge.append(g)
    while run:
        clock.tick(30)
        # Steer relative to the first pipe still ahead of the flock.
        pipe_ind = 0
        if len(birds) > 0:
            if len(pipes) > 0 and birds[0].x > pipes[0].x + pipes[0].ptop.get_width():
                pipe_ind = 1
        else:
            break
        for x, bird in enumerate(birds):
            bird.move()
            bird.img_count += 1
            ge[x].fitness += 0.1  # small reward for every frame survived
            output = nets[x].activate(
                (bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom)))
            if output[0] < 0.5:
                bird.jump()
        base.move()
        # Bug fix: the previous version removed entries from birds/nets/ge and
        # pipes *while iterating them*, which skipped elements and let the
        # three parallel lists drift out of sync. Collect removals first.
        dead = set()
        for pipe in list(pipes):  # snapshot: pipes is appended to / pruned below
            for x, bird in enumerate(birds):
                if pipe.collide(bird):
                    ge[x].fitness -= 5
                    dead.add(x)
            pipe.move()
            if pipe.x == 0:
                # The flock just cleared this pipe: spawn the next one and
                # reward every still-living genome.
                pipes.append(Pipe(500))
                score += 1
                for g in ge:
                    g.fitness += 10
            if pipe.x + pipe.ptop.get_width() < 0:
                pipes.remove(pipe)
        for x, bird in enumerate(birds):
            if bird.y >= 560 or bird.y < 0:  # hit the ground or flew off-screen
                dead.add(x)
        # Remove in descending index order so earlier indices stay valid.
        for x in sorted(dead, reverse=True):
            birds.pop(x)
            nets.pop(x)
            ge.pop(x)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                pygame.quit()
                exit()
        redrawWin(pipes, birds, base)
def run(config_file):
    """Create a NEAT population from *config_file* and evolve for 100 generations,
    using main() as the per-generation fitness function."""
    config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
                                neat.DefaultSpeciesSet, neat.DefaultStagnation,
                                config_file)
    p = neat.Population(config)
    p.run(main, 100)
if __name__ == '__main__':
    # Resolve the NEAT config relative to this script's directory.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'config-feedforward.txt')
    run(config_path)
| [
"52795266+pranav882655@users.noreply.github.com"
] | 52795266+pranav882655@users.noreply.github.com |
7a8744fcccb0be5a4a7b881d554988f0f7ca965b | 78099e90f5224623f58db79e6e1f91fe8d44a0b3 | /arbitary.py | afac986c5627aab6e1273bf9dce016e055d23e91 | [] | no_license | savitadevi/function | 8151d6e79657d086f9c095f94cd0706e12cbea8f | 4d521dc546fd8c9b037cb78185e69a2bfbf6aa06 | refs/heads/main | 2023-06-14T14:13:15.493256 | 2021-07-13T06:55:23 | 2021-07-13T06:55:23 | 382,122,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | def student_name(*names):
# for student_name in names:
print("hi",names)
student_name("savita","amla","anisha","suman")
def isgraterthen_20(a,b=20):
print(a,b)
isgraterthen_20(10)
student_name("savita","amla","anisha","suman") | [
"savitadevi20@navgurukul.org"
] | savitadevi20@navgurukul.org |
38a73f29cb633dc9396633c72607a3415fa7ed7e | 0640c53cd62def556a098f1e732deee8c1348c9e | /IIITSERC-ssad_2015_a3_group1-88a823ccd2d0/Abhishek Vinjamoori/DonkeyKongFinal/src/player.py | c284e27402935cc64dd01b598004dd4b0546ffa9 | [] | no_license | anirudhdahiya9/Open-data-projecy | 579867fe8716076819734cebdbc6e15bb471bb39 | 26d629f8348f0110fa84b02009e787a238aff441 | refs/heads/master | 2021-01-10T13:50:19.855983 | 2016-03-23T22:46:03 | 2016-03-23T22:46:03 | 54,598,189 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | import pygame
size = 16
class player(person):
    """Playable character sprite with left/right standing and walking frames,
    all scaled to a ``size`` x ``size`` square.

    NOTE(review): the base class ``person`` is not defined or imported in this
    file; it is assumed to provide ``__init__(self, name, initX, initY)`` —
    confirm against its definition.
    """
    def __init__(self, name, initX, initY):
        # Bug fix: the unbound base-class call must pass ``self`` explicitly
        # (previously ``name`` was silently bound as ``self``).
        person.__init__(self, name, initX, initY)
        # Right-facing standing frame.
        self.marioright = pygame.image.load('../images/m1.png')
        self.marioright = pygame.transform.scale(self.marioright, (size, size))
        # Right-facing walking frame.
        self.mariowalkright = pygame.image.load('../images/m2.png')
        self.mariowalkright = pygame.transform.scale(self.mariowalkright, (size, size))
        # Left-facing frames are horizontal mirrors of the right-facing ones.
        # Bug fix: flip() previously received the bare names ``mariowalkright``
        # and ``marioleft`` (NameError) instead of the instance attributes,
        # and redundantly re-scaled already-scaled surfaces.
        self.mariowalkleft = pygame.transform.flip(self.mariowalkright, True, False)
        self.marioleft = pygame.transform.flip(self.marioright, True, False)
        # Frame currently being displayed.
        self.mario = self.marioright
"anirudhdahiya9@gmail.com"
] | anirudhdahiya9@gmail.com |
64bdac7133fc0b26f17c3e74ef60a624dea2bf9a | f4b79529109fbb4055f334d0d9c7c96cb0710447 | /colour/utilities/tests/test_deprecated.py | bfbb70c00812c9381bc5e3c93242eec9a75ef368 | [
"BSD-3-Clause"
] | permissive | trevorandersen/colour | 167381b3d03e506a270a8d2a519a164808995437 | 02b595b26313c4b4f55adc41d599f90c4c9edbcd | refs/heads/develop | 2021-07-15T04:48:19.585586 | 2021-01-23T23:51:44 | 2021-01-23T23:51:44 | 230,421,054 | 0 | 0 | BSD-3-Clause | 2019-12-28T12:54:20 | 2019-12-27T10:10:30 | null | UTF-8 | Python | false | false | 962 | py | # -*- coding: utf-8 -*-
import sys
from colour.utilities.deprecation import (ModuleAPI, ObjectRenamed,
ObjectRemoved)
class deprecated(ModuleAPI):
    """Test fixture: a ModuleAPI subclass used to wrap this very module so the
    deprecation machinery (renamed/removed attributes) can be exercised."""
    def __getattr__(self, attribute):
        # Delegate to ModuleAPI, which resolves renamed/removed attributes.
        return super(deprecated, self).__getattr__(attribute)
NAME = None
"""
A non-deprecated module attribute.
NAME : object
"""
NEW_NAME = None
"""
A module attribute with a new name.
NEW_NAME : object
"""
# Replace this module in sys.modules with the wrapping ``deprecated`` instance:
# accessing OLD_NAME or REMOVED on the module then triggers the corresponding
# ObjectRenamed / ObjectRemoved behaviour under test.
sys.modules['colour.utilities.tests.test_deprecated'] = (deprecated(
    sys.modules['colour.utilities.tests.test_deprecated'], {
        'OLD_NAME':
            ObjectRenamed(
                name='colour.utilities.tests.test_deprecated.OLD_NAME',
                new_name='colour.utilities.tests.test_deprecated.NEW_NAME'),
        'REMOVED':
            ObjectRemoved(name='colour.utilities.tests.test_deprecated.REMOVED'
                          )
    }))
# Scrub the helper names so they are not visible as module attributes.
del ModuleAPI
del ObjectRenamed
del ObjectRemoved
del sys
"thomas.mansencal@gmail.com"
] | thomas.mansencal@gmail.com |
75a9cfc273e95b139b790cb77661c83a42b47977 | 363cbc43dad2f8a3cb08ca343b95c1d08f6c60c6 | /analysis/work/macros/dep/pyapp.py | 0ef566d048bb0050c6080083b995f20860b7e4dc | [] | no_license | DebabrataBhowmik/MonoHiggsToGG | 45af4363928b48d51ae50210a18a1bb19eb909c5 | b22984b06d3b1f767dcf15796c66f07581bf39f0 | refs/heads/master | 2022-01-26T02:30:39.866888 | 2019-04-02T09:50:26 | 2019-04-02T09:50:26 | 179,046,566 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,027 | py | from optparse import OptionParser, OptionGroup, make_option
import sys
import os
# -------------------------------------------------------------
class pyapp(object):
    """Small ROOT-based CLI application scaffold.

    Parses optparse options (built-ins plus caller-supplied groups), creates
    the output directory if requested, and imports ROOT as a side effect.
    Subclasses are expected to implement ``__call__(options, args)``.
    """
    def __init__(self,option_list,option_groups=[],defaults=None):
        # NOTE(review): mutable default ``option_groups=[]`` is shared across
        # calls; harmless here only because it is never mutated (a new list is
        # built via ``+``) — still worth changing to None.
        self.objs_ = []    # arbitrary objects kept alive (see commented keep())
        self.canvs_ = []   # TCanvas objects to be saved by save()
        self.files_ = {}   # opened TFiles keyed by path::options
        # generic options
        opt_list = [
            make_option("-I","--interactive",
                        action="store_true",dest="interactive",
                        default=sys.flags.interactive,help="default: %default",metavar=""),
            make_option("-O","--outdir",
                        action="store",dest="outdir",type="string",
                        default="",help="default: %default"),
            make_option("-S","--save",
                        action="store_true",dest="save",
                        default=False,help="default: %default"),
            #make_option("--saveas",
            #            action="callback",dest="saveas",type="string",callback=ScratchAppen(),
            #            default=["png","root"], help="default: %default", metavar=""),
            make_option("-v","--verbose",
                        action="store_true",dest="verbose",
                        default=False,help="default: %default"),
            ] # end opt_list
        parser = OptionParser("usage: %prog [options]")
        # Built-in options first, then any caller-provided groups/options.
        opt_groups = [ ("PyApp Options", opt_list ) ] + option_groups
        if len(option_list) > 0:
            opt_groups.append( ("User Options", option_list) )
        for name,opts in opt_groups:
            gr = OptionGroup(parser,name)
            for opt in opts:
                gr.add_option(opt)
            parser.add_option_group(gr)
        (self.options, self.args) = parser.parse_args()
        # make output directory
        if self.options.outdir:
            if os.path.isdir(self.options.outdir):
                print("Will write to Output Directory: %s" %self.options.outdir)
            else:
                print("Making Output Directory: %s" %self.options.outdir)
                os.mkdir(self.options.outdir)
        # Import ROOT only after option parsing so --help stays fast.
        global ROOT
        import ROOT
    def run(self):
        """Invoke the subclass-provided __call__ with the parsed options."""
        self.__call__(self.options,self.args)
    def save(self,clear=False):
        """Save every kept canvas to outdir in each requested format.

        NOTE(review): relies on ``self.options.saveas``, but the --saveas
        option above is commented out — this would raise AttributeError as-is.
        """
        for c in self.canvs_:
            if not c: continue
            c.Modified()
            for fmt in self.options.saveas:
                c.SaveAs("%s/%s.%s" %( self.options.outdir, c.GetName(), fmt ))
        if clear:
            for c in self.canvs_:
                del c
            self.canvs_ = []
# def keep(self,objs,format=False):
# if type(objs) == list:
# for obj in objs:
# self.keep(obj,format)
# return
# try:
# if objs.IsA().InheritsFrom("TCanvas"):
# self.canvs_.append(objs)
# else:
# self.objs_.append(objs)
# except:
# self.objs_.append(objs)
# try:
# if objs.IsA().InheritsFrom("TFile"):
# key = "%s::%s" %( os.path.abspath(objs.GetName()), self.normalizeTFileOptions(objs.GetOption()))
# self.files_[key] = objs
# except:
# pass
# if format:
# self.format(objs,self.options.styles)
#
# def format(self,objs,styles):
# for key,st in styles.iteritems():
# if fnmatch(objs.GetName(),key) or objs.GetName() == key:
| [
"mez34@cornell.edu"
] | mez34@cornell.edu |
14b4f6dcbd92af3bd50f8ccf81dff0b95e561081 | 089a6215e04433d95e4f8af78130f79b504e94b9 | /marketing/migrations/0007_background.py | 528395ae9a0d82a41d7523f942c26d32670984f3 | [] | no_license | Bulalu/Blog | d32e3bdb162b023afeebe32e878a1d08c701098b | 80911e3523b8e6ae84f94c55b633d1e1405ed292 | refs/heads/master | 2023-06-18T01:13:42.088370 | 2021-07-13T18:52:12 | 2021-07-13T18:52:12 | 369,866,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # Generated by Django 3.2 on 2021-06-01 15:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the ``Background`` model — an optional
    image that can be flagged as the featured background."""
    dependencies = [
        ('marketing', '0006_remove_gallery_background'),
    ]
    operations = [
        migrations.CreateModel(
            name='Background',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # upload_to='' means uploads land directly in MEDIA_ROOT.
                ('picture', models.ImageField(blank=True, null=True, upload_to='')),
                ('featured', models.BooleanField(default=False)),
            ],
        ),
    ]
| [
"elishabulalu031@gmail.com"
] | elishabulalu031@gmail.com |
b7712591863fbb072cbbe65ce7675a1ae332eda4 | 7a09f8c752dec3e5afca50490914fdc8187113eb | /citrine-client/citrine_client/api/api.py | 4366bbfccdd2492d228ac37135b0b301e3bd5d1c | [] | no_license | antonpaquin/citrine | b6ae1075519b90b811d576d3ae4dcc3397c97eb3 | 47101592560b4cfa63bd3448430962829e59c3ba | refs/heads/master | 2022-11-28T08:16:57.094007 | 2020-07-16T00:51:44 | 2020-07-16T00:51:44 | 270,427,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,623 | py | import json
import functools
from typing import *
import requests
import citrine_client.errors as errors
import citrine_client.api.util as util
from citrine_client.server import SyncRequest, AsyncRequest, DaemonLink
__all__ = [
'PackageClient',
'CitrineClient',
]
class PackageClient(object):
    """Client for the daemon's ``/package/*`` endpoints.

    In synchronous mode each call blocks and returns the final response dict;
    in async mode calls accept a ``progress_callback`` invoked with progress
    updates while the request runs.
    """
    def __init__(self, host: str, port: int, autocancel: bool = True, async_: bool = False):
        self.server = DaemonLink(host=host, port=port)
        self.autocancel = autocancel
        self.async_ = async_
        # Pick the request strategy once; async requests can auto-cancel.
        if async_:
            self.Request = functools.partial(AsyncRequest, cancel=autocancel)
        else:
            self.Request = SyncRequest
    def install(
        self,
        name: Optional[str] = None,
        specfile: Optional[str] = None,
        localfile: Optional[str] = None,
        url: Optional[str] = None,
        package_hash: Optional[str] = None,
        progress_callback: Optional[Callable[[Dict], None]] = None,
    ) -> Dict:
        """Install a package identified by name, spec file, local file or URL."""
        request_data = util.package_install_params(name, specfile, localfile, url, package_hash)
        req = self.Request(
            server=self.server,
            endpoint='/package/install',
            **request_data,
        )
        if self.async_:
            return req.run(callback=progress_callback)
        else:
            return req.run()
    def fetch(
        self,
        name: Optional[str] = None,
        specfile: Optional[str] = None,
        localfile: Optional[str] = None,
        url: Optional[str] = None,
        package_hash: Optional[str] = None,
        progress_callback: Optional[Callable[[Dict], None]] = None,
    ) -> Dict:
        """Fetch a package.

        NOTE(review): posts to '/package/install', identical to install() —
        looks copy-pasted; confirm whether a '/package/fetch' endpoint exists.
        """
        request_data = util.package_install_params(name, specfile, localfile, url, package_hash)
        req = self.Request(
            server=self.server,
            endpoint='/package/install',
            **request_data,
        )
        if self.async_:
            return req.run(callback=progress_callback)
        else:
            return req.run()
    def activate(
        self,
        name: str,
        version: Optional[str],
        progress_callback: Optional[Callable[[Dict], None]] = None,
    ):
        """Activate the named package (optionally a specific version)."""
        req = self.Request(
            server=self.server,
            endpoint='/package/activate',
            jsn={'name': name, 'version': version},
        )
        if self.async_:
            return req.run(callback=progress_callback)
        else:
            return req.run()
    def deactivate(
        self,
        name: str,
        version: Optional[str],
        progress_callback: Optional[Callable[[Dict], None]] = None,
    ):
        """Deactivate the named package (optionally a specific version)."""
        req = self.Request(
            server=self.server,
            endpoint='/package/deactivate',
            jsn={'name': name, 'version': version},
        )
        if self.async_:
            return req.run(callback=progress_callback)
        else:
            return req.run()
    def remove(
        self,
        name: str,
        version: Optional[str],
        progress_callback: Optional[Callable[[Dict], None]] = None,
    ):
        """Remove the named package (optionally a specific version)."""
        req = self.Request(
            server=self.server,
            endpoint='/package/remove',
            jsn={'name': name, 'version': version},
        )
        if self.async_:
            return req.run(callback=progress_callback)
        else:
            return req.run()
    def list(
        self,
        progress_callback: Optional[Callable[[Dict], None]] = None,
    ):
        """List installed packages (GET; name shadows the builtin by design)."""
        req = self.Request(
            server=self.server,
            endpoint='/package/list',
            method='get',
        )
        if self.async_:
            return req.run(callback=progress_callback)
        else:
            return req.run()
    def search(
        self,
        query: str,
        progress_callback: Optional[Callable[[Dict], None]] = None,
    ):
        """Search the package index for *query*."""
        req = self.Request(
            server=self.server,
            endpoint='/package/search',
            jsn={'query': query},
        )
        if self.async_:
            return req.run(callback=progress_callback)
        else:
            return req.run()
class CitrineClient(object):
    """Consumer of the Citrine daemon API (synchronous by default, optionally
    asynchronous with progress callbacks).

    Exposes a heartbeat check, model execution (``run``/``_run``), result
    retrieval, and a ``package`` sub-client for package management.
    """
    def __init__(self, host: str, port: int, autocancel: bool = True, async_: bool = False):
        self.server = DaemonLink(host=host, port=port)
        self.package = PackageClient(host=host, port=port, autocancel=autocancel, async_=async_)
        self.autocancel = autocancel
        self.async_ = async_
        # Pick the request strategy once; async requests can auto-cancel.
        if async_:
            self.Request = functools.partial(AsyncRequest, cancel=autocancel)
        else:
            self.Request = SyncRequest
    def heartbeat(self) -> Dict:
        """GET the daemon's root endpoint and return its JSON status payload.

        Raises:
            errors.InvalidResponse: if the server replies with non-JSON data.
        """
        url = f'http://{self.server.host}:{self.server.port}/'
        resp = util.wrap_request(requests.get, url, timeout=10)
        try:
            return json.loads(resp.decode('utf-8'))
        except json.JSONDecodeError:
            # Bug fix: this branch previously referenced an undefined name
            # ``r`` (raising NameError instead of the intended error); report
            # the response we actually received.
            raise errors.InvalidResponse('Server response was not JSON',
                                         data={'response': resp.decode('utf-8')})
    def run(
        self,
        target: str,
        params: Dict = None,
        progress_callback: Optional[Callable[[Dict], None]] = None,
    ) -> Dict:
        """Execute the named run target with JSON-serializable *params*."""
        if not params:
            params = {}
        req = self.Request(
            server=self.server,
            endpoint=f'/run/{target}',
            jsn=params,
        )
        if self.async_:
            return req.run(callback=progress_callback)
        else:
            return req.run()
    def _run(
        self,
        target_package: str,
        target_model: str,
        params: Dict = None,
        progress_callback: Optional[Callable[[Dict], None]] = None,
    ) -> Dict:
        """Execute a specific model within a package directly."""
        # TODO: _run wants numpy arrays
        # Consider enforcing / coercing params to {str: np.ndarray}
        # Which might have implications for serialization (tensor protobuf?)
        if not params:
            params = {}
        req = self.Request(
            server=self.server,
            endpoint=f'/_run/{target_package}/{target_model}',
            jsn=params,
        )
        if self.async_:
            return req.run(callback=progress_callback)
        else:
            return req.run()
    def result(
        self,
        result_hash: str
    ) -> bytes:
        """Download the raw bytes of a previously computed result."""
        url = f'http://{self.server.host}:{self.server.port}/result/{result_hash}'
        resp = util.wrap_request(requests.get, url, timeout=10)
        return resp
| [
"git@antonpaqu.in"
] | git@antonpaqu.in |
54390514accfef3d5b46666a5f12457557475660 | b75918b2ac1dfaf2c1219f40d63004900c9338b1 | /app/main.py | bb88fce0c41afd8a113ef6dc5777bfc4d1d5a774 | [] | no_license | solashirai/ExplainableCourseRecommender | e0f036da9814a0187daa5635da0ff2f86386026d | 6a2795cfc4536548ac3679b3d23b953e55a50a37 | refs/heads/main | 2023-04-14T14:27:36.054830 | 2021-04-19T02:29:48 | 2021-04-19T02:29:48 | 302,346,189 | 1 | 0 | null | 2021-04-18T16:13:48 | 2020-10-08T13:17:44 | Python | UTF-8 | Python | false | false | 4,531 | py | from flask import Flask, request, abort
import rdflib
from escore.pipeline import RecommendCoursesPipeline
from escore.services.course import GraphCourseQueryService
from escore.services import PlanOfStudyRecommenderService
from escore.utils.path import DATA_DIR
from escore.models import StudentPOSRequirementContext, CourseCandidate, Student, PlanOfStudy
from typing import Tuple
from frex.stores import LocalGraph
app = Flask(__name__)
# for testing locally
kg_files = tuple((DATA_DIR / file).resolve() for file in [
"courses.ttl",
"scheduled_courses.ttl",
"rpi_departments.ttl",
"parsed_grad_requirements.ttl",
"users.ttl",
])
COURSEKG_GRAPH = LocalGraph(file_paths=kg_files)
# COURSEKG_GRAPH = RemoteGraph(
# sparql_endpoint="?"
# )
COURSE_QS = GraphCourseQueryService(queryable=COURSEKG_GRAPH)
PLACEHOLDER_PIPE = RecommendCoursesPipeline(course_query_service=COURSE_QS)
PR_SERVICE = PlanOfStudyRecommenderService(
course_query_service=COURSE_QS
)
@app.route("/escore_api/")
def hello_world():
return "Hello, World!"
@app.route("/escore_api/dummy_get_rec", methods=["GET"])
def dummy_recommend_courses():
args = request.args
# dummy plan of study and student to test
pos = PlanOfStudy(
uri=rdflib.URIRef('placeholder_pos1'),
class_year=2022,
planned_major=None,
planned_degree=None,
completed_courses=frozenset(),
completed_course_sections=frozenset(),
ongoing_course_sections=frozenset(),
planned_courses=frozenset()
)
student = Student(
uri=rdflib.URIRef('placeholder_stud1'),
study_plan=pos,
name='john doe',
class_year=2022,
topics_of_interest=frozenset(),
registered_courses=frozenset(),
advisor=None,
)
context = StudentPOSRequirementContext(student=student, plan_of_study=pos,
requirements=frozenset(COURSE_QS.get_all_requirements()))
rec_courses: Tuple[CourseCandidate, ...] = PLACEHOLDER_PIPE(context=context)
app.logger.info(f'retrieved recommended courses.')
rec_course_codes = [rc.domain_object.course_code.name for rc in rec_courses]
return {'recommend_course_codes': rec_course_codes}
@app.route("/escore_api/get_recommended_courses_for_student", methods=["GET"])
def get_course_recommendation_for_student():
args = request.args
#https%3A%2F%2Ftw.rpi.edu%2Fontology-engineering%2Foe2020%2Fcourse-recommender-individuals%2Fusrowen
student_uri = rdflib.URIRef(args["student_uri"])
student = COURSE_QS.get_student_by_uri(student_uri=student_uri)
print(f'got student {student.name}')
# will plan of study be saved somehow...? or have person input and pass it via this method...?
# assuming POS will have some structure... ignoring for now since it's not properly used anyways
pos = args.get('plan_of_study', None)
if pos is None:
pos = student.study_plan
print(f'got student plan of study')
context = StudentPOSRequirementContext(student=student, plan_of_study=pos,
requirements=frozenset(COURSE_QS.get_all_requirements()))
rec_courses: Tuple[CourseCandidate, ...] = PLACEHOLDER_PIPE(context=context)
app.logger.info(f'retrieved recommended courses.')
rec_course_codes = [rc.domain_object.course_code.name for rc in rec_courses]
return {'recommend_course_codes': rec_course_codes}
#
# except NotFoundException as e:
# abort(404, description=e)
# except MalformedContentException as e:
# abort(500, description=e)
@app.route("/escore_api/get_pos_rec_for_student", methods=["GET"])
def get_pos_recommendation_for_student():
args = request.args
# ?student_uri=https%3A%2F%2Ftw.rpi.edu%2Fontology-engineering%2Foe2020%2Fcourse-recommender-individuals%2Fusrowen
student_uri = rdflib.URIRef(args["student_uri"])
student = COURSE_QS.get_student_by_uri(student_uri=student_uri)
print(f'got student {student.name}')
pos_rec = PR_SERVICE.get_pos_recommendation_for_target_student(student=student)
rec_sem_courses = {f'{sec.section_object.term} {sec.section_object.year} semester': [cand.domain_object.name
for cand in sec.section_candidates]
for sec in pos_rec.solution_section_sets[1].sections}
return {'recommend_course_per_semester': rec_sem_courses}
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| [
"solashakashirai@gmail.com"
] | solashakashirai@gmail.com |
dc70e874342123a38005f05ad3a80c1ee0045ec1 | 2c69245fa6b65affaa40755785504df4c12dd3b5 | /phraser/tools/fix_header_guards.py | c4547145f126923db909ff79524774271c76e916 | [
"MIT"
] | permissive | knighton/phraser | 1b711a20193e4722e50d41e0ea11c69dca7bfcef | a4b213260cd9b24fb3052973a1268c021f965ce8 | refs/heads/master | 2021-01-17T09:04:22.561009 | 2016-04-01T21:32:10 | 2016-04-01T21:32:10 | 34,379,599 | 1 | 2 | null | 2016-04-04T21:12:41 | 2015-04-22T08:51:15 | C++ | UTF-8 | Python | false | false | 1,193 | py | #!/usr/bin/python
#
# Fix each .h header guard (useful after moving files around).
import os
DOIT = True
def each_header(root_dir):
    """Yield the path of every .h file found under root_dir (recursive)."""
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        for filename in filenames:
            if filename.endswith('.h'):
                yield os.path.join(dirpath, filename)
def header_guard_from_file_name(f):
    """Derive the include-guard macro name for header path f.

    A leading './' is dropped, directory separators become underscores, the
    trailing '.h' becomes '_H_', and the result is uppercased.  Bug fix: the
    '.h' substitution now applies only at the end of the path, so a '.h'
    occurring elsewhere (e.g. in a directory name) is left alone.
    """
    if f.startswith('./'):
        f = f[2:]
    if f.endswith('.h'):
        f = f[:-2] + '_H_'
    return f.replace('/', '_').upper()
def fix_header_guards(root_dir):
    """Rewrite stale #ifndef guards in every .h under root_dir (Python 2).

    For each header, the first token must be '#ifndef' and the second is the
    old guard; when it differs from the expected guard, rewrite in place
    (DOIT=True) or just report (DOIT=False).
    NOTE(review): text.replace(old_header, new_header) substitutes *every*
    occurrence of the old guard string in the file, not just the guard lines.
    """
    for fn in each_header(root_dir):
        new_header = header_guard_from_file_name(fn)
        text = open(fn).read()
        ss = text.split()
        try:
            # Sanity check: the header must start with an include guard.
            assert ss[0] == '#ifndef'
        except:
            print 'WTF:', fn
            assert False
        old_header = ss[1]
        if old_header != new_header:
            if DOIT:
                open(fn, 'wb').write(text.replace(old_header, new_header))
            else:
                # Dry run: report what would change.
                print 'Would change a header:'
                print '  file:', fn
                print '  old: ', old_header
                print '  new: ', new_header
def main():
    # Fix header guards for every .h under the current directory.
    fix_header_guards('.')
if __name__ == '__main__':
    main()
| [
"iamknighton@gmail.com"
] | iamknighton@gmail.com |
49a24167225575397b1f04c898fcb7f418c33265 | fbb652932457761afb2f3a1a86a6d823ed459416 | /prep_HSV.py | cb2606311b66da31fa759e8b276a5abf6b2e8c78 | [] | no_license | grace-hansen/hic | 0187899a95d3a32410997e5dbd2af7e532928986 | 89143f421f660ebee3dbba9ebfa42c883e5e9202 | refs/heads/main | 2023-01-08T13:46:37.346580 | 2020-10-30T20:59:15 | 2020-10-30T20:59:15 | 308,743,317 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | #!/usr/bin/python
import gzip, subprocess
import argparse
import pandas as pd
def _load_ibed(path, idx):
    """Read one ibed file, keep cis interactions < 1 Mb apart, and tag its
    read-count/score columns with the replicate index *idx* so the three
    replicates can be merged on the shared fragment-coordinate columns."""
    ibed = pd.read_csv(path, sep='\t')
    # Drop trans interactions (bait and other end on different chromosomes).
    ibed = ibed[ibed['bait_chr'] == ibed['otherEnd_chr']]
    # Drop long-range interactions (>= 1 Mb between fragment starts).
    ibed = ibed[abs(ibed['bait_start'] - ibed['otherEnd_start']) < 1000000]
    return ibed.rename(columns={"N_reads": "nreads%d" % idx, "score": "score%d" % idx})


parser = argparse.ArgumentParser()
parser.add_argument("dir", help="full path to HSV directory you're analyzing. File 'IDs' should be there, with paths to 3 ibeds to process")
args = parser.parse_args()

##################################
dir = args.dir
# The IDs file lists one ibed path per line; close the handle when done
# (previously the file object was leaked).
with open("%s/IDs" % dir, 'r') as fh:
    IDs = [line.strip() for line in fh]

################## Remove interactions > 1MB and trans interactions ##################
# The three copy-pasted load/filter stanzas are now a single helper.
ibed1 = _load_ibed(IDs[0], 1)
ibed2 = _load_ibed(IDs[1], 2)
ibed3 = _load_ibed(IDs[2], 3)

############### Merge interactions ##################
# Interactions are matched on the full bait + other-end fragment coordinates.
FRAG_KEY = ['bait_chr', 'bait_start', 'bait_end', 'otherEnd_chr', 'otherEnd_start', 'otherEnd_end']
ibed1_2 = ibed1.merge(ibed2, on=FRAG_KEY)
interactions = ibed3.merge(ibed1_2, on=FRAG_KEY)
interactions.to_csv("%s/interactions_tmp.txt"%dir,sep='\t',index=False) | [
"gthansen@uchicago.edu"
] | gthansen@uchicago.edu |
dc05d5b9461786806eeb245eea9f6ecd21b570dc | d3dd473234f8bfcd9e85d6e74dcf2aaabe67c792 | /calculus_I.py | 076a87bc83131d1fc08152842f5f58eb943a0269 | [] | no_license | JohnyTheLittle/pythonSciPlay | edf434df8cb9a6c028120410c660a6791b6efb35 | 2977cd769ed71af1651dfc0a9b6acd1a5fa94c0d | refs/heads/main | 2023-07-29T17:09:36.711540 | 2021-09-15T16:52:31 | 2021-09-15T16:52:31 | 406,852,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | class Polynomial:
"""Basic polynomial class"""
    def __init__(self, coeffs):
        # coeffs[i] is the coefficient of x**i (lowest degree first).
        self.coeffs = coeffs
def __repr__(self):
return f"Polynomial({repr(self.coeffs)})"
def __call__(self, x):
return sum(coeff*x**i for i, coeff
in enumerate(self.coeffs))
def differentiate(self):
coeffs = [i*c for i, c in enumerate(self.coeffs[1:], start=1)]
return Polynomial(coeffs)
def integrate(self, constant=0):
"""Integrate the polynimial, returning the integral"""
coeffs = [float(constant)]
coeffs += [c/i for i, c in enumerate(self.coeffs, start=1)]
return Polynomial(coeffs)
P = Polynomial([8, 6, 5, 4, 3])
print(P.__call__(2))  # evaluate at x=2: 8 + 12 + 20 + 32 + 48 = 120
print(P)
DP=P.differentiate().differentiate()  # second derivative of P
print(DP)
# Double integral of the second derivative: recovers P's quadratic and higher
# terms, but the constant and linear coefficients are lost (reset to 0).
print(DP.integrate().integrate())
a = [1,2,3,4,5,6,7,8,9,10]
# enumerate yields (index, element): value is the index, number the element,
# so each entry is (index+1)/element — all 1.0 for this particular list.
b=[(value+1)/number for value, number in enumerate(a)]
print(b) | [
"76442434+JohnyTheLittle@users.noreply.github.com"
] | 76442434+JohnyTheLittle@users.noreply.github.com |
4f922402c561283dd1b13b2698226e6a81d4f4a2 | d28f87594f36349c6654cdb2700ef0cbdb2ea2c3 | /venv/bin/flask | a8e0db023a2e9a8349490e77f10ab9cbbba8b62b | [] | no_license | ejisenstein/flask_tutorial | eba4bddf277f8854a9121e64cc35e9544b3342ce | 624c710e0ea6ac8e8c7764a6ab0b06992578e0d4 | refs/heads/master | 2022-11-25T12:21:22.352586 | 2020-08-05T23:52:37 | 2020-08-05T23:52:37 | 284,311,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | #!/Users/evanisenstein/flask_tutorial/venv/bin/python3
# -*- coding: utf-8 -*-
# Console-script wrapper generated by pip/setuptools for the ``flask``
# entry point (flask.cli:main). Auto-generated: do not edit by hand.
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    # Strip the wrapper-script suffix (-script.py/-script.pyw/.exe) from
    # argv[0] so CLI help and error messages show the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"ejisenstein@gmail.com"
] | ejisenstein@gmail.com | |
6fd9f967435ec0b4885b32a2c99175b9c69f4247 | c2849586a8f376cf96fcbdc1c7e5bce6522398ca | /ch28/ex28-25.py | d6a7549a80d0c5a59f5e9b81527f0036703e34dc | [] | no_license | freebz/Learning-Python | 0559d7691517b4acb0228d1cc76de3e93915fb27 | 7f577edb6249f4bbcac4f590908b385192dbf308 | refs/heads/master | 2020-09-23T01:48:24.009383 | 2019-12-02T12:26:40 | 2019-12-02T12:26:40 | 225,371,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,775 | py | # 셸브에서 객체 업데이트하기
# updatedb.py: update the Person objects stored in the shelve database
import shelve
db = shelve.open('persondb') # reopen the shelve under the same filename
for key in sorted(db): # iterate the records to display the database contents
    print(key, '\t=>', db[key]) # print in a custom format
sue = db['Sue Jones'] # fetch a record by indexing on its key
sue.giveRaise(.10) # update the in-memory object via its class's method
db['Sue Jones'] = sue # assign back to the key to persist the change
db.close() # close the database after the change
# python updatedb.py
# Bob Smith => [Person: job=None, name=Bob Smith, pay=0]
# Sue Jones => [Person: job=dev, name=Sue Jones, pay=100000]
# Tom Jones => [Manager: job=mgr, name=Tom Jones, pay=50000]
# python updatedb.py
# Bob Smith => [Person: job=None, name=Bob Smith, pay=0]
# Sue Jones => [Person: job=dev, name=Sue Jones, pay=110000]
# Tom Jones => [Manager: job=mgr, name=Tom Jones, pay=50000]
# python updatedb.py
# Bob Smith => [Person: job=None, name=Bob Smith, pay=0]
# Sue Jones => [Person: job=dev, name=Sue Jones, pay=121000]
# Tom Jones => [Manager: job=mgr, name=Tom Jones, pay=50000]
# python updatedb.py
# Bob Smith => [Person: job=None, name=Bob Smith, pay=0]
# Sue Jones => [Person: job=dev, name=Sue Jones, pay=133100]
# Tom Jones => [Manager: job=mgr, name=Tom Jones, pay=50000]
# python
# Interactive transcript: reopen the shelve and inspect the updated record
# (the commented lines show the interpreter's output).
import shelve
db = shelve.open('persondb') # reopen the database
rec = db['Sue Jones'] # fetch the object by key
rec
# [Person: Sue Jones, 146410]
rec.lastName()
# 'Jones'
rec.pay
# 146410
| [
"freebz@hananet.net"
] | freebz@hananet.net |
de468f2dc11f3dadf1dd1f9ff9ad775c4c5cf943 | 5141d540e42c82ac0ded31436b5d35686e73d107 | /Enumerate.py | 0447bf312e03ec9b0af7b8edbb2281b7741f3c43 | [] | no_license | roxanacaraba/Learning-Python | 5dfcbf39e9e103f1fc5b834da747903136256419 | 411f42ae626559c699bdab3e2fe3fa6a12d8f304 | refs/heads/main | 2023-01-12T14:18:39.782747 | 2020-11-18T19:19:47 | 2020-11-18T19:19:47 | 313,975,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | for index, name in enumerate(["Mara", "Crina", "George"]):
print("Index : %d => %s" % (index, name)) | [
"roxana_elena_98@yahoo.com"
] | roxana_elena_98@yahoo.com |
265f21ddcae1ef723fb7a9b46fa7cc8330804a1b | 92a4f6675a593ca9a8e70f19f4dd24e5a882baa9 | /app.py | bc4fca5c2b3a52c777b5cc39313ddc08acfcbaf1 | [] | no_license | vijay818/ML_API | f5c8496c4d031d8d8c22ee208604df64689eaed4 | d38aa7911c412974021383b82619a21f4224db81 | refs/heads/master | 2023-05-12T02:53:35.408077 | 2020-07-27T11:51:39 | 2020-07-27T11:51:39 | 282,873,624 | 0 | 0 | null | 2023-05-01T21:25:05 | 2020-07-27T11:11:51 | HTML | UTF-8 | Python | false | false | 5,106 | py | import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
import pandas as pd
app = Flask(__name__)

# Pre-trained models, unpickled once at import time and shared by every
# request. The `with` blocks close each file handle (the original bare
# open() calls leaked them). Only unpickle files from trusted sources.
with open('custbehav_model.pkl', 'rb') as f:
    custbehav_model = pickle.load(f)
with open('churnpred_model.pkl', 'rb') as f:
    churnpred_model = pickle.load(f)
with open('ts_sales_model.pkl', 'rb') as f:
    tsforecast_model = pickle.load(f)
@app.route('/')
def home():
    """Serve the landing page with the model-selection buttons."""
    return render_template('home.html')
@app.route('/result',methods=['POST'])
def result():
    """Dispatch the home-page POST to the flow selected by the form field.

    Navigation buttons (*_button) just render the matching form page; the
    data-carrying fields run the corresponding pre-trained model and render
    its prediction. Unknown forms fall through and return None, matching
    the original behaviour.
    """
    if 'mba_button' in request.form:
        return render_template('mba_form.html')
    elif 'FrequentPurchases' in request.form:
        # Market-basket lookup: first consequent rule for the chosen
        # antecedent item set.
        df = pd.read_csv("frequent.csv")
        features = request.form.get('FrequentPurchases')
        output = df[df.antecedents == features]["consequents"][:1]
        s = output.to_string(index=False)
        return render_template('mba_form.html', prediction_text=s)
    elif 'custbehav_button' in request.form:
        return render_template('custbehav_form.html')
    elif 'PageValues' in request.form:
        # Customer-propensity model over the numeric form values.
        int_features = [float(x) for x in request.form.values()]
        final_features = [np.array(int_features)]
        prediction = custbehav_model.predict(final_features)
        output = prediction[0]
        return render_template('custbehav_form.html', prediction_text='Customer Propensity is {}'.format(output))
    elif 'custchurn_button' in request.form:
        return render_template('churnpred_form.html')
    elif 'CurrentEquipmentDays' in request.form:
        # Churn model; drop the submit button's own value before casting.
        int_features = [float(x) for x in request.form.values() if x != 'Submit']
        final_features = [np.array(int_features)]
        prediction = churnpred_model.predict(final_features)
        output = prediction[0]
        return render_template('churnpred_form.html', prediction_text='Churn value is {}'.format(output))
    elif 'tsforecast_button' in request.form:
        return render_template('tsforecast_form.html')
    elif 'NumberofMonths' in request.form:
        # The five duplicated horizon branches collapsed into one
        # table-driven path: months -> (forecast end date, reported RMSE).
        horizons = {
            '1': ('2019-05-01', '50.7'),
            '2': ('2019-06-01', '59.7'),
            '3': ('2019-07-01', '65'),
            '4': ('2019-08-01', '78.45'),
            '5': ('2019-09-01', '93.65'),
        }
        months = request.form.get('NumberofMonths')
        if months in horizons:
            end_date, rmse = horizons[months]
            prediction = tsforecast_model.predict(start='2019-05-01', end=end_date, dynamic=True)
            # Truncate each forecast value to int for display.
            lst = [int(value) for value in prediction]
            text = 'Sales Forecast for next {} months with an RMSE of {} is: {}'.format(months, rmse, lst)
            return render_template('tsforecast_form.html', prediction_text=text)
if __name__ == "__main__":
app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
c21977172cedfe5e8dc386a7ee59ff0827fe54bb | ccb5eb6d2dca109b5c3f1e16be61044659877246 | /face_recognition/redisTest.py | 00a01407f80f1190a7dda4706a5f29d70e91df2c | [
"MIT"
] | permissive | rooneyzhang/face_recognition | 37b6905532e6b8a6f5066a1327f183dc5f3ebbd4 | 8519869bcf6784ff6536c5f283e2f28917aba81b | refs/heads/master | 2020-04-20T07:10:07.204573 | 2019-02-01T15:03:37 | 2019-02-01T15:03:37 | 168,703,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | import redis
import json
# Redis connection. NOTE(review): host and password are hard-coded test
# credentials — move them to configuration/environment before reuse.
r = redis.Redis(host='111.1.13.42', port=6379, db=0, password='3L3ygScS')
i=0
# Producer: push ten fabricated face-recognition events onto the "face"
# list so the consumer code below has data to pop.
while i<10:
    # JSON payload template; the %s slots receive the loop index (name
    # suffix) and 1000+i (employee id and image file number).
    msg='''{
        "code": 0,
        "persons": [{
            "type": 0,
            "name": "张三%s",
            "department": "技术部",
            "position": "员工",
            "employeeID":%s,
            "image": "./static/face/%s.jpg"
        }],
        "camera": [{
            "id": "1",
            "position": "position"
        }]
    }
    ''' %(str(i),str(1000+i),str(1000+i))
    r.lpush("face",msg)
    i=i+1
# Pop one message off the tail of the "face" list (None when empty).
msg = r.rpop("face")
if msg:
    # Redis returns bytes; decode before parsing the JSON payload.
    # (The extra str() wrapper around decode() was a no-op and is gone.)
    data = json.loads(msg.decode())
    # isinstance() instead of `type(data) is dict`: idiomatic and also
    # accepts dict subclasses.
    if isinstance(data, dict):
        print(str(data["persons"]))
        person = data["persons"][0]
        print(person["type"])
        print(person["name"])
        print(person["department"])
        print(person["position"])
        print(person["employeeID"])
        print(person["image"])
else :
print("nothing") | [
"rooneyzhang@139.com"
] | rooneyzhang@139.com |
500bd8cda7c4d750145a6eff90ea8b2d5f379b8e | 1eb63abe393d5b6919f62b1638ad22d4e79c8058 | /co2_server.py | fedc420b97b18593c4b09aa48195082df3b99ff1 | [
"MIT"
] | permissive | henrikglass/co2server-util | 5d545e48038f3087fa36961783037e288d6113d6 | 349848ea162d3ef18395f5c7cd2448799e9e0779 | refs/heads/master | 2020-09-14T21:50:16.845048 | 2019-11-26T15:33:43 | 2019-11-26T15:33:43 | 223,268,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,919 | py | import sys, getopt
import inotify.adapters
import socket
import time
import _thread
local_ip = "xxx.xxx.xxx.xxx"
local_port = 6666
co2_table = ""
co2_table_lock = _thread.allocate_lock()
# handle client
def handle_client(conn):
    """Serve one client: send the current table (under the lock) and close."""
    with co2_table_lock:
        conn.send(co2_table.encode())
    conn.close()
# Updates co2_table from file.
def load_co2_table():
    """Re-read co2_table.txt into the shared co2_table string (thread-safe)."""
    global co2_table
    with co2_table_lock:
        time.sleep(10) # Probably not necessary. But why not. We have time.
        # Context manager guarantees the handle is closed even if read()
        # raises (the original open/close pair leaked on error), and avoids
        # shadowing the historical builtin name `file`.
        with open("co2_table.txt", "r") as table_file:
            co2_table = table_file.read()
# periodically update co2_table contents
def perpetually_load_co2_table():
    """Load the table once, then reload it every time the file is rewritten."""
    # update table on start
    load_co2_table()
    print("read file!")
    i = inotify.adapters.Inotify()
    i.add_watch('co2_table.txt')
    # listen for inotify events
    for event in i.event_gen(yield_nones=False):
        (a, type_names, path, filename) = event
        # Reload only when a writer closes the file (write completed),
        # not on every intermediate modification event.
        if(type_names[0] == 'IN_CLOSE_WRITE'):
            print("read file again!")
            load_co2_table()
# parse args
def parse_args(argv):
    """Parse -i <ip> and -p <port> into the module-level listen settings."""
    global local_ip
    global local_port
    try:
        opts, args = getopt.getopt(argv, "i:p:")
    # Catch only the parser's own error; the original bare `except:` also
    # swallowed SystemExit/KeyboardInterrupt.
    except getopt.GetoptError:
        print("Error parsing input arguments")
        sys.exit()
    for opt, arg in opts:
        if opt == '-i': local_ip = arg
        if opt == '-p': local_port = int(arg)
def main(argv):
    """Parse CLI options, start the file watcher, and serve clients forever."""
    parse_args(argv)
    # Background thread keeps co2_table refreshed from disk.
    _thread.start_new_thread(perpetually_load_co2_table, ())
    # handle incoming connections
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR lets the server rebind quickly after a restart.
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((local_ip, local_port))
    server.listen(5)
    while True:
        conn, addr = server.accept()
        # One short-lived thread per client connection.
        _thread.start_new_thread(handle_client, (conn,))
if __name__ == '__main__':
    main(sys.argv[1:])
| [
"hglass@kth.se"
] | hglass@kth.se |
3e0a856177c0d402b98ed229c8529ec154be6332 | 8419c3c826dd41630e57c6523fe6de79eca2facb | /workshop of python/samples/ex02Main.py | 7176c022b4c56ea5e02bb7f818bec18b4d7d882a | [] | no_license | thomasernste/python | 4d009f5a891fd7c4a3432a42ea94f94379f6d0de | 7a59d2e37626d4de3a3b7e6942363c424798ad46 | refs/heads/master | 2016-09-05T23:17:18.093787 | 2012-04-29T14:38:28 | 2012-04-29T14:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""docstring
"""
__revision__ = '0.1'
import sys,os
def usage():
    # Print the expected command-line usage (the script takes an input file).
    print "inputFile"
    pass
def error():
    usage()
    sys.exit(-1)
    # Force the script to terminate immediately, returning -1 as the error code.
def readLines(inf):
    for line in open(inf):
        # Open the file `inf` and read it line by line, echoing each line.
        print line
if __name__=="__main__":
argvNum=2
if len(sys.argv)<argvNum:
#获取命令行的参数,sys.argv为数组,len(list)为求该数组list的元素个数。
error()
print sys.argv[0]
inf=sys.argv[1]
#sys.argv的index从0开始,但是sys.argv[0]为该脚本的名字
readLines(inf)
#函数传递按照引用(即c里面的指针)的方法传递。
#如果该参数引用的值本身不能改变如string类/int类,可以看成是pass by value
#如果该参数引用的值能改变如数组,可以看成是pass by reference
| [
"wangchj04@gmail.com"
] | wangchj04@gmail.com |
2a61efec65faad332f5b97afb091dc99ae1c3656 | 0d06176bcd859d88653e7768bbbd7b59613cbd78 | /data/parser_test3.py | 7021990aaa0005f37e16578f4a3c60b2a710f465 | [] | no_license | Manoo-hao/project | 0c490391141428b33183c9b817a052f4b2ffe62c | 9d54ff96fdd5f46dc1ad0e325c5c28ef226ef1f0 | refs/heads/master | 2016-09-06T14:38:23.400040 | 2015-03-01T23:47:32 | 2015-03-01T23:47:32 | 29,956,094 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,275 | py | #takes the raw data file and creates 4 text files that can each be loaded into a table in mySQL
#!/usr/bin/python
#Working version of parser - non functional - refer to attached README.md for more information
#step 1. open the data file
dataset='GDS2469_full.soft'
fh = open(dataset) #don't quite understand why this line doesn't work when I open 'dataset'
#step 2. skip the preamble: keep reading lines until the table marker appears
line = fh.readline()
while line[:20] != '!dataset_table_begin': #check first 20 characters, if it doesn't match this, read next line.
    line=fh.readline()
header = fh.readline().strip()
#capture the column names, mapping each column title to its position
colnames={}
index=0
for title in header.split('\t'):
    colnames[title]=index
    print '%s\t%s'%(title,index)
    index=index+1
#open our output files, one per table.
genefile=open('genes.txt','w')
expressionfile=open ('expression.txt','w')
probefile=open('probes.txt','w')
#defines which columns are to go in each output file. For samples it is the 3rd header until the gene title header and they will be separated by '\t'
genefields=['Gene ID', 'Gene symbol', 'Gene title']
# Sample columns sit between the two leading ID columns and 'Gene title'.
samples=header.split('\t')[2:int(colnames['Gene title'])]
probefields=['ID_REF','Gene ID']
def buildrow(row, fields):
    '''Return a tab-separated line built from selected columns of *row*.

    row: a list of values.
    fields: column titles; only the values of *row* at those columns
    (looked up through the module-level ``colnames`` map) are output.
    returns: the joined values terminated with a newline.
    '''
    picked = [row[int(colnames[name])] for name in fields]
    return "\t".join(picked) + "\n"
#creates the rows for the expression file: one record per sample for each
#probe, since each probe row carries several expression values.
def build_expression(row, samples):
    '''Return newline-separated expression records for one probe row.

    For every sample this emits "<sample>\t<probe id>\t<value>".
    row: a list of values.
    samples: a list of sample column headings.
    '''
    records = []
    for sample in samples:
        fields = [sample, row[int(colnames['ID_REF'])], row[int(colnames[sample])]]
        records.append("\t".join(fields))
    return "\n".join(records) + "\n"
#initialise a counter to count how many probe rows were processed.
#writes the data to the files
rows = 0
for line in fh.readlines():
    try:
        if line[0]=='!':
            continue
        row=line.replace('\n','').split('\t')
        if row[int(colnames['Gene ID'])] =='':
            continue
        genefile.write(buildrow(row,genefields))
        probefile.write(buildrow(row,probefields))
        expressionfile.write(build_expression(row, samples))
        rows = rows +1
    # Best-effort row skipping is kept, but no longer a bare `except:`,
    # which also swallowed SystemExit and KeyboardInterrupt.
    except Exception:
        pass
#close the created files after the data has been written to them
genefile.close()
expressionfile.close()
probefile.close()
#print out a message to indicate how much data was processed.
print '%s rows processed'%rows
| [
"mblank@dundee.ac.uk"
] | mblank@dundee.ac.uk |
c43cf5bd76ca64e3afc552491182bc52b6832180 | dfedc59c56eac494c6a8065053926181f3269dc7 | /multiplyList.py | 8a75217fe237461f748c3829da24fe0ea066736d | [] | no_license | jillo-abdullahi/python-codes | ed275fd9a0d0ed8166bf3a9ebdb937c26ccba3aa | fd9be1d21e85238d1fafc531fb2e51b70646f99c | refs/heads/master | 2021-05-04T20:02:39.108659 | 2018-08-16T20:51:13 | 2018-08-16T20:51:13 | 106,828,968 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | def product(myList):
prod = 1
count = len(myList)
while count > 0:
prod*= myList[count-1]
count -=1
return prod
if __name__ == '__main__':
nums = [2,5,9,6]
print product(nums) | [
"jillo.abdulla@gmail.com"
] | jillo.abdulla@gmail.com |
b55eaffae77ba47f89aa333a3efe15ef7fa40e94 | 2677aee2f807af1dd3c31d65168881285d13a228 | /vietocr/tool/translate.py | 246b4bc109d3898b90d2330799ca8ac7340d6f1c | [
"Apache-2.0"
] | permissive | holigonberg/vietocr | 2a80a6edad0e45f7e1eca74ea4a303255f23e51a | 98446eff0f951ac6c8bf04c0180a3dc951f4fe7b | refs/heads/master | 2023-01-06T04:08:10.034657 | 2020-11-02T07:33:54 | 2020-11-02T07:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,925 | py | import torch
import numpy as np
import math
from PIL import Image
from torch.nn.functional import log_softmax
from vietocr.model.transformerocr import VietOCR
from vietocr.model.vocab import Vocab
from vietocr.model.beam import Beam
def batch_translate_beam_search(img, model, beam_size=4, candidates=1, max_seq_length=128, sos_token=1, eos_token=2):
    """Beam-search decode a batch: run the encoder once, then decode each
    image's memory column independently.

    img: NxCxHxW batch of preprocessed images — TODO confirm layout.
    Returns a numpy array with one decoded token-id sequence per image.
    """
    # img: NxCxHxW
    model.eval()
    sents = []
    with torch.no_grad():
        src = model.cnn(img)
        memories = model.transformer.forward_encoder(src)
        for i in range(memories.size(1)):
            # Tile one image's memory across the beam dimension so the
            # decoder scores beam_size hypotheses in parallel.
            memory = memories[:,i,:].repeat(1, beam_size, 1) # TxNxE
            sent = beamsearch(memory, model, beam_size, candidates, max_seq_length, sos_token, eos_token)
            sents.append(sent)
    sents = np.asarray(sents)
    return sents
def translate_beam_search(img, model, beam_size=4, candidates=1, max_seq_length=128, sos_token=1, eos_token=2):
    """Beam-search decode a single image (batch of 1) and return the best
    token-id sequence."""
    # img: 1xCxHxW
    model.eval()
    with torch.no_grad():
        src = model.cnn(img)
        memory = model.transformer.forward_encoder(src) #TxNxE
        sent = beamsearch(memory, model, beam_size, candidates, max_seq_length, sos_token, eos_token)
    return sent
def beamsearch(memory, model, beam_size=4, candidates=1, max_seq_length=128, sos_token=1, eos_token=2):
    """Beam-search decode over one image's encoder memory.

    memory: Tx1xE; it is tiled across the beam dimension so the decoder
    scores all beam_size hypotheses per step. Returns the best hypothesis
    as a list of token ids, with its last token dropped.
    """
    # memory: Tx1xE
    model.eval()
    device = memory.device
    beam = Beam(beam_size=beam_size, min_length=0, n_top=candidates, ranker=None, start_token_id=sos_token, end_token_id=eos_token)
    with torch.no_grad():
        memory = memory.repeat(1, beam_size, 1) # TxNxE
        for _ in range(max_seq_length):
            tgt_inp = beam.get_current_state().transpose(0,1).to(device) # TxN
            decoder_outputs = model.transformer.forward_decoder(tgt_inp, memory)
            # Score only the newest position of each hypothesis.
            log_prob = log_softmax(decoder_outputs[:,-1, :].squeeze(0), dim=-1)
            beam.advance(log_prob.cpu())
            if beam.done():
                break
    scores, ks = beam.sort_finished(minimum=1)
    hypothesises = []
    for i, (times, k) in enumerate(ks[:candidates]):
        hypothesis = beam.get_hypothesis(times, k)
        hypothesises.append(hypothesis)
    # NOTE(review): the prefix is hard-coded to 1 rather than sos_token —
    # confirm whether non-default sos_token values are ever used.
    return [1] + [int(i) for i in hypothesises[0][:-1]]
def translate(img, model, max_seq_length=128, sos_token=1, eos_token=2):
    """Greedy (top-1) decoding for a batch of images.

    img: BxCxHxW preprocessed batch. Returns a numpy array whose rows are
    per-image token-id sequences starting with sos_token.
    """
    model.eval()
    device = img.device
    with torch.no_grad():
        src = model.cnn(img)
        memory = model.transformer.forward_encoder(src)
        # One list per decoding step; the first holds the SOS tokens.
        translated_sentence = [[sos_token]*len(img)]
        max_length = 0
        # Stop once every sequence contains eos_token or the cap is hit.
        while max_length <= max_seq_length and not all(np.any(np.asarray(translated_sentence).T==eos_token, axis=1)):
            tgt_inp = torch.LongTensor(translated_sentence).to(device)
            # output = model(img, tgt_inp, tgt_key_padding_mask=None)
            # output = model.transformer(src, tgt_inp, tgt_key_padding_mask=None)
            # NOTE(review): forward_decoder is unpacked as (output, memory)
            # here but used as a single value in beamsearch — confirm the
            # decoder's return contract.
            output, memory = model.transformer.forward_decoder(tgt_inp, memory)
            output = output.to('cpu')
            values, indices  = torch.topk(output, 5)
            # Keep the best token at the last time step for every sequence.
            indices = indices[:, -1, 0]
            indices = indices.tolist()
            translated_sentence.append(indices)
            max_length += 1
            del output
    translated_sentence = np.asarray(translated_sentence).T
    return translated_sentence
def build_model(config):
    """Instantiate the OCR model and its vocabulary from a config mapping.

    Returns (model moved to config['device'], vocab).
    """
    vocab = Vocab(config['vocab'])
    model = VietOCR(
        len(vocab),
        config['backbone'],
        config['cnn'],
        config['transformer'],
        config['seq_modeling'],
    )
    return model.to(config['device']), vocab
def resize(w, h, expected_height, image_min_width, image_max_width):
    """Scale the width to keep the aspect ratio at expected_height, rounded
    up to a multiple of 10 and clamped to [image_min_width, image_max_width].

    Returns (new_width, expected_height).
    """
    scaled_w = int(expected_height * float(w) / float(h))
    step = 10
    scaled_w = math.ceil(scaled_w / step) * step
    scaled_w = min(max(scaled_w, image_min_width), image_max_width)
    return scaled_w, expected_height
def process_image(image, image_height, image_min_width, image_max_width):
    """Resize a PIL image to a fixed height and return a CxHxW float array
    scaled to [0, 1]."""
    img = image.convert('RGB')
    w, h = img.size
    new_w, image_height = resize(w, h, image_height, image_min_width, image_max_width)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    # (ANTIALIAS was an alias for it) and works on older Pillow too.
    img = img.resize((new_w, image_height), Image.LANCZOS)
    img = np.asarray(img).transpose(2,0, 1)
    img = img/255
    return img
def process_input(image, image_height, image_min_width, image_max_width):
    """Preprocess a single PIL image into a 1xCxHxW float tensor."""
    array = process_image(image, image_height, image_min_width, image_max_width)
    batched = array[np.newaxis, ...]
    return torch.FloatTensor(batched)
def predict(filename, config):
    """Run the full OCR pipeline on one image file and return the decoded text."""
    img = Image.open(filename)
    # BUG FIX: process_input has no defaults for its sizing parameters, so
    # the original one-argument call raised TypeError. The values are taken
    # from the config's dataset section — TODO confirm these keys against
    # the caller's config schema.
    img = process_input(img, config['dataset']['image_height'],
                        config['dataset']['image_min_width'],
                        config['dataset']['image_max_width'])
    img = img.to(config['device'])
    model, vocab = build_model(config)
    s = translate(img, model)[0].tolist()
    s = vocab.decode(s)
    return s
| [
"pbcquoc@gmail.com"
] | pbcquoc@gmail.com |
939f03ae3f0af068c616428a2919345017565c9a | d16290884552147e93ff98baf2da15353c606a87 | /examples/docs/configs/base_config.py | ed81f1102fa70a4ca96c8c770e35a4a25e018378 | [
"BSD-3-Clause"
] | permissive | ipython/traitlets | 2c535c33e990afbcd7e9f7b272bc2a90a6711495 | 05d6ecb6b1f90736cdeacf41c7d5e84b147c1dbd | refs/heads/main | 2023-09-04T10:04:14.795968 | 2023-08-01T09:44:46 | 2023-08-01T09:44:46 | 14,993,021 | 600 | 216 | BSD-3-Clause | 2023-09-14T01:34:34 | 2013-12-06T20:45:45 | Python | UTF-8 | Python | false | false | 122 | py | # Example config used by load_config_app.py
# get_config() is injected into this file's namespace by the traitlets
# config loader (hence the noqa on the undefined name).
c = get_config()  # noqa
# Trait values applied to MyClass instances configured by the loading app.
c.MyClass.name = 'coolname'
c.MyClass.ranking = 100
| [
"noreply@github.com"
] | noreply@github.com |
fc06b94ccb9936fdc165854c8672c27c890088ab | 9aee74a948ad53958a27857a6ffec9f0c91e821b | /tutorial/config/sw.py | 3c5c3dac5f1ee6bf2f1bb6bb83ec3fc2655312da | [] | no_license | liho98/distributed-crawler | d6307348abd90c09c77aff82df6b444e832b97f7 | 00576a2c0b069e637650f27a80c63401eaa4039d | refs/heads/master | 2020-11-27T16:39:08.937990 | 2019-12-22T07:15:20 | 2019-12-22T07:15:20 | 229,531,987 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .worker import *
# Dotted path to the crawling strategy class run by the strategy worker.
CRAWLING_STRATEGY = 'frontera.strategy.basic.BasicCrawlingStrategy' # path to the crawling strategy class
LOGGING_CONFIG='logging-sw.conf' # if needed
MAX_PAGES_PER_HOSTNAME = 10 | [
"tanlh-wa16@student.tarc.edu.my"
] | tanlh-wa16@student.tarc.edu.my |
4ff9852f889116972db313142451e61f7dfaa0fd | 3b8d4b70d8f3f245c0a713d46d7f06f9dc437c3d | /Boosting&DecisionTree/helper.py | fe63d9923894a27facc092889883a6b650b8a352 | [] | no_license | wangby511/MachineLearningAlgorithms | b00077ade4e6210afeba5b368e975320489c1f78 | 35c71f4da61324306d69f5518d2d0496bd8c3289 | refs/heads/master | 2020-04-07T12:34:23.148979 | 2018-12-11T08:53:58 | 2018-12-11T08:53:58 | 158,372,700 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,348 | py | import numpy as np
y_train = [0, 0, 2, 1, 0, 0, 1, 0, 2, 1, 2, 1, 0, 0, 2, 1, 0, 1, 2, 1, 0, 0, 2, 1, 1, 0, 2, 0, 2, 1, 0, 0, 2, 1, 0, 0, 1, 2, 2, 0, 2, 1, 0, 0, 2, 2, 2, 1, 1, 1, 0, 0, 2, 2, 1, 2, 1, 2, 0, 2, 0, 1, 1, 2, 2, 0, 1, 0, 1, 1, 1, 0, 2, 0, 2, 1, 2, 1, 2, 1, 0, 2, 1, 2, 1, 0, 1, 2, 0, 1, 0, 0, 0, 1, 2, 0, 0, 2, 0, 1, 2, 1, 2, 2, 1, 1, 2, 1, 0, 1, 1, 0, 1, 2, 2, 2, 0, 0, 2, 2]
x_train = [[1.0, 2.0], [0.0, 1.0], [2.0, 1.0], [2.0, 1.0], [0.0, 1.0], [0.0, 2.0], [2.0, 1.0], [0.0, 0.0], [2.0, 0.0], [2.0, 0.0], [2.0, 1.0], [1.0, 0.0], [0.0, 2.0], [0.0, 2.0], [1.0, 1.0], [2.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [2.0, 1.0], [1.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 2.0], [2.0, 1.0], [1.0, 1.0], [0.0, 2.0], [0.0, 1.0], [2.0, 1.0], [1.0, 1.0], [0.0, 2.0], [0.0, 1.0], [0.0, 0.0], [2.0, 1.0], [2.0, 1.0], [0.0, 1.0], [2.0, 1.0], [1.0, 1.0], [0.0, 1.0], [0.0, 1.0], [2.0, 1.0], [2.0, 2.0], [1.0, 1.0], [1.0, 1.0], [0.0, 0.0], [2.0, 0.0], [0.0, 2.0], [1.0, 2.0], [2.0, 1.0], [2.0, 1.0], [2.0, 1.0], [2.0, 1.0], [1.0, 1.0], [2.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 1.0], [1.0, 1.0], [2.0, 1.0], [2.0, 1.0], [2.0, 1.0], [0.0, 2.0], [1.0, 0.0], [0.0, 2.0], [1.0, 1.0], [2.0, 1.0], [1.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 2.0], [2.0, 0.0], [1.0, 1.0], [2.0, 1.0], [2.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 2.0], [2.0, 1.0], [1.0, 1.0], [2.0, 1.0], [2.0, 1.0], [0.0, 1.0], [1.0, 1.0], [2.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.0, 2.0], [1.0, 0.0], [2.0, 0.0], [1.0, 2.0], [0.0, 2.0], [2.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [2.0, 1.0], [2.0, 1.0], [2.0, 1.0], [1.0, 0.0], [1.0, 0.0], [2.0, 1.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [2.0, 1.0], [0.0, 2.0], [0.0, 2.0], [2.0, 2.0], [0.0, 0.0]]
label_count = {}
# Count the label of every training sample whose feature vector is exactly
# [2.0, 2.0], printing each match. zip() replaces the old range(len(...))
# index loop; the commented-out threshold variant was removed as dead code.
for features, label in zip(x_train, y_train):
    if features[0] == 2 and features[1] == 2:
        print (features, label)
        label_count[label] = label_count.get(label, 0) + 1
print (label_count) | [
"noreply@github.com"
] | noreply@github.com |
09dce10b74b3626b0a4972fdc6438cf874d14140 | 1711624c2d13d19df52f6d1f3c471cf8e97cfeb9 | /obstacle.py | 24988b78291e5c23d7767abfa56f6aa4296e1f38 | [
"MIT"
] | permissive | YasuShimizu/2DH_Python | f53e7112ec33b98f848d3d974bccac4a0e9753a4 | 3fa8cf1d86015c29c778bf8e405fef30e13c12b4 | refs/heads/main | 2023-08-14T06:37:37.999569 | 2021-09-17T20:54:30 | 2021-09-17T20:54:30 | 407,676,898 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | import numpy as np
import csv
def ob_ini(ijh,nx,ny):
    """Read rectangular obstacles from obst.dat and mark their cells in ijh.

    obst.dat: first row is the number of obstacles; each following row is
    i1,i2,j1,j2 — the x and y index ranges (exclusive low, inclusive high).
    Marked cells get ijh[i, j] = 1; the modified array is returned.
    """
    # The with-block guarantees obst.dat is closed (the original left the
    # handle open for the life of the process).
    with open('obst.dat', 'r') as fopen:
        dataReader = csv.reader(fopen)
        d1 = next(dataReader)
        nobst = int(d1[0])  # number of obstacles
        i1 = np.zeros(nobst, dtype=int); i2 = np.zeros_like(i1)
        j1 = np.zeros_like(i1); j2 = np.zeros_like(i1)
        for n in np.arange(0, nobst):
            lp = next(dataReader)
            i1[n] = int(lp[0]); i2[n] = int(lp[1]); j1[n] = int(lp[2]); j2[n] = int(lp[3])
            for i in np.arange(0, nx + 1):
                for j in np.arange(0, ny + 1):
                    if i > i1[n] and i <= i2[n] and j > j1[n] and j <= j2[n]:
                        ijh[i, j] = 1
    return ijh
# nobst: number of obstacles
# i1,i2,j1,j2 : 障害物のx,y方向の範囲 | [
"noreply@github.com"
] | noreply@github.com |
337555e92f42e1fa73ca39f8a52b53366558da88 | bc8f02c870e939359c32d04016f989f0c7526226 | /constraint_adder.py | be8ac25b95b5f8d031b9f6af0994cabbceb480ea | [] | no_license | iamgroot42/gpu_image_segmentation | c53a85267cd373a391c27297ac1befb944067550 | ca97a78a28bf5b76659dcb990a3a312d6d2c6fe3 | refs/heads/master | 2021-01-18T23:46:45.615098 | 2017-10-01T11:35:17 | 2017-10-01T11:35:17 | 80,756,196 | 1 | 0 | null | 2017-10-01T11:35:18 | 2017-02-02T18:37:35 | Cuda | UTF-8 | Python | false | false | 952 | py | import cv2
import sys
# Points labelled by the user (filled in by mouse_callback).
object_points = []
background_points = []
counter = 0  # NOTE(review): never used in the visible code
data = None  # current image loaded by annotate_images
def mouse_callback(event, x, y, flags, params):
    """OpenCV mouse hook: left click records an object point, right click a
    background point, into the module-level lists."""
    # The unused `global counter` / `global data` declarations were removed;
    # these two remain only for readability (append() needs no rebinding).
    global object_points
    global background_points
    if event == cv2.EVENT_LBUTTONDOWN:
        object_points.append((x, y))
    elif event == cv2.EVENT_RBUTTONDOWN:
        background_points.append((x, y))
def annotate_images(img_path):
    """Show the image in an OpenCV window and collect labelled points via
    mouse clicks until any key is pressed."""
    global data
    data = cv2.imread(img_path)
    cv2.imshow('Image',data)
    cv2.setMouseCallback('Image', mouse_callback)
    # Block until a key press, then tear the window down.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def write_points(data, filename):
    """Write one "x y" line per point to *filename*.

    data: iterable of (x, y) pairs. (The parameter name shadows the
    module-level ``data`` image; kept for interface compatibility.)
    """
    # Context manager closes the file even if a write fails.
    with open(filename, 'w') as f:
        for x, y in data:
            f.write(str(x) + " " + str(y) + "\n")
if __name__ == "__main__":
file_path = sys.argv[1]
print("Left click to label object points")
print("Right click to label background points")
annotate_images(file_path)
write_points(object_points, "OBJECT")
write_points(background_points, "BACKGROUND")
| [
"anshuman14021@iiitd.ac.in"
] | anshuman14021@iiitd.ac.in |
57de4c04901ed7b03a770017a6fd726bcb1591a8 | db01067e88324466ba4743e5e53cd53de609c342 | /02. Data Types and Variables Lab/04. Convert Meters to Kilometers.py | e372e781a294d7783d346fcb4d7167305731e6ce | [] | no_license | IlkoAng/-Python-Fundamentals-Softuni | 07eaf89d340b2e60214ab5f8e896629ae680dc4a | 01a112b13e84ab2f29e6fc4ed39f08f395d54429 | refs/heads/main | 2023-05-18T02:11:53.676763 | 2021-06-06T15:39:22 | 2021-06-06T15:39:22 | 371,475,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | meters = int(input())
# 1 km = 1000 m; show the result with two decimal places.
kilometers = meters / 1000
print(f"{kilometers:.2f}")
| [
"noreply@github.com"
] | noreply@github.com |
28397aabe41c8c446924bd42dac1f8b0b733888e | c16ea02da3c1ce8cd47a2ec5ec89cd12ddcb1170 | /text_classifier/evaluate.py | 0c0ceed5f09b40f38e032dd5b447f34b02f82cb3 | [] | no_license | ndhuy13/text-classifier | 18019647e33e0b4a964664cb5933b3a4a6eb645b | 6cdb5d62fde0f6084d6c3e35634ca453c092b5dd | refs/heads/master | 2020-06-23T15:36:50.487990 | 2019-07-24T15:57:59 | 2019-07-24T15:57:59 | 198,665,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | import pandas as pd
# Evaluate top-1 / top-2 accuracy from the saved prediction results.
df = pd.read_csv('final_result.csv')
true_1 = 0  # rows where the top prediction matches the actual label
true_2 = 0  # rows where only the second prediction matches
no_recomendation = 0  # only used by the commented threshold variant below
# Earlier variant that additionally required a confidence threshold:
# for index, row in df.iterrows():
#     if row['actual'] == row['pred_1']:
#         if row['prob_1'] > 0.7:
#             true_1 += 1
#         else: no_recomendation += 1
#         continue
#     if row['actual'] == row['pred_2']:
#         if row['prob_1'] > 0.5:
#             true_2 += 1
#         else: no_recomendation += 1
for index, row in df.iterrows():
    if row['actual'] == row['pred_1']:
        true_1 += 1
        continue
    if row['actual'] == row['pred_2']:
        true_2 += 1
# Rows that both predictions missed are exported for inspection.
num_of_row = df.shape[0]
df_false = df[df['actual'] != df['pred_1']]
df_false = df_false[df_false['actual'] != df_false['pred_2']]
df_false.to_csv('final_false_result.csv')
print('Accuracy_1:', true_1/(num_of_row ))
print('Accuracy_2:', (true_2 + true_1)/(num_of_row))
# print('No Recomendation:', no_recomendation/num_of_row) | [
"noreply@github.com"
] | noreply@github.com |
804fce58f691e31d3e92890598c1ed56b17b3018 | 94b11f14db5320153b9b17e274a85714e00311cd | /junk_functions.py | 38c19bf01eeb7350d5e370932c2dcfe28e0bec82 | [] | no_license | RoelvdBurght/Amstelhaegee | d43d3b77489eb669881d668b8a743ef9fe69f868 | 4f5c7d183e341b5d81b1f8acd50360c27d1f2dba | refs/heads/master | 2021-01-22T19:36:12.469957 | 2017-05-31T15:57:33 | 2017-05-31T15:57:33 | 85,217,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,947 | py | # berekent de afstand tussen alle huizen en stopt deze in een lijst
# Computes, for every house, the distance to all other houses and keeps
# the smallest positive one (the "free space" around that house).
def distToAll(houseList):
    """Return a list with each house's distance to its nearest neighbour."""
    nearest = []
    for current in houseList:
        positives = []
        for other in houseList:
            dist = current.distanceTo(other)
            if dist > 0:
                positives.append(dist)
        nearest.append(min(positives))
    return nearest
# calculates value of map with houselist
def valueOfMap(houseList):
    """Sum calculateValue() over every house (entries after the first four)
    whose freespace is 6, 3 or 2, using the house's nearest-neighbour
    distance from distToAll()."""
    houses = houseList[4:]
    freespace = distToAll(houses)
    total = 0
    for position, house in enumerate(houses):
        if house.freespace in (6, 3, 2):
            total += calculateValue(house, freespace[position])
    return total
def initDistList2(houseList):
    """Build an NxN distance matrix over houseList (N = len(houseList) - 4)
    and print the nearest positive distance for the rows after index 3.

    BUG FIX: `isWater` was read below but — with the water checks commented
    out — never assigned, so every call raised NameError. It is now
    explicitly False (nothing is treated as water), matching what the
    commented-out checks would default to.
    """
    length = len(houseList)-4
    distList = [[0 for x in range(length)] for y in range(length)]
    for i in range(length):
        for j in range(length):
            isWater = False
            sameHouse = i == j
            if sameHouse:
                distList[i][j] = 0
            if not isWater and not sameHouse:
                # Debug trace for suspicious half-unit distances.
                if distanceBetween(houseList[i],houseList[j]) == 0.5:
                    print(distanceBetween(houseList[i],houseList[j]))
                    print(houseList[i])
                    print(houseList[j])
                distList[i][j] = distanceBetween(houseList[i],houseList[j])
    test = []
    # `row` no longer shadows the builtin `list` as the old loop variable did.
    # NOTE(review): distList already excludes the first four houses, so the
    # [4:] slice here skips four more rows — confirm that is intended.
    for row in distList[4:]:
        test.append(min(x for x in row if x > 0))
    print(test)
return distList | [
"hbpvanlaatum123@gmail.com"
] | hbpvanlaatum123@gmail.com |
3975a34d344b212900e775b2a112c24e3b636be7 | b2158c964969e2909d8c139874dc9fb505c779a3 | /01_basic/exercise_057.py | 221569001bba20c3e0cc35014da902a5d5d2226c | [
"MIT"
] | permissive | sideroff/python-exercises | 1bbe1373cc410c8ec9db29107c3d5fd40e2a861f | 6a9cc55735d977a71697204c734b3ade84a0c4fd | refs/heads/master | 2022-12-10T11:36:41.729465 | 2020-03-20T21:24:15 | 2020-03-20T21:24:15 | 227,201,554 | 0 | 0 | MIT | 2022-12-08T03:24:09 | 2019-12-10T19:41:54 | Python | UTF-8 | Python | false | false | 202 | py | from time import time
def main():
    """Spin through ~one million no-op additions so the caller can time it."""
    for _ in range(1, 1_000_000):
        1 + 1
if __name__ == '__main__':
    # Wall-clock the busy loop with two time() samples.
    start = time()
    main()
    end = time()
print("main method executed in %fs" % (end-start)) | [
"ivan.sideroff@gmail.com"
] | ivan.sideroff@gmail.com |
0e8a8c9db2daba3ad29b85d76f93c342307a4454 | 1c2024e74a67823098800a448771d84568b70130 | /app/migrations/0026_auto_20161204_1833.py | 94038ad2e8be7747dbe53799558463291c443ed9 | [] | no_license | Anroc/donaalda-backend | 739f8b06b4f91c7ae161f6b70090343341f19ec7 | 456d3291225b0b747c1d0034080e15dcf00d2802 | refs/heads/master | 2020-06-25T04:07:07.274204 | 2017-06-08T17:07:03 | 2017-06-08T17:07:03 | 199,194,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-04 18:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0025_auto_20161204_1825'),
]
operations = [
migrations.RemoveField(
model_name='scenario',
name='short_description',
),
migrations.AddField(
model_name='scenario',
name='description',
field=models.TextField(blank=True, max_length='500', null=True, verbose_name='Kurzbeschreibung'),
),
]
| [
"m.petzolt@campus.tu-berlin.de"
] | m.petzolt@campus.tu-berlin.de |
f01060f592d1abc58f57bafa72cef814ef388759 | 80f4e0e2a22901614c5aa492fd124409c7ba2366 | /webhooks.py | 062429e891df692322278716c80f893a36a2efcb | [] | no_license | raghavaroranit/geeko | 455e43f1b47c4ae30c6318534f355d58c97a061e | 5f42fe2e36f7a618e1c2b76118f005b2edb45a49 | refs/heads/master | 2022-11-26T20:34:39.952248 | 2020-08-06T11:33:41 | 2020-08-06T11:33:41 | 285,532,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | def main():
url = '<webhook URL>' ## Provide the webhook URL
bot_message = {
'text' : 'XYZ'} ## Provide the message
message_headers = { 'Content-Type': 'application/json; charset=UTF-8'}
http_obj = Http()
response = http_obj.request(
uri=url,
method='POST',
headers=message_headers,
body=dumps(bot_message),
)
print(response)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
ffa470ff9e81b24875b7204132daa2adbe70f388 | 8e6ad6cddab066d179fed93d61c4484f2784faaa | /3-7-1.py | 809ee675f2032a616b08cdb7a82771a0c9de5b15 | [] | no_license | lsm9275/python_section3 | 55ef181587cab57d718e67e6fee70ae23689545d | 179e2cb21ff57a8c0470eb9b5ef9069f20087c75 | refs/heads/master | 2023-02-06T15:48:58.422580 | 2020-12-28T15:16:27 | 2020-12-28T15:16:27 | 325,032,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | import sys
import io
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')
print('hi')
class NcafeWriteAtt:
#초기화 실행(Webdriver 설정)
def __init__(self):
chrome_options = Options()
chrome_options.add_argument("--headless")#CLI
self.driver = webdriver.Chrome(chrome_options=chrome_options, executable_path='D:/6_PWork/5_inflearn/01_Python_Automation_and_GUI/Section3/webdriver/chrome/chromedriver')
self.driver.implicitly_wait(5)
#네이버카페 로그인
def writeAttendCheck(self):
self.driver.get('https://nid.naver.com/nidlogin.login?mode=form&url=https%3A%2F%2Fwww.naver.com')
self.driver.find_element_by_name('id').send_keys('lsm9275')
self.driver.find_element_by_name('pw').send_keys('vhgkdrh50!')
self.driver.find_element_by_xpath('//*[@id="log.login"]').click()
self.driver.implicitly_wait(30)
| [
"lsm9275@gmail.com"
] | lsm9275@gmail.com |
f42b93cbe0a3cebe3261c9629c5afbd195c1c45e | 41b5c31d6ab8c3ab3161d4e2e3d4f5df2374e803 | /edx_aws_1/week2/week2/bin/rst2man.py | 4667fe3b65cef5532e1c69f8ad08580f6ef383a4 | [] | no_license | jgjgjgpark/edx_amazon | e6fb01435db42cde645a4415be89a615f0626d6d | 92a05c6d7d8f621993206d31d44dff5eaf7fb1e9 | refs/heads/master | 2020-09-24T08:59:26.237184 | 2019-12-03T21:09:00 | 2019-12-03T21:09:00 | 225,713,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | #!/Users/darkhyun/study/edx_aws_1/week2/week2/bin/python3
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
| [
"darkhyun@MacBook-Pro.local"
] | darkhyun@MacBook-Pro.local |
2e10f0a84c0052cf8f4684cd3356f6cf55ae1c5a | e90473ed89f5e4cac85bdbee7e8c99d11020caf9 | /15b.py | a80d6018e26feaf37449ce4754017cef5eff94e3 | [] | no_license | tpogden/advent-2020 | 807bbada2abbcb15eaec0fc6ab665b86588bb95b | fd0bac9e39728d9011fd0b8dd8c608c89432d4f7 | refs/heads/main | 2023-02-06T01:07:11.628484 | 2020-12-17T16:33:21 | 2020-12-17T16:33:21 | 317,501,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # Exactly the same as 15a, just different n.
def read_arr(fpath='data/15-demo.txt'):
with open(fpath) as f:
arr = [int(i) for i in f.readline().strip().split(',')]
return arr
# arr = read_arr(fpath='data/15-demo.txt')
arr = read_arr(fpath='data/15.txt')
def play(start_arr, n=2020):
prev_dict = {a_i:i+1 for i, a_i in enumerate(start_arr[:-1])}
next_val = start_arr[-1]
for i in range(len(start_arr), n+1):
spoken = next_val
if spoken in prev_dict:
next_val = i - prev_dict[spoken]
else:
next_val = 0
prev_dict[spoken] = i
return spoken
sol = play(start_arr=arr, n=30000000)
print('sol:', sol)
| [
"t@ogden.eu"
] | t@ogden.eu |
2e9c7cd18aee3630dad7bdc354d048fb39c3c218 | 155e67c490779a0bcbde55f60d57747576e98c5b | /manager.py | 527b0fe2e62ecd668dcfe25b5fd271eab182d699 | [
"MIT"
] | permissive | dai1345291582/infomation | 9420373bd70674caaab55b40730952f08cc7212e | cde1263f523b7e9dc8cd202c54783d627e080478 | refs/heads/master | 2020-04-01T01:37:40.864592 | 2018-10-14T14:23:15 | 2018-10-14T14:23:15 | 152,746,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from info import db, create_app
from info.models import *
app = create_app('developement')
manage = Manager(app)
Migrate(app, db)
manage.add_command('db', MigrateCommand)
if __name__ == "__main__":
manage.run()
| [
"1345291582@qq.com"
] | 1345291582@qq.com |
bce2bf6200fd54e982429be07bc78ceb941aa813 | e0d9844e123fa0706388814b9f29758258589487 | /version_info.py | 0e3295c8309a411c0cfb18f664f84b223ed0a2ca | [] | no_license | pigpigman8686/seg | b5cf5261a5744e89ed5e5b145f60b0ccc3ba2c0c | 61c3816f7ba76243a872fe5c5fc0dede17026987 | refs/heads/master | 2023-04-10T22:22:35.035542 | 2021-04-22T06:24:36 | 2021-04-22T06:24:36 | 360,398,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | # UTF-8
#
# For more details about fixed file info 'ffi' see:
# http://msdn.microsoft.com/en-us/library/ms646997.aspx
VSVersionInfo(
ffi=FixedFileInfo(
# filevers and prodvers should be always a tuple with four items: (1, 2, 3, 4)
# Set not needed items to zero 0. Must always contain 4 elements.
filevers=(
int('0.0.0'.split('.')[0]),
int('0.0.0'.split('.')[1]),
int('0.0.0'.split('.')[2]),
0
),
prodvers=(
int('0.0.0'.split('.')[0]),
int('0.0.0'.split('.')[1]),
int('0.0.0'.split('.')[2]),
0
),
# Contains a bitmask that specifies the valid bits 'flags'r
mask=0x3f,
# Contains a bitmask that specifies the Boolean attributes of the file.
flags=0x0,
# The operating system for which this file was designed.
# 0x4 - NT and there is no need to change it.
OS=0x40004,
# The general type of file.
# 0x1 - the file is an application.
fileType=0x1,
# The function of the file.
# 0x0 - the function is not defined for this fileType
subtype=0x0,
# Creation date and time stamp.
date=(0, 0)
),
kids=[
StringFileInfo(
[
StringTable(
'040904B0',
[StringStruct('CompanyName', 'caicy'),
StringStruct('FileDescription', 'seg'),
StringStruct('FileVersion', '0.0.0.0'),
StringStruct('InternalName', 'seg'),
StringStruct('LegalCopyright', '© caicy. All rights reserved.'),
StringStruct('OriginalFilename', 'seg.exe'),
StringStruct('ProductName', 'seg'),
StringStruct('ProductVersion', '0.0.0.0')])
]),
VarFileInfo([VarStruct('Translation', [1033, 1200])])
]
) | [
"952361195@qq.com"
] | 952361195@qq.com |
5c5bfc4ce0a3e8ab2f96c80acd2b1f5d2a5628b3 | 1d14629ea48b884889bfba6e2248cace7aea39ea | /app.py | f29fcba24ac920740d4fc310b55e7e7a37064e7b | [] | no_license | dabomb1004/healthai | 8bad48c496bbfa8347c1b2923ecbc72f88e7ace4 | cef0f3627a4d44b8dcfc03b9067fffe3a94827a5 | refs/heads/main | 2023-06-18T09:59:41.111328 | 2021-07-12T06:33:50 | 2021-07-12T06:33:50 | 362,975,016 | 0 | 0 | null | 2021-04-30T07:50:54 | 2021-04-29T23:42:01 | OpenEdge ABL | UTF-8 | Python | false | false | 26 | py | from FlaskApp import app
| [
"noreply@github.com"
] | noreply@github.com |
9242f4ea83a7422750143a3c49795cc9feb9ba80 | da86fabc1987f828d6d6f57727c9f9f6eb19da28 | /appointments/local_settings.py | 6bedc49b8ea59c480c074eedbff2f706fb5ba163 | [] | no_license | erzubin/Appointment_test | 99b3b693fd0083ca7adbe9ce86f6acf319c7d21a | cba3ca3b6351ac5556b146f0ecdaa8bc3c2e0c5d | refs/heads/master | 2020-03-17T22:04:44.852123 | 2018-05-21T00:02:55 | 2018-05-21T00:02:55 | 133,987,785 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
DEBUG= True
| [
"erzubin@gmail.com"
] | erzubin@gmail.com |
8408f5cc05ba244e2d41265e9e326a1cb9af06b4 | 04d6ed51d5f8f7d4e670d036839aa3f60b8c0af8 | /homeschool/schools/migrations/0005_auto_20200605_1549.py | 7c6f149873f36adfef4aeb7a1c82b200540f88be | [
"MIT"
] | permissive | Lelouchyagami/homeschool | c38072ffc9f63646f164e642c5b337caa66141e1 | 49665aea487405c92c2af596b545f2553e766de8 | refs/heads/master | 2022-12-14T07:12:56.021661 | 2020-07-12T17:14:34 | 2020-07-12T17:14:34 | 252,213,902 | 0 | 0 | MIT | 2022-12-08T03:59:09 | 2020-04-01T15:26:48 | Python | UTF-8 | Python | false | false | 453 | py | # Generated by Django 3.0.2 on 2020-06-05 15:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schools', '0004_auto_20200419_1716'),
]
operations = [
migrations.AlterField(
model_name='schoolyear',
name='days_of_week',
field=models.PositiveIntegerField(default=31, help_text='The days of the week when this runs'),
),
]
| [
"parth.agarwal@treebohotels.com"
] | parth.agarwal@treebohotels.com |
b8b637deeefc0e5a9a13d5bc104b042403a5b259 | 7022638eb436c3aadb898402558194972c939018 | /articles/urls.py | 3d07fd33c239fea6d240301ef3a279b2afc28a99 | [] | no_license | atabekdemurtaza/News | b75d15f968f67560db91ecdc2f7d1c73060446dc | 70fdf1037cdc74e56f66ce6fdf190a01c64693f5 | refs/heads/master | 2023-01-14T08:35:37.008297 | 2020-11-10T07:09:23 | 2020-11-10T07:09:23 | 311,574,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | from django.urls import path
from . import views
urlpatterns = [
path('',views.ArticleListView.as_view(), name='article_list'),
path('<int:pk>/edit/', views.ArticleUpdateView.as_view(), name='article_edit'),
path('<int:pk>/', views.ArticleDetailView.as_view(), name='article_detail'),
path('<int:pk>/delete/', views.ArticleDeleteView.as_view(), name='article_delete'),
path('new/',views.ArticleCreateView.as_view(), name='article_new'),
] | [
"mychannel21war@gmail.com"
] | mychannel21war@gmail.com |
13a3080277c203a5274967e9bc5ba451c329bedb | a7e9728ed86926586971fc6afad82638a8d87114 | /class_dog_practice.py | 69b32fb19ed24517faf79fcd7aab1f8e5efe34cd | [] | no_license | hsuyuming/python_basic_recap | 19329c6f850e08c58d239c3286bccebb0ed9ec9f | 8a7045ccf47fec7ee3037998567a54d99d3f07cc | refs/heads/master | 2021-01-02T10:47:39.843179 | 2020-02-14T01:06:40 | 2020-02-14T01:06:40 | 239,585,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | # 嘗試自定義一個表示狗的類(dog)
# 類是現實中的抽象!
# 屬性: name, age, gender, hight.....
# 方法:叫、咬、跑
class Dog():
"""
表示狗的類
"""
def __init__(self,**kwargs):
self.name = kwargs["name"]
self.age = kwargs["age"]
self.gender = kwargs["gender"]
self.height = kwargs["height"]
pass
def bark(self):
print("狗叫~~~~")
def bite(self):
print("咬人~~~~")
def run(self):
print("{} is running~~~~~~".format(self.name))
dog = Dog(name="dog", age=10, gender='male', height=30)
print(dog.name)
# 目前我們可以直接通過 object.property的方式來修改屬性的值,這種方式導致對象中的屬性可以隨意修改,非常不安全!!!因為值可以被任意修改不論對錯
dog.name = "cat"
dog.bark()
dog.run()
# 現在我們需要一種方式來增加數據的安全性
# 1.屬性不能任意修改(我讓你改你才能改,不讓你改你就不能改)
# 2.屬性不能修改為任意的值(年齡不能是負數)
#
| [
"abego452@gmail.com"
] | abego452@gmail.com |
9f2063baf4a9d75f36f3b0a4abad361699c2f0d1 | 05154751b040ea5390b66061796df7f568d7ca67 | /classification/taskGenerator.py | 47ea6b3c9b3b841f8c49470a9dd370d82ab76201 | [] | no_license | ScaramuzzinoGiovanna/Model-Agnostic_Meta-Learning | 85eab22ab08fabe2fb112e10bf0bf3ae75c0a0b4 | 04e4ea6c0d9db90593a0ac0500530d7553845d5a | refs/heads/master | 2023-07-07T22:44:44.375495 | 2021-09-04T10:56:06 | 2021-09-04T10:56:06 | 348,115,571 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,958 | py | import os
import random
import time
import cv2
import numpy as np
import tensorflow as tf
class TaskGenerator:
def __init__(self, dataset_name, n_way, shot_num, query_num, meta_batch_size):
'''
:param dataset_name: dataset name
:param n_way: a train task contains images from different N classes
:param shot_num: number of images used for meta-train
:param query_num: number of images used for meta-test
:param meta_batch_size: number of tasks in a batch
'''
self.dataset = dataset_name
self.meta_batch_size = meta_batch_size
self.n_way = n_way
self.shot_num = shot_num
self.query_num = query_num
if self.dataset == 'miniimagenet':
META_TRAIN_DIR = 'datasets/MiniImagenet/train'
META_VAL_DIR = 'datasets/MiniImagenet/val'
META_TEST_DIR = 'datasets/MiniImagenet/test'
self.metatrain_folders = [os.path.join(META_TRAIN_DIR, label) \
for label in os.listdir(META_TRAIN_DIR) \
if os.path.isdir(os.path.join(META_TRAIN_DIR, label))
]
self.metaval_folders = [os.path.join(META_VAL_DIR, label) \
for label in os.listdir(META_VAL_DIR) \
if os.path.isdir(os.path.join(META_VAL_DIR, label))
]
self.metatest_folders = [os.path.join(META_TEST_DIR, label) \
for label in os.listdir(META_TEST_DIR) \
if os.path.isdir(os.path.join(META_TEST_DIR, label))
]
if self.dataset == 'omniglot':
if self.shot_num != self.query_num:
self.query_num = self.shot_num
DATA_FOLDER = 'datasets/Omniglot'
character_folders = [
os.path.join(DATA_FOLDER, family, character) \
for family in os.listdir(DATA_FOLDER) \
if os.path.isdir(os.path.join(DATA_FOLDER, family)) \
for character in os.listdir(os.path.join(DATA_FOLDER, family))
]
# Shuffle dataset
random.seed(9314)
random.shuffle(character_folders)
n_val = 100
n_train = 1200 - n_val
self.metatrain_folders = character_folders[:n_train]
self.metaval_folders = character_folders[n_train:n_train + n_val]
self.metatest_folders = character_folders[n_train + n_val:]
# Record the relationship between image label and the folder name in each task
self.label_map = []
def get_batch(self, type):
# return batch set for type: train, val or test
if type == 'train':
folders = self.metatrain_folders
elif type == 'val':
folders = self.metaval_folders
elif type == 'test':
folders = self.metatest_folders
else:
raise Exception('error type dataset split selected')
batch_set = []
self.label_map = []
for i in range(self.meta_batch_size):
folders_idx = np.array(np.random.choice(len(folders), self.n_way, False))
sampled_folders = np.array(folders)[folders_idx].tolist()
labels = np.arange(self.n_way).tolist()
np.random.shuffle(labels)
folder_with_label = list(zip(sampled_folders, labels))
support_x, support_y, query_x, query_y = self.generate_set(folder_with_label)
batch_set.append((support_x, support_y, query_x, query_y))
return batch_set
def shuffle_set(self, set_x, set_y):
# Shuffle sets
set_seed = random.randint(0, 100)
random.seed(set_seed)
random.shuffle(set_x)
random.seed(set_seed)
random.shuffle(set_y)
return set_x, set_y
def extract_images(self, image_file):
# reads and preprocesses the images
if self.dataset == 'omniglot':
img = cv2.imread(image_file)
if img.shape[0] != 28 or img.shape[1] != 28:
img = cv2.resize(img, (28, 28), interpolation=cv2.INTER_AREA)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype(np.float32) / 255
dict_cv2_rotations = {'clock_90': cv2.ROTATE_90_CLOCKWISE, 'c_clock_90': cv2.ROTATE_90_COUNTERCLOCKWISE,
'rotate_180': cv2.ROTATE_180, 'no_rotation:': 0}
random_cv2_rotation_key = np.random.choice(list(dict_cv2_rotations.keys()), 1)[0]
if dict_cv2_rotations[random_cv2_rotation_key] != 0:
img_rotate = cv2.rotate(img, dict_cv2_rotations[random_cv2_rotation_key])
else:
img_rotate = img
return np.reshape(img_rotate, (28, 28, 1))
if self.dataset == 'miniimagenet':
img = cv2.imread(image_file).astype(np.float32) / 255
return img
def generate_set(self, folder_list):
# generate support and query sets ( dividing images and labels)
support_x = []
support_y = []
query_x = []
query_y = []
for i, elem in enumerate(folder_list):
folder = elem[0]
label = elem[1]
tmp_image_files = [os.path.join(folder, img) for img in
np.random.choice(os.listdir(folder), self.shot_num + self.query_num, False)]
tmp_support_x = random.sample(tmp_image_files, self.shot_num)
tmp_query_x = [img_file for img_file in tmp_image_files if img_file not in tmp_support_x]
support_x.extend([self.extract_images(img_file) for img_file in tmp_support_x])
query_x.extend([self.extract_images(img_file) for img_file in tmp_query_x])
# applied one hot to labels
support_y.extend([tf.one_hot(label, self.n_way) for _ in range(len(tmp_support_x))])
query_y.extend([tf.one_hot(label, self.n_way) for _ in range(len(tmp_query_x))])
# shuffle images and labels
support_x, support_y = self.shuffle_set(support_x, support_y)
query_x, query_y = self.shuffle_set(query_x, query_y)
# convert to tensor
support_x = tf.convert_to_tensor(np.array(support_x))
support_y = tf.convert_to_tensor(np.array(support_y))
query_x = tf.convert_to_tensor(np.array(query_x))
query_y = tf.convert_to_tensor(np.array(query_y))
return support_x, support_y, query_x, query_y
if __name__ == '__main__':
tasks = TaskGenerator(dataset_name="miniimagenet", n_way=5, shot_num=1, query_num=15, meta_batch_size=4)
start = time.time()
for i in range(4): # iterations
batch_set = tasks.get_batch('train')
x, y, _, _ = batch_set[0]
print(len(x[0]))
print(time.time() - start)
| [
"scaramuzzinogiovanna95@gmail.com"
] | scaramuzzinogiovanna95@gmail.com |
5e97ba660de0843bf2656343b8e3e5e92ca4cb19 | b1d971b2f78b592f4ec0bcdd652df9ba68146db6 | /simple blog viewer/port/projects/migrations/0001_initial.py | 1107e98586743000a445316c2d1d54c1864b53e7 | [] | no_license | Dyavathrocky/django | 315cc700138fae9b1a8e1e40b616f4843f4dd184 | 5496ec8f2eadebb6b1ec33aba6d48f2661103850 | refs/heads/master | 2020-04-20T07:39:31.077159 | 2019-12-05T03:57:00 | 2019-12-05T03:57:00 | 168,716,302 | 0 | 0 | null | 2019-12-27T11:55:07 | 2019-02-01T15:16:40 | Python | UTF-8 | Python | false | false | 692 | py | # Generated by Django 2.0.1 on 2019-12-05 02:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField()),
('technology', models.CharField(max_length=20)),
('image', models.FilePathField(path='/img')),
],
),
]
| [
"noreply@github.com"
] | noreply@github.com |
729ffe87975dbbc443c0865d023fd89fd57f7aa9 | 99e57f00fcaf4469c1c1b79f2d17176aaef9a790 | /sales_forecast/models/sale_allocation.py | cdce00e85495ffde7e478e89044f78b1410f3649 | [] | no_license | detian08/mcl | d007ffd0e869f3bd9a8c74bc8473119901f0de2a | 32d61148326c931aca0107c3894061773f287e33 | refs/heads/master | 2022-03-23T19:36:29.608645 | 2019-12-11T10:15:50 | 2019-12-11T10:15:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import uuid
from itertools import groupby
from datetime import datetime, timedelta
from werkzeug.urls import url_encode
from odoo import api, fields, models, _,exceptions
from odoo.exceptions import UserError, AccessError
from odoo.osv import expression
from odoo.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT
from odoo.tools.misc import formatLang
from odoo.addons import decimal_precision as dp
class SaleAllocation(models.Model):
_name = "sale.forecaste"
z_period = fields.Selection([('Monthly','Monthly'),('weekly','weekly'),('Daily','Daily')],string ='Period',store = True)
z_from_date = fields.Date(string = 'From Date',store = True)
z_to_date = fields.Date(string = 'To Date',store = True)
z_allow_linw = fields.One2many('sale.forecaste.line', 'z_allow_id', string='allow Lines', copy=True, auto_join=True)
@api.constrains('z_to_date')
def _check_date(self):
for r in self:
if r.z_to_date < self.z_from_date:
raise models.ValidationError('To Date should be greater than From Date')
class SaleAllocationLine(models.Model):
_name = "sale.forecaste.line"
z_allow_id = fields.Many2one('sale.forecaste',string = 'allow id',store = True)
z_team_id = fields.Many2one('crm.team',string = 'Sale Team',store = True)
z_user_id = fields.Many2one('res.users',string = 'Sale Person',store = True)
z_product_id = fields.Many2one('product.product',string = 'Product',store = True)
z_forecasted_qnty = fields.Float(string = 'Forecasted quantity',store = True)
z_forecasted_val = fields.Float(string = 'Forecasted Value',store = True)
| [
"adarsh@prixgen.com"
] | adarsh@prixgen.com |
3b894bae0ee3d517642226dbc9f04bef13455b2e | f19aaf9948760a270dcc8ba2941ec2765b4fcfa4 | /apps/project_manager/migrations/0001_initial.py | 07b507b24f8f47a8b310af6c11a54321ab758df4 | [] | no_license | nawikart/DjangoOne | 68622d5a43904e8919d032e535f14b7734f2d1b8 | 67f180447b99228daccfbd1b520bdd87711fb3e3 | refs/heads/master | 2020-03-20T21:02:22.137632 | 2018-06-19T00:32:31 | 2018-06-19T00:32:31 | 137,720,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | # Generated by Django 2.0.6 on 2018-06-18 04:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='user.User')),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=256)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='project_manager.Project')),
],
),
]
| [
"nawi@outpost-asia.com"
] | nawi@outpost-asia.com |
f80501d3f8c9302427833896662c728a94200144 | e846a43e85c9ba4e64898e1d2280c5e58ef04149 | /main.py | 30784a59d4111551608f9d4f5bb0744985e0da8f | [] | no_license | eeo/music-library | 2603ebaa9dfe3996c8d3e3e588d1bf5652c05102 | dda03c9b7e88a1945dd58913d590278d9c444d95 | refs/heads/master | 2021-01-10T12:29:49.630136 | 2016-02-02T17:38:34 | 2016-02-02T17:38:34 | 50,930,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | import os
from mutagen.easyid3 import EasyID3, mutagen
from mutagen.easymp4 import EasyMP4
extensions = [".mp3"]
tags_names = ["TPE1", "TCON", "TALB", "TIT2", "TPUB", "TRCK"]
listdir = []
for x in os.listdir(path="."):
file = os.path.splitext(x)
if(file[1] in extensions) :
listdir.append(file)
print("LIST OF FILES:\t", listdir)
for file in listdir:
name = file[0]+file[1]
m = mutagen.File(name)
tag = dict.fromkeys(tags_names)
for t in tag.keys():
tag[t] = m[t]
print(t, tag[t])
os.rename(name, "{0} - {1}{2}".format(tag["TRCK"], tag["TIT2"], file[1]))
print("_______")
| [
"Елена Ермилова"
] | Елена Ермилова |
e6673d2280d9c7d4e10d0d700c8d5bae67520953 | 918ad606828781af3e4f38a3104222db67508104 | /utils/custom_utils.py | 6ae3b812faead19da5f230b5304a73e9f969c1a4 | [] | no_license | wang-zifu/Book-Generator | 39a4e7fe5e093998a74e361cb5eff2277062fefa | 47a3682613aeac271adfc52b0b77578c658245f2 | refs/heads/master | 2023-01-08T04:53:09.992085 | 2020-11-11T01:44:05 | 2020-11-11T01:44:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,547 | py | import torch
import nltk
import spacy
spacy_en = spacy.load('en')
def create_sentence(model, sentence, input_vocab, output_vocab,
device, max_length=50):
# Create tokens using spacy and everything in lower case (which is what our vocab is)
if type(sentence) == str:
tokens = [token.text.lower() for token in spacy_en(sentence)]
else:
tokens = [token.lower() for token in sentence]
# Add <SOS> and <EOS> in beginning and end respectively
tokens.insert(0, input_vocab.init_token)
tokens.append(input_vocab.eos_token)
# Go through each vocab token and convert to an index
text_to_indices = [input_vocab.vocab.stoi[token] for token in tokens]
# Convert to Tensor
sentence_tensor = torch.LongTensor(text_to_indices).unsqueeze(1).to(device)
outputs = [output_vocab.vocab.stoi["<sos>"]]
for i in range(max_length):
trg_tensor = torch.LongTensor(outputs).unsqueeze(1).to(device)
with torch.no_grad():
output = model(sentence_tensor, trg_tensor)
best_guess = output.argmax(2)[-1, :].item()
outputs.append(best_guess)
if best_guess == output_vocab.vocab.stoi["<eos>"]:
break
generated_sentence = [output_vocab.vocab.itos[idx] for idx in outputs]
# remove start token
return generated_sentence[1:]
def tokenize_text(text):
return [tok.text for tok in spacy_en.tokenizer(text)]
def modified_bleu(data, model, input_vocab, output_vocab, device):
targets = []
outputs = []
sum_bleu = 0
data_input = enumerate(data)
while len(targets) < 100:
example = data[np.random.randint(len(data))]
src = vars(example)["i"]
trg = vars(example)["o"]
prediction = create_sentence(model, src, input_vocab, output_vocab, device)
prediction = prediction[:-1] # remove <eos> token
targets.append([trg])
outputs.append(prediction)
return nltk.translate.bleu_score.corpus_bleu(targets, outputs,
smoothing_function=nltk.translate.bleu_score.SmoothingFunction().method4)
def foldify(data, k_folds):
batches = []
for _, batch in enumerate(data):
batches.append(batch)
output = []
chunk_size = len(batches)//k_folds
iterations = 0
for i in range(0, len(batches), chunk_size):
iterations += 1
if(iterations < k_folds):
output.append(batches[i:i+chunk_size])
else:
output.append(batches[i:-1])
break
return output | [
"kalebugalde@gmail.com"
] | kalebugalde@gmail.com |
056c60cae9d680e109d300727181d9f3ce385913 | 28a474b78561a1bff18dc09965abb8eb77f21386 | /course/forms.py | 6a4d7987fd3009c9521c7a326e7ff504e7c1a7e0 | [] | no_license | Ahmedkbbj/edusite | 7a992a7ed6395284ec01d542767dd54969cb0c1a | c3f4a29ff361c9684fe3c7e8cae19c5e34ec7e4c | refs/heads/master | 2022-06-11T07:56:08.098176 | 2020-05-09T10:31:24 | 2020-05-09T10:31:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | from django import forms
from course.models import Course
class CreateCrseForm(forms.ModelForm):
class Meta:
model = Course
fields = ['ce_dptcls', 'ce_abb', 'ce_name']
widgets ={
'ce_abb': forms.TextInput(attrs={
'class':'form-control',
'id': 'ce_abb',
'required': True,
'placeholder':'Abbreviation'
}),
'ce_name': forms.TextInput(attrs={
'class':'form-control',
'id': 'ce_name',
'required': True,
'placeholder':'Course Name'
}),
}
class UpdateCrseForm(forms.ModelForm):
class Meta:
model = Course
fields = ['ce_dptcls', 'ce_abb', 'ce_name']
def save(self, commit=True):
crse = self.instance
crse.abb = self.cleaned_data['ce_abb']
crse.name = self.cleaned_data['ce_name']
if commit:
crse.save()
return crse
| [
"ytlamal@connect.ust.hk"
] | ytlamal@connect.ust.hk |
93d874fcb0503c0266f53ab533313773a94261c8 | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/nbclient/util.py | 9b672357b05be0de493e5f59054ae05e0086f448 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 3,205 | py | """General utility methods"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
import sys
import inspect
from typing import Callable, Awaitable, Any, Union
def check_ipython() -> None:
# original from vaex/asyncio.py
IPython = sys.modules.get('IPython')
if IPython:
version_str = IPython.__version__ # type: ignore
# We get rid of any trailing ".dev"
version_str = version_str.replace(".dev", "")
IPython_version = tuple(map(int, version_str.split('.')))
if IPython_version < (7, 0, 0):
raise RuntimeError(f'You are using IPython {IPython.__version__} ' # type: ignore
'while we require 7.0.0+, please update IPython')
def check_patch_tornado() -> None:
"""If tornado is imported, add the patched asyncio.Future to its tuple of acceptable Futures"""
# original from vaex/asyncio.py
if 'tornado' in sys.modules:
import tornado.concurrent # type: ignore
if asyncio.Future not in tornado.concurrent.FUTURES:
tornado.concurrent.FUTURES = \
tornado.concurrent.FUTURES + (asyncio.Future, ) # type: ignore
def just_run(coro: Awaitable) -> Any:
"""Make the coroutine run, even if there is an event loop running (using nest_asyncio)"""
# original from vaex/asyncio.py
loop = asyncio._get_running_loop()
if loop is None:
had_running_loop = False
try:
loop = asyncio.get_event_loop()
except RuntimeError:
# we can still get 'There is no current event loop in ...'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
else:
had_running_loop = True
if had_running_loop:
# if there is a running loop, we patch using nest_asyncio
# to have reentrant event loops
check_ipython()
import nest_asyncio
nest_asyncio.apply()
check_patch_tornado()
return loop.run_until_complete(coro)
def run_sync(coro: Callable) -> Callable:
"""Runs a coroutine and blocks until it has executed.
An event loop is created if no one already exists. If an event loop is
already running, this event loop execution is nested into the already
running one if `nest_asyncio` is set to True.
Parameters
----------
coro : coroutine
The coroutine to be executed.
Returns
-------
result :
Whatever the coroutine returns.
"""
def wrapped(*args, **kwargs):
return just_run(coro(*args, **kwargs))
wrapped.__doc__ = coro.__doc__
return wrapped
async def ensure_async(obj: Union[Awaitable, Any]) -> Any:
"""Convert a non-awaitable object to a coroutine if needed,
and await it if it was not already awaited.
"""
if inspect.isawaitable(obj):
try:
result = await obj
except RuntimeError as e:
if str(e) == 'cannot reuse already awaited coroutine':
# obj is already the coroutine's result
return obj
raise
return result
# obj doesn't need to be awaited
return obj
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
f98b212f7684a451a65f3afe7b8e7ed58e3ffeb3 | c8a31cb40807a168e34292c61f80b4c0a71211ed | /camera.py | 08dbac890c1ee916512ad7e693c6aa30cffc5f3b | [] | no_license | rephus/wearable-camera | b4c74b2cd75b93ed681ce536bef756b23cf5aa93 | f23f592652d8a8f38f3c91c551e95adbc15c5331 | refs/heads/master | 2021-01-22T21:22:26.512959 | 2017-03-18T19:26:27 | 2017-03-18T19:26:27 | 85,418,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | # https://www.raspberrypi.org/learning/getting-started-with-picamera/worksheet/
from picamera import PiCamera
from time import sleep, time
import datetime

FREQUENCY = 30  # timelapse frequency, in seconds
FOLDER = "/home/pi/photos"  # Folder to store photos

camera = PiCamera()
# Suggested settings (the second resolution assignment overrides the first).
camera.resolution = (3032, 2008)  # 6mpx
camera.resolution = (2560, 1920)  # 5mpx
camera.framerate = 15

camera.start_preview()
sleep(2)  # Sleep 2 additional seconds to give time to start

try:
    while True:
        # Sample: A picture will be saved in $FOLDER/picture17-03-18_17:34.jpg
        timestamp = datetime.datetime.now().strftime('%y-%m-%d_%H:%M:%S')
        picture = '{}/picture{}.jpg'.format(FOLDER,timestamp)
        print("Take picture {}".format(picture))
        camera.capture(picture)
        sleep(FREQUENCY)
finally:
    # Fixed: this call was previously placed after the endless loop and was
    # unreachable; the try/finally runs it when the loop is interrupted
    # (e.g. Ctrl+C), so the preview is always shut down cleanly.
    camera.stop_preview()
| [
"rephus@gmail.com"
] | rephus@gmail.com |
55e40b3bf8071fca05b6467d0f35479817d7a0dd | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_11546.py | 1fc582462bd10e1e682a285ef7047c01e969c822 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | # export PYTHONPATH
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
996595a07a75ebce547c75c4db7a4e7010eb959f | f7f1dfc7e14beae90e78bbd29c24ed488dfff6dd | /new_benchmarks/bm_zlib.py | c0bdd8611abb55a46c12d7e70a024daef55f560c | [] | no_license | bhavishyagopesh/gsoc_2017 | 573f013ddf40663e8f47657dd3495bb5abaf3bd4 | 9a868f49f0f6767ba4dd7432c535fc92d2541e07 | refs/heads/master | 2020-01-19T21:13:57.883162 | 2017-08-24T04:12:27 | 2017-08-24T04:12:27 | 94,213,554 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | """
Benchmark zlib module.
"""
import perf
from six.moves import xrange
import zlib
import binascii
def add_cmdline_args(cmd, args):
    """Forward the optionally selected benchmark name to a worker command line."""
    selected = args.benchmark
    if selected:
        cmd.append(selected)
# Payload reused by every benchmark iteration (~2.6 MB of repetitive text).
DATA = b'This is to benchmark zlib.'*100000
COMPRESSED = zlib.compress(DATA)
# NOTE(review): DECOMPRESSED appears unused by the benchmarks below.
DECOMPRESSED = zlib.decompress(COMPRESSED)
def bench_compress(loops):
    """Compress DATA `loops` times; return the elapsed wall-clock seconds."""
    # Build the iterator before starting the clock so only the
    # compression work is timed.
    iterations = xrange(loops)
    start = perf.perf_counter()
    for _ in iterations:
        zlib.compress(DATA)
    return perf.perf_counter() - start
def bench_decompress(loops):
    """Decompress COMPRESSED `loops` times; return the elapsed seconds."""
    # Build the iterator before starting the clock so only the
    # decompression work is timed.
    iterations = xrange(loops)
    start = perf.perf_counter()
    for _ in iterations:
        zlib.decompress(COMPRESSED)
    return perf.perf_counter() - start
# Maps the command-line benchmark name to its timing function.
BENCHMARKS = {
    "compress": bench_compress,
    "decompress": bench_decompress,
}
if __name__ == "__main__":
    runner = perf.Runner(add_cmdline_args=add_cmdline_args)
    runner.metadata['description'] = "Performance of the zlib module"
    parser = runner.argparser
    parser.add_argument("benchmark", nargs='?', choices=sorted(BENCHMARKS))
    options = runner.parse_args()
    # No explicit choice means "run everything", in sorted order.
    if options.benchmark:
        benchmarks = (options.benchmark,)
    else:
        benchmarks = sorted(BENCHMARKS)
    for bench in benchmarks:
        name = '%s' % bench
        bench_func = BENCHMARKS[bench]
        runner.bench_time_func(name, bench_func, inner_loops=10)
| [
"bhavishyagopesh@gmail.com"
] | bhavishyagopesh@gmail.com |
263af1eb8f7e2a492fd18367b1ef7f782b64d6c3 | d57db248cad7af0fc9685ac74d6dc068b41dc907 | /pyango/test1/booktest/migrations/0002_humaninfo.py | b7f5a9536d2e7b4de7fe3ea3717fb85f63285ec2 | [] | no_license | ankle7766/pyango | d77408588c8f2e120cfa91707f5a9959b74bca70 | e98388befec3c8404da0be850394849a6ed0bb07 | refs/heads/main | 2023-05-07T14:47:40.832224 | 2021-06-04T14:09:44 | 2021-06-04T14:09:44 | 371,297,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the HumanInfo table with a
    # foreign key into booktest.BookInfo. Applied after 0001_initial.
    dependencies = [
        ('booktest', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='HumanInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('hname', models.CharField(max_length=20)),
                ('hgender', models.BooleanField(default=False)),
                ('hcomment', models.CharField(max_length=128)),
                ('hbook', models.ForeignKey(to='booktest.BookInfo',on_delete=models.CASCADE)),
            ],
        ),
    ]
| [
"pan7766118@gmail.com"
] | pan7766118@gmail.com |
b48c3b08fd97bb52892a0c5d3d04514672b1908c | 27ee9bddfbb9186bf0dd6b370dd5854f47350766 | /fmdl_algo/tests/tests_FMDLAlgoUtils/test_FMDLAlgoUtils_getApproximatedRoiBoundaryPoints.py | 9459c208b6710ed25f68827df8e8f7115e10289b | [] | no_license | hooman67/advanceDataExplorationAndAugmentationNotebooks | 275df0f5b24c6f41c2e14d30937d2498c60d689b | 0ba95d2508c287e7bb0469bc13927ed10eb4bd81 | refs/heads/master | 2022-12-11T02:43:31.114609 | 2020-03-03T23:22:39 | 2020-03-03T23:22:39 | 167,065,467 | 0 | 0 | null | 2022-11-22T03:36:10 | 2019-01-22T20:52:27 | Jupyter Notebook | UTF-8 | Python | false | false | 2,291 | py | from fmdlAlgo.FMDLAlgo import FMDLData
from utils.FMDLAlgoUtils import getApproximatedRoiBoundaryPoints
import unittest
import numpy as np
class Test_FMDLAlgoUtils_getApproximatedRoiBoundaryPoints(unittest.TestCase):
    # Fixtures loaded once at class-definition time: recorded contours and the
    # approximated boundaries they are expected to reduce to.
    groundTruth_maxContour = np.load(
        'tests/networksAndImages-forUnitTesting/roiBoundaryContour_FMDL_2018.04.30_19.11.20.npy')
    roi_boundary_points_reduction_factor = 0.01
    groundTruth_approximated_roi_boundary = np.load(
        'tests/networksAndImages-forUnitTesting/approxRoiBoundary_FMDL_2018.04.30_19.11.20.npy')
    groundTruth_maxContour_intersecting = np.load(
        'tests/networksAndImages-forUnitTesting/selfIntersectingROI_maxContour.npy')
    groundTruth_approximated_roi_boundary_intersecting = np.load('tests/networksAndImages-forUnitTesting/selfIntersectingROI_approxRoiBoundary.npy')
    def test_outputs_noIntersection(self):
        # Regular contour: result should match the stored approximation (atol=1).
        approximated_roi_boundary = getApproximatedRoiBoundaryPoints(self.groundTruth_maxContour, self.roi_boundary_points_reduction_factor)
        self.assertTrue(np.allclose(approximated_roi_boundary, self.groundTruth_approximated_roi_boundary, atol=1))
    def test_outputs_intersection(self):
        # Self-intersecting contour is handled and matches its stored result.
        approximated_roi_boundary = getApproximatedRoiBoundaryPoints(self.groundTruth_maxContour_intersecting, self.roi_boundary_points_reduction_factor)
        self.assertTrue(np.allclose(approximated_roi_boundary, self.groundTruth_approximated_roi_boundary_intersecting, atol=1))
    def test_missingApproxRoi(self):
        # Empty contour input yields an empty boundary.
        approximated_roi_boundary = getApproximatedRoiBoundaryPoints([], self.roi_boundary_points_reduction_factor)
        self.assertTrue(np.allclose(approximated_roi_boundary, [], atol=1))
    def test_badApproxRoi1(self):
        # Malformed (flat all-zero) input also yields an empty boundary.
        approximated_roi_boundary = getApproximatedRoiBoundaryPoints(np.zeros(100), self.roi_boundary_points_reduction_factor)
        self.assertTrue(np.allclose(approximated_roi_boundary, [], atol=1))
    def test_badApproxRoi2(self):
        # A short flat list is likewise rejected as an invalid contour.
        approximated_roi_boundary = getApproximatedRoiBoundaryPoints([1,2,3,4,5], self.roi_boundary_points_reduction_factor)
        self.assertTrue(np.allclose(approximated_roi_boundary, [], atol=1)) | [
"houman_sh2001@hotmail.com"
] | houman_sh2001@hotmail.com |
87664fa96f5619cf491af3c15187044ff7fe0dc3 | 4fc78edd1c0d18621d9cb91e79ff1f9d6fad8fa4 | /digest.py | 99f280463e8c39562dcf7849ffa58f02cb191b76 | [] | no_license | plesner/avg | 0baf1ebb1bc39d57fb1df1cd254bf9057ef5a63c | dc5ae89d0755a28cb50e29ae3c93df203c690d6c | refs/heads/master | 2021-01-19T18:51:24.844074 | 2012-12-30T09:49:43 | 2012-12-30T09:49:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,471 | py | #!/usr/bin/python
import sys
class Stream(object):
    """Sequential reader over the space-separated tokens of a string."""

    def __init__(self, str):
        self.cursor = 0
        self.parts = str.split(" ")

    def has_more(self):
        """True while at least one unread token remains."""
        return len(self.parts) > self.cursor

    def next(self):
        """Return the current token and advance past it."""
        token = self.peek()
        self.cursor += 1
        return token

    def peek(self):
        """Return the current token without consuming it."""
        return self.parts[self.cursor]
# Current pen position (x, y) and the start point of the open subpath;
# all four are mutated by the emit helpers inside main().
x = 0.0
y = 0.0
start_x = None
start_y = None
def main(args):
global x, y
assert len(args) == 1
stream = Stream(open(args[0], "rt").read())
def format_float(val):
return "e(%s)" % val
def parse_coord(coord):
[x_str, y_str] = coord.split(",")
return (float(x_str), float(y_str))
def update_start():
global start_x, start_y
if start_x is None:
start_x = x
start_y = y
def emit(is_rel, draws, op, *args):
global x, y, start_x, start_y
if draws:
update_start()
else:
start_x = start_y = None
def process_coord(str):
(vx, vy) = parse_coord(str)
if is_rel:
vx += x
vy += y
return "%s, %s" % (format_float(vx), format_float(vy))
print " %s(%s, %s, %s)," % (op, format_float(x), format_float(y), ", ".join(map(process_coord, args)))
(dx, dy) = parse_coord(args[-1])
if is_rel:
x += dx
y += dy
else:
x = dx
y = dy
def emit_straight(is_rel, is_vertical, op, arg):
global x, y
update_start()
d = float(arg)
nx = x
ny = y
if is_rel:
if is_vertical:
ny += d
else:
nx += d
else:
if is_vertical:
ny = d
else:
nx = d
print " %s(%s, %s, %s, %s)," % (op, format_float(x), format_float(y), format_float(nx), format_float(ny))
x = nx
y = ny
while stream.has_more():
instr = stream.next()
if instr == "m":
emit(True, False, "move_to", stream.next())
while stream.has_more() and len(stream.peek()) > 1:
emit(True, True, "line_to", stream.next())
elif instr == "M":
emit(False, False, "move_to", stream.next())
while stream.has_more() and len(stream.peek()) > 1:
emit(False, True, "line_to", stream.next())
elif instr == "L":
while stream.has_more() and len(stream.peek()) > 1:
emit(False, True, "line_to", stream.next())
elif instr == "l":
while stream.has_more() and len(stream.peek()) > 1:
emit(True, True, "line_to", stream.next())
elif instr == "v":
while stream.has_more() and len(stream.peek()) > 1:
emit_straight(True, True, "line_to", stream.next())
elif instr == "V":
while stream.has_more() and len(stream.peek()) > 1:
emit_straight(False, True, "line_to", stream.next())
elif instr == "h":
while stream.has_more() and len(stream.peek()) > 1:
emit_straight(True, False, "line_to", stream.next())
elif instr == "h":
while stream.has_more() and len(stream.peek()) > 1:
emit_straight(False, False, "line_to", stream.next())
elif instr == "c":
while stream.has_more() and len(stream.peek()) > 1:
emit(True, True, "curve_to", stream.next(), stream.next(), stream.next())
elif instr == "C":
while stream.has_more() and len(stream.peek()) > 1:
emit(False, True, "curve_to", stream.next(), stream.next(), stream.next())
elif instr == "z":
emit(False, False, "line_to", "%s,%s" % (start_x, start_y))
else:
print instr
print " end()"
if __name__ == "__main__":
    main(sys.argv[1:])   # main() asserts exactly one argument: the input file path
| [
"christian.plesner.hansen@gmail.com"
] | christian.plesner.hansen@gmail.com |
a9879bdaadff217daa50056b96de0890cb3a4886 | 21d21402c70d8a95d9a4b492078e3fb36e2c9af1 | /kevin_schmitt/Django/survey_form/apps/amadon/urls.py | 8b49a29b4ba010e799b37331a5f06798327eed10 | [] | no_license | hmp36/python_aug_2017 | df897a1b0aa161300386192d48e3fcac9eb495c8 | 8747429b91b09349e5b5469d8932593b06f645e1 | refs/heads/master | 2021-04-29T23:16:50.149226 | 2017-09-11T20:14:37 | 2017-09-11T20:14:37 | 121,552,666 | 1 | 0 | null | 2018-02-14T19:34:54 | 2018-02-14T19:34:54 | null | UTF-8 | Python | false | false | 295 | py | from django.conf.urls import url
from . import views
# Routes for the Amadon store app; handlers live in the sibling views module.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^buy$', views.buy),
    url(r'^receipt$', views.receipt),
    url(r'^reset$', views.reset),
]
# (?P<num>\d+) is the variable for any number typed into the URI | [
"kschmit1@aisd.net"
] | kschmit1@aisd.net |
d8c6d77075894fb8fdf24de873d53fc724eb82a2 | 672b57ee6ad36cab8eff4802c423d5f836ebcab0 | /scraper/models.py | 6e27a3f4f8b2b808e7396ce290cb0582e0400c0f | [] | no_license | stanislavn/thrustfeed | a6b76dd485c80c1a16156930d078eb67267ec30d | b6a79d11b777048ff4f93629eea70c161f612d33 | refs/heads/master | 2023-02-18T19:22:25.228888 | 2021-01-24T13:08:26 | 2021-01-24T13:08:26 | 332,446,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | from django.db import models
class Product(models.Model):
    # One scraped product record. Most attributes are optional (blank/null)
    # because source pages do not always provide them; `url` is unique and
    # effectively identifies the product.
    availability = models.CharField(max_length=200, blank = True, null=True)
    itemCondition = models.CharField(max_length=200, blank = True, null=True)
    # NOTE(review): max_length has no effect on FloatField.
    price = models.FloatField(max_length=200, blank = True, null=True)
    priceCurrency = models.CharField(max_length=200, blank = True, null=True)
    url = models.URLField(max_length=2200, unique=True)
    brand = models.CharField(max_length=200, blank = True, null=True)
    color = models.CharField(max_length=900, blank = True, null=True)
    depth = models.CharField(max_length=400, blank = True, null=True)
    gtin12 = models.CharField(max_length=200, blank = True, null=True)
    logo = models.URLField(max_length=2200, blank = True, null=True)
    manufacturer = models.CharField(max_length=900, blank = True, null=True)
    mpn = models.CharField(max_length=900, blank = True, null=True)
    sku = models.CharField(max_length=900, blank = True, null=True)
    alternateName = models.CharField(max_length=900, blank = True, null=True)
    description = models.CharField(max_length=2200, blank = True, null=True)
    image = models.URLField(max_length=2200, blank = True, null=True)
    name = models.CharField(max_length=900, blank = True, null=True)
    compatible = models.CharField(max_length=2200, blank = True, null=True)
    def __str__(self):
        return self.name | [
"29331439+stanislavn@users.noreply.github.com"
] | 29331439+stanislavn@users.noreply.github.com |
50e66a565a5b022effb51c22c66abcd32f5971c5 | 04303c090c6465b64f06205003ff246922bf3131 | /modules/ascii.py | 3389b29bfaa558bdffb5acdcf66be6fcb322f241 | [
"MIT"
] | permissive | merlinfuchs/clancy | 0845ca58f8e43e946099b529bc1b09ca2f0de84a | 7c08025a47a48ae2fa90ce8916fb32fa6311db68 | refs/heads/main | 2023-03-10T21:38:37.047609 | 2021-02-22T21:48:46 | 2021-02-22T21:48:46 | 337,463,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | from dbots.cmd import *
from util import *
APPEND_COMMANDS = [
("flipall", "┻━┻︵ \(°□°)/ ︵ ┻━┻"),
("rageflip", "(ノಠ益ಠ)ノ彡┻━┻"),
("bearflip", "ʕノ•ᴥ•ʔノ ︵ ┻━┻"),
("jakeflip", "(┛❍ᴥ❍)┛彡┻━┻"),
("flipbattle", "(╯°□°)╯︵ ┻━┻ ︵ ╯(°□° ╯)"),
("magicflip", "(/¯◡ ‿ ◡)/¯ ~ ┻━┻"),
("flipdude", "(╯°Д°)╯︵ /(.□ . \)"),
("herculesflip", "(/ .□.)\ ︵╰(゜Д゜)╯︵ /(.□. \)"),
("happyflip", "┻━┻ ︵ ლ(⌒-⌒ლ)"),
("fucktable", "(┛◉Д◉) ┛彡┻━┻"),
("fixtable", "┬──┬ ¯\_(ツ)"),
("smirk", "¬‿¬"),
("creapy", "(◕‿◕)"),
("disapprove", "ಠ_ಠ"),
("lenny", "( ˘ ͜ʖ ˘)"),
("cool", "(⌐■_■)"),
("smug", "⚈ ̫ ⚈"),
("fight", "(ง ͠° ͟ل͜ ͡°)ง"),
("dead", "(✖╭╮✖)"),
("glomp", "(づ ̄ ³ ̄)づ"),
("bearhug", "ʕっ• ᴥ • ʔっ")
]
class AsciiModule(Module):
    # Registers one chat command per (name, kaomoji) pair in APPEND_COMMANDS;
    # each command echoes the user's optional message with the art appended.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        def make_callable(_value):
            # Bind _value as a parameter-level capture so every command
            # closure keeps its own art string.
            async def cmd_func(ctx, message=""):
                await send_webhook_response(ctx, f"{message} {_value}")
            return cmd_func
        for name, value in APPEND_COMMANDS:
            self.commands.append(Command(
                name=name,
                description=f"Append {value} to your message.",
                callable=make_callable(value),
                options=[CommandOption(
                    type=CommandOptionType.STRING,
                    name="message",
                    description="Your message",
                    required=False
                )]
            ))
| [
"merlinfuchs2001@gmail.com"
] | merlinfuchs2001@gmail.com |
458055eac4e2f8e25e954abafd1524e2dbd765d2 | d7f98cf5b4a20d3efa05d32726cf2528d27104b3 | /Chapter 4/4-1.py | 382b39a5e2dd6082971d18c03baaf1cdb7259a74 | [] | no_license | BeanSparrow/MCIS540hunt | 40e797a138d7fb02c4e6cddcde3c999640c86e73 | cf89027da9e9dfabf64520035b10c630e39c8072 | refs/heads/main | 2023-04-15T20:26:54.294912 | 2021-05-03T00:37:53 | 2021-05-03T00:37:53 | 350,469,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,953 | py | # Exercise 4-1
import math
try:
# see if Swampy is installed as a package
from swampy.TurtleWorld import *
except ImportError:
# otherwise see if the modules are on the PYTHONPATH
from TurtleWorld import *
def square(t, length):
    """Draw a square with sides of the given length.

    The Turtle ends at its starting position and heading.
    """
    for _ in range(4):
        fd(t, length)
        lt(t)
def polyline(t, n, length, angle):
    """Draw n line segments, turning left by `angle` between them.

    t: Turtle object
    n: number of line segments
    length: length of each segment
    angle: degrees between segments
    """
    for _ in range(n):
        fd(t, length)
        lt(t, angle)
def polygon(t, n, length):
    """Draw a regular polygon with n sides.

    t: Turtle
    n: number of sides
    length: length of each side.
    """
    exterior_angle = 360.0 / n
    polyline(t, n, length, exterior_angle)
def arc(t, r, angle):
    """Draw an arc of the given radius spanning `angle` degrees.

    t: Turtle
    r: radius
    angle: angle subtended by the arc, in degrees
    """
    # Approximate the arc with straight segments of at most ~4 units each.
    total_length = 2 * math.pi * r * abs(angle) / 360
    segments = int(total_length / 4) + 1
    segment_length = total_length / segments
    segment_angle = float(angle) / segments
    # making a slight left turn before starting reduces
    # the error caused by the linear approximation of the arc
    lt(t, segment_angle / 2)
    polyline(t, segments, segment_length, segment_angle)
    rt(t, segment_angle / 2)
def circle(t, r):
    """Draw a full circle of radius r (a 360-degree arc).

    t: Turtle
    r: radius
    """
    arc(t, r, 360)
# the following condition checks whether we are
# running as a script, in which case run the test code,
# or being imported, in which case don't.
if __name__ == '__main__':
    world = TurtleWorld()
    bob = Turtle()
    bob.delay = 0.001
    # draw a circle centered on the origin
    radius = 100
    pu(bob)          # pen up: travel out to the rim without drawing
    fd(bob, radius)
    lt(bob)
    pd(bob)          # pen down again before drawing the circle
    circle(bob, radius)
wait_for_user() | [
"stvhunt132@gmail.com"
] | stvhunt132@gmail.com |
16f08340f13e5ef8e599df67e8d5494e198b58e8 | cb8c63aea91220a9272498d5ea6cca0a0738b16a | /numberfun.py | 1215b93e182f08eb2efaf9e5f70760eb790eb933 | [] | no_license | akantuni/Kattis | 1265de95bfe507ce7b50451a16f19720b86bef44 | 12f31bb31747096bf157fcf6b1f9242d91654533 | refs/heads/master | 2021-12-14T11:18:27.723045 | 2021-12-11T05:43:37 | 2021-12-11T05:43:37 | 111,472,667 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | n = int(input())
# For each of the n test cases, read three ints a b c and report whether
# +, *, - or / applied to a and b (in either operand order) can yield c.
# Division is tested multiplicatively (a / b == c  <=>  a == c * b) which
# also sidesteps division by zero.
for _ in range(n):
    nums = input().split()
    nums = [int(num) for num in nums]
    a, b, c = nums
    if a + b == c:
        print("Possible")
    elif a * b == c:
        print("Possible")
    elif a - b == c or b - a == c:
        print("Possible")
    elif a == c * b or b == c * a:
        print("Possible")
    else:
        print("Impossible")
    # Fixed: removed the trailing `a, b = b, a` — a and b are reassigned at
    # the top of every iteration and never read after the loop, so the swap
    # was dead code.
| [
"akantuni@gmail.com"
] | akantuni@gmail.com |
89a3fb0509cb0063cf07ef8459140ca03e76fb17 | 36b72d1f3c6dd8ff842068052097498efff08f9d | /BankAccount.py | db8df9c8054c7e5b2c4bded6fdf6cd6ded9e3413 | [] | no_license | KuponoK/bank-account | 5423a02bbb00fc1989dedd31c82b61149e865d80 | 9713b3be7e9a5cbe5d3cac1fbf3fdcdf632782bc | refs/heads/main | 2023-01-11T23:40:18.895932 | 2020-11-10T02:31:52 | 2020-11-10T02:31:52 | 311,468,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | # bank account class
class BankAccount:
def __init__(self, full_name, account_number, routing_number, balance):
self.__full_name = full_name
self.__account_number = account_number
self.__routing_number = routing_number
self.__balance = balance
# deposit function
def deposit(self, amount):
self.__balance += amount
# withdraw function
def withdraw(self, amount):
if (amount <= self.__balance):
self.__balance -= amount
print(f'Amount Withdrawn: {amount}')
else:
print('Insufficient funds')
self.__balance -= 10
# get_balance method
def get_balance(self):
print(f'You have a balance of {self.__balance}')
return self.__balance
# add_intrest method
def add_interest(self):
interest = self.__balance * 0.00083
self.__balance += round(interest, 2)
# print_receipt method
def print_receipt(self):
print(f'{self.__full_name}\n Account No: {self.__account_number}\n Routing No: {self.__routing_number}\n Balance: {self.__balance}')
# 3 different bank account examples
Po_Kealiinohomoku = BankAccount('Po Kealiinohomoku', 12345678, 87654321, 100)
Po_Kealiinohomoku.deposit(20.50)
Po_Kealiinohomoku.withdraw(10)
Po_Kealiinohomoku.add_interest()
Po_Kealiinohomoku.get_balance() | [
"kuponokealiinohomoku@AthinaspleWatch.localdomain"
] | kuponokealiinohomoku@AthinaspleWatch.localdomain |
79caaf5bfb2be6e088227f82e6450840e49f28d5 | aed32b1f500f0096e480f559b196adad88b15a8d | /python/junior/math.py | 5857f55794951485796a0519ebb5317ec26dd751 | [] | no_license | fypp/leetCode | 9f95b75fe1f553a6ccc9480d4a1059ed853da6e1 | 2bb797cb0d55eca02807574d6fe58761f9582274 | refs/heads/master | 2021-04-03T01:26:32.191371 | 2018-04-21T02:16:46 | 2018-04-21T02:16:46 | 124,632,407 | 0 | 0 | null | 2018-04-21T02:16:47 | 2018-03-10T07:19:36 | null | UTF-8 | Python | false | false | 2,189 | py | class Solution(object):
def countPrimes(self, n):
"""
:type n: int
:rtype: int
"""
if n < 2:
return 0
composite_dict = {}
prime_dict = {}
primes_count = 0
for i in range(2, n):
if i not in composite_dict:
primes_count += 1
prime_dict[primes_count] = i
composite_dict[i] = 1
j = 1
while j <= primes_count and i * prime_dict[j] < n:
composite_dict[i * prime_dict[j]] = 1
if i % prime_dict[j] == 0:
break
j += 1
return primes_count
def isPowerOfThree(self, n):
"""
:type n: int
:rtype: bool
"""
if n == 1:
return True
while n > 3:
if n % 3 != 0:
return False
n /= 3
if n == 3:
return True
else:
return False
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
result = []
for i in range(n):
if (i + 1) % 3 == 0 and (i + 1) % 5 == 0:
result.append("FizzBuzz")
elif (i + 1) % 3 == 0:
result.append("Fizz")
elif (i + 1) % 5 == 0:
result.append("Buzz")
else:
result.append(str(i + 1))
return result
def romanToInt(self, s):
"""
:type s: str
:rtype: int
"""
value_dict = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
str_list = list(s)
len_list = len(str_list)
result = 0
if len_list ==0:
return None
elif len_list == 1:
return value_dict[str_list[0]]
for i in range(1, len_list):
value_second = value_dict[str_list[i]]
value_fist = value_dict[str_list[i-1]]
if value_fist >= value_second:
result += value_fist
else:
result -= value_fist
result += value_dict[str_list[-1]]
return result
| [
"chenshifeng@chenshifeng.lan"
] | chenshifeng@chenshifeng.lan |
fbd79e2ea249c672bce152386465c6b69ec3b0fa | c67d38c1417c6579175ab4716ac0d84441b5aaa6 | /format_check.py | d7e2d8fa0bdca686a461112932e8dd8839d4259f | [] | no_license | huangy10/AutoUpdateArtworks | 348ff549f40b1c895e186a8a6753c071592e70d0 | eae91f486213624619ad9481351bac22af6df0d1 | refs/heads/master | 2021-01-20T06:22:30.394478 | 2017-03-07T04:51:21 | 2017-03-07T04:51:21 | 83,503,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | # check the format of file names to guarantee the integrity
def check_folder_structure(root_path):
    # TODO: not implemented yet — intended to verify the expected directory
    # layout under root_path (per the file-level comment).
    pass
def check_data_integrity(root_path):
    """
    Check the integrity of image set
    :param root_path: the root path where images are stored
    :return: Boolean
    """
    # TODO: not implemented yet.
    pass
| [
"woody1573@163.com"
] | woody1573@163.com |
2c1d3c28bd4ad211a96d33919d83d0e2e68e138a | ef44ff6f41eb2946615686ffe7d2cde866587821 | /chinaapi/qq/weibo/open.py | c408ae58ffbd251a984fb771c0f3ebdb8920ca18 | [] | no_license | randy-ran/ChinaAPI | 6130b050d147d8ddb26f9dc4d8af517ae68af5cf | afb1dd9aa5d0ac61fec1cab1385f92319fe7d83e | refs/heads/master | 2021-01-17T21:46:58.125522 | 2014-01-20T11:21:20 | 2014-01-20T11:21:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,200 | py | # coding=utf-8
from chinaapi.open import ClientBase, Method, OAuth2Base, Token as TokenBase, App
from chinaapi.exceptions import InvalidApi, ApiResponseError
from chinaapi.utils import parse_querystring
# Per-model predicates deciding whether an API method must use HTTP POST.
IS_POST_METHOD = {
    'user': lambda m: m in ['verify'],
    'friends': lambda m: m in ['addspecial', 'delspecial', 'addblacklist', 'delblacklist'],
    't': lambda m: m in ['re_add', 'reply', 'comment', 'like', 'unlike'],
    'fav': lambda m: m in ['addht', 'addt', 'delht', 'delt'],
    'vote': lambda m: m in ['createvote', 'vote'],
    'list': lambda m: m != 'timeline', # only the timeline endpoint reads; every other 'list' method writes
    'lbs': lambda m: True # all 'lbs' methods are writes
}
DEFAULT_IS_POST_METHOD = lambda m: False
# Maps the API's `ret` status codes to human-readable (Chinese) messages.
RET = {
    0: u'成功返回',
    1: u'参数错误',
    2: u'频率受限',
    3: u'鉴权失败',
    4: u'服务器内部错误',
    5: u'用户错误',
    6: u'未注册微博',
    7: u'未实名认证'
}
def parse(response):
    """Decode an API response; raise ApiResponseError on a non-zero `ret`.

    Returns the `data` member when present, otherwise the whole payload.
    """
    r = response.json_dict()
    if 'ret' in r and r.ret != 0:
        raise ApiResponseError(response, r.ret, RET.get(r.ret, u''), r.get('errcode', ''), r.get('msg', ''))
    if 'data' in r:
        return r.data
    return r
class Token(TokenBase):
    """
    openid: unified user identifier; uniquely identifies one user
    openkey: the user key paired with openid; the secret used to verify the openid's identity
    """
    def __init__(self, access_token=None, expires_in=None, refresh_token=None, **kwargs):
        super(Token, self).__init__(access_token, expires_in, refresh_token, **kwargs)
        self.openid = kwargs.pop('openid', None)
        self.openkey = kwargs.pop('openkey', None)
        self.name = kwargs.pop('name', None)
class Client(ClientBase):
    # write-method verbs: a method whose first '_'-separated token is listed
    # here is sent via POST
    _post_methods = ['add', 'del', 'create', 'delete', 'update', 'upload']
    def __init__(self, app=App(), token=Token(), openid=None, clientip=None):
        super(Client, self).__init__(app, token)
        self.openid = openid
        self.clientip = clientip
    def _parse_response(self, response):
        return parse(response)
    def _prepare_url(self, segments, queries):
        """
        Because `del` is a Python reserved word it cannot be a method name;
        callers write `delete`, which is converted back to `del` here.
        """
        if segments[-1] == 'delete' and segments[-2] != 'list': # 'list' itself has a delete method, so exclude it
            segments[-1] = 'del'
        return 'https://open.t.qq.com/api/{0}'.format('/'.join(segments))
    def _prepare_method(self, segments):
        # Decide GET vs POST from the two-part API path (model/method).
        if len(segments) != 2:
            raise InvalidApi(self._prepare_url(segments, None))
        model, method = tuple([segment.lower() for segment in segments])
        if method.split('_')[0] in self._post_methods:
            return Method.POST
        elif IS_POST_METHOD.get(model, DEFAULT_IS_POST_METHOD)(method):
            return Method.POST
        return Method.GET
    def _prepare_queries(self, queries):
        # Common query parameters required by every open.t.qq.com call.
        queries.update(oauth_version='2.a', format='json', oauth_consumer_key=self.app.key)
        if not self.token.is_expires:
            queries['access_token'] = self.token.access_token
        if self.openid:
            queries['openid'] = self.openid
        if 'clientip' not in queries and self.clientip:
            queries['clientip'] = self.clientip
class OAuth2(OAuth2Base):
    AUTH_URL = 'https://open.t.qq.com/cgi-bin/oauth2/authorize'
    TOKEN_URL = 'https://open.t.qq.com/cgi-bin/oauth2/access_token'
    def __init__(self, app):
        super(OAuth2, self).__init__(app)
    def _parse_token(self, response):
        # Token responses are querystring-encoded, not JSON.
        data = parse_querystring(response.text)
        if 'errorCode' in data:
            raise ApiResponseError(response, data['errorCode'], data.get('errorMsg', '').strip("'"))
        return Token(**data)
    def revoke(self, **kwargs):
        """ Revoke the authorization.
        Request parameters: the standard oauth or openid&openkey parameters.
        Returns whether the revocation succeeded.
        """
        kwargs['format'] = 'json'
        response = self._session.get('http://open.t.qq.com/api/auth/revoke_auth', params=kwargs)
        parse(response)
        return True # no exception means ret == 0 (ret: 0 = success, non-zero = failure)
| [
"14190635@qq.com"
] | 14190635@qq.com |
f6067061b3800b3c8ee2e7505ccd7992abdfcb4e | 4f3233abdd679d4e5aeaf9c542c9d8efc2e0d1c1 | /config/urls.py | 4d5c1629b781ac936c13998150122746cdf7526e | [] | no_license | TejsinghDhaosriya/tej-stock-search | 9fc195d09bea3c42aebff65dc376fdb87b68b121 | 83e2bcb2844a9d95968fdc8b1feacb648934fde9 | refs/heads/main | 2023-03-29T23:19:31.737704 | 2021-04-03T10:40:59 | 2021-04-03T10:40:59 | 354,230,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
    path('admin/', admin.site.urls),
    path("api/", include("config.api_router")),   # REST API routes are delegated to config.api_router
]
| [
"tejsinghdhaosriya@gmail.com"
] | tejsinghdhaosriya@gmail.com |
07fccbff6dc36b6923a3339363c214e6ebc79309 | ecce8a10aabb24019296cebaa46503f91876796f | /football_app/football_app/referee/urls.py | da7d9c7f2911a0055d6de80c5285c7b36bb0e0b1 | [] | no_license | Yeldarmt/DJangoFootballApp | 28450d60fbd0ec98bdf6d223545e17062442f970 | d9568cd48089c0be55217d8aecadf65053b72420 | refs/heads/master | 2022-11-26T16:19:34.252927 | 2020-04-26T18:09:52 | 2020-04-26T18:09:52 | 237,893,654 | 0 | 0 | null | 2022-11-22T05:28:37 | 2020-02-03T05:42:24 | Python | UTF-8 | Python | false | false | 216 | py | from django.urls import path
from football_app.referee.views import RefereesListView, RefereeDetailView
urlpatterns = [
    path('', RefereesListView.as_view()),          # referee list
    path('<int:pk>/', RefereeDetailView.as_view()) # referee detail, by primary key
]
| [
"eldarmukhametkazin@gmail.com"
] | eldarmukhametkazin@gmail.com |
06afcc7f626aad4e5894419a5470762c4e5bcfab | a3944bb458fa972f51ded5d55460e6a697a76482 | /Printing_Models2.py | 488df0c0c46dbb0fb4bf32115cad8a23b6ad720c | [] | no_license | olopez15401/Python_Exercises | 866e95be0b07953ca0f823e15eeadcd21e68dfbc | c578e59a3586568381661ca8f5500e1c91b3634c | refs/heads/master | 2020-04-09T03:53:01.766865 | 2019-01-26T19:05:11 | 2019-01-26T19:05:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | import Printing_Models_2 as printer
# Drive the printer module with sample designs; presumably print_models
# moves each design into completed_models as it prints (defined in
# Printing_Models_2 — confirm there).
unprinted_designs = ['iphone case', 'robot pendant', 'dodecahedron']
completed_models = []
printer.print_models(unprinted_designs,completed_models)
printer.show_completed_models(completed_models)
| [
"olopez15401@gmail.com"
] | olopez15401@gmail.com |
e0a494ce4ad4d72e812f2860cf7f862e5d9881f4 | bfce201a7971b05b2fbe0af4819081d71c3850db | /fermi/Pass7Validation/compare_flight_mc_psf/v1/plot.py | 8b5dbc36066c171de57395421b6155e1fcbbbf1a | [] | no_license | joshualande/PhD-Work | 2fe52f82f726ad6166937a3daed342c8cd9aee2f | 1d834a19b5a9347ccad75bd5a76126d5fd840c64 | refs/heads/master | 2020-04-18T15:14:19.127171 | 2014-01-26T22:58:10 | 2014-01-26T22:58:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,911 | py | from matplotlib import rc
rc('ps',usedistiller='xpdf')
rc('text', usetex=True)
rc('font', family='serif', serif="Computer Modern Roman")
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
from matplotlib.patheffects import withStroke
import matplotlib
import h5py
import pylab as P
import numpy as np
from scipy.stats import chi2
file=h5py.File('/nfs/slac/g/ki/ki03/lande/fermi/data/monte_carlo/compare_flight_mc_psf/v1/merged.hdf5')
flux=np.asarray(file['flux_mc'])
ts_ext_P7SOURCE_V4=np.asarray(file['ts_ext_P7SOURCE_V4'])
ts_ext_P7SOURCE_V6=np.asarray(file['ts_ext_P7SOURCE_V6'])
ts=np.asarray(file['ts_P7SOURCE_V6'])
ts_point = ts - ts_ext_P7SOURCE_V6
index=np.asarray(file['index_mc'])
fig=P.figure(figsize=(5,3))
fig.subplots_adjust(right=0.95, top=0.95, bottom=0.15)
from mpl_toolkits.axes_grid.axes_grid import Grid
grid = Grid(fig, 111, nrows_ncols = (1, 2), axes_pad=0.0)
min_cdf = 1e-4
format_float = lambda f: r'$%s$' % str(f).replace('e-0',r'\times 10^')
print format_float(1e-4)
for i,(name,irf,all_ts_ext) in enumerate([
['(a)','P7SOURCE_V6',ts_ext_P7SOURCE_V6],
['(b)','P7SOURCE_V4',ts_ext_P7SOURCE_V4]
]):
max_ts=max(all_ts_ext) + 1
axes = grid[i]
grid[i].add_artist(
AnchoredText(name, frameon=False, loc=2, prop=dict(size=14,
path_effects=[withStroke(linewidth=5,foreground='w')])))
index_mc=2
for flux_mc, color in zip(
reversed([ 1e-8, 3e-8, 1e-7, 3e-7, 1e-6, 3e-6 ]),
['red', 'blue', 'green', 'black', 'orange', 'gray']
):
kwargs=dict(color=color)
select = (flux==flux_mc) & (index==index_mc) & (ts_point>25)
print 'index=%s, flux=%s, num=%s' % (index_mc,flux_mc,np.sum(select))
print np.mean(ts_point[select])
if np.sum(select) < 100:
continue
print irf, flux_mc, select
ts_ext = all_ts_ext[select]
ts_ext[ts_ext<0] = 0
bins=np.linspace(0,max_ts,1e3)
bin_center=bins[:-1] + (bins[1]-bins[0])/2
binned=np.histogram(ts_ext,bins=bins)[0]
if any(ts_ext>max_ts):
print '> max: ',irf,ts_ext[np.where(ts_ext>max_ts)]
cdf=np.cumsum(binned[::-1])[::-1]
cdf=cdf.astype(float)/cdf[0] # normalize
cdf[cdf == 0] = min_cdf
axes.semilogy(bin_center,cdf,linewidth=1,label=format_float(flux_mc), **kwargs)
y = chi2.sf(bins,1)/2
axes.semilogy(bins, y, 'red', linewidth=1, label='$\chi^2_1/2$', zorder=0, dashes=(5,3))
axes.set_ylim(min_cdf,1)
axes.set_xlabel(r'$\mathrm{TS}_\mathrm{ext}$')
axes.set_ylabel('Cumulative Density')
from lande_plotting import fix_axesgrid
fix_axesgrid(grid)
prop = matplotlib.font_manager.FontProperties(size=10)
grid[0].legend(loc=1, prop=prop, columnspacing=1)
grid[1].set_xlim(0,100)
P.savefig('extension_test.eps')
P.savefig('extension_test.pdf')
| [
"lande@37a9682d-6443-41a2-8582-b44379b6e86f"
] | lande@37a9682d-6443-41a2-8582-b44379b6e86f |
a6bf04eee157f8affa051c83f593901f58e2384f | 13130259156f6f9d16670cea88aa2608dd477d16 | /ranlp/quora/results.py | b70e44e2985744a977f50a0d49649dba64c0a0ff | [] | no_license | fkunneman/DiscoSumo | d459251d543be5f4df38292a96f52baf4b520a0b | ed8f214834cf0c2e04a3bc429253502f7e79fbf8 | refs/heads/master | 2022-12-14T13:34:41.496963 | 2019-07-31T15:57:02 | 2019-07-31T15:57:02 | 140,422,779 | 2 | 1 | null | 2022-12-08T04:57:55 | 2018-07-10T11:36:00 | Python | UTF-8 | Python | false | false | 585 | py | __author__='thiagocastroferreira'
import os
import json
from sklearn.metrics import f1_score, accuracy_score
if __name__ == '__main__':
    results_dir = 'results'
    # Score every dump under ./results except the held-out 'dev' split.
    for result_name in os.listdir(results_dir):
        if result_name == 'dev':
            continue
        with open(os.path.join(results_dir, result_name)) as handle:
            entries = json.load(handle)
        print(result_name)
        y_real = [entry['y_real'] for entry in entries]
        y_pred = [entry['y_pred'] for entry in entries]
        print('Accuracy: ', round(accuracy_score(y_real, y_pred), 2))
        print('F1-Score: ', round(f1_score(y_real, y_pred), 2))
        print(10 * '-')
| [
"f.kunneman@let.ru.nl"
] | f.kunneman@let.ru.nl |
0b03dea62f2f617d199febafdc8934f8f50c119c | be6c5c959bfee20c8712d0852f04a6afa3949adb | /assets/exercicios/ex-01-respostas/q2/Pedido.py | 30f01504ffee0df9853b1955eb79029409a558c9 | [] | no_license | LucianoBI/POO | 6e01aef731f926929819df52d1da819564d9bd73 | 70001165a8356db5ec3279a296fc8f77ec658bc4 | refs/heads/master | 2023-03-17T13:20:43.948105 | 2018-10-10T14:53:40 | 2018-10-10T14:53:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | class Pedido:
def __init__(self, valor_total=0.0):
    """Create an order with an optional pre-set total and no line items."""
    self.valor_total = valor_total
    self.itens_pedido = []

def adicionar_item(self, item):
    """Append a line item (with `produto` and `quantidade`) to the order."""
    self.itens_pedido.append(item)

def obter_total(self):
    """Return the order total: sum of unit price times quantity per item."""
    return sum(
        (item.produto.valor * item.quantidade for item in self.itens_pedido),
        0.0,
    )
"diegopessoa12@gmail.com"
] | diegopessoa12@gmail.com |
62ef51945e8fd4e850c7446372a0058b0ce54a21 | 33f2f4ed5242f256e2a31145125dad91699c1ead | /Leetcode/Contests/weekly_200_find_winner.py | 4d5aff5422154a9770e74affeafcf60927731bf5 | [] | no_license | Zahidsqldba07/competitive-programming-1 | b04b2962ce7bc4454008a3cbb7bee88c0e02251d | e35b3a1c95c559b7754d4fabf8d2c4f09b0c42b2 | refs/heads/master | 2023-07-11T01:39:38.188489 | 2021-08-29T19:29:47 | 2021-08-29T19:29:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,714 | py | # 5476. Find the Winner of an Array Game
'''
Given an integer array arr of distinct integers and an integer k.
A game will be played between the first two elements of the array (i.e. arr[0] and arr[1]). In each round of the game, we compare arr[0] with arr[1], the larger integer wins and remains at position 0 and the smaller integer moves to the end of the array. The game ends when an integer wins k consecutive rounds.
Return the integer which will win the game.
It is guaranteed that there will be a winner of the game.
'''
# TO COMPLETE/CORRECT
class Solution:
    def getWinner(self, arr: List[int], k: int) -> int:
        """Return the integer that wins k consecutive rounds.

        Each round compares the reigning element against the next challenger;
        the larger one stays and its win streak grows. If no element ever
        reaches k straight wins, the array maximum wins by default.

        The original draft (marked "TO COMPLETE/CORRECT") mutated the list and
        used a wrong loop condition; this O(n)/O(1) version fixes it.

        Args:
            arr: distinct integers, len(arr) >= 2.
            k: required number of consecutive wins, k >= 1.
        Returns:
            The winning integer.
        """
        current = arr[0]
        streak = 0
        for challenger in arr[1:]:
            if challenger > current:
                # New champion: its streak starts at this win.
                current = challenger
                streak = 1
            else:
                streak += 1
            if streak == k:
                return current
        # k was never reached: the maximum beats everything from then on.
        return current
| [
"shinghos@mit.edu"
] | shinghos@mit.edu |
c3fc9797f0ec63a0c75e4de9a29fc39a10c528e2 | f27beb3492a78e66ecf68bc39130146d0aef90e0 | /LeetCode/292_Nim游戏.py | 5466eab2117f57502a7990917f46d3140754f030 | [] | no_license | ziyeZzz/python | 2ca23558c698b3c40dbda0729c4e60d3a2fdfa5f | 0d5c3ba1d0f57200c921abf227629b7621e207ce | refs/heads/master | 2020-03-28T15:15:08.227527 | 2019-09-11T08:14:31 | 2019-09-11T08:14:31 | 120,991,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | '''
你和你的朋友,两个人一起玩 Nim游戏:桌子上有一堆石头,每次你们轮流拿掉 1 - 3 块石头。 拿掉最后一块石头的人就是获胜者。你作为先手。
你们是聪明人,每一步都是最优解。 编写一个函数,来判断你是否可以在给定石头数量的情况下赢得游戏。
示例:
输入: 4
输出: false
解释: 如果堆中有 4 块石头,那么你永远不会赢得比赛;
因为无论你拿走 1 块、2 块 还是 3 块石头,最后一块石头总是会被你的朋友拿走。
'''
def canWinNim(n):
    """Return True iff the first player can win Nim with n stones.

    With optimal play the first player loses exactly when n is a
    multiple of 4 (whatever 1-3 stones are taken, the opponent can
    restore a multiple of 4).
    """
    return bool(n % 4)
"ziye123zoe@gmail.com"
] | ziye123zoe@gmail.com |
45bfaa924bfb448dc1c2b54c73be5810785af95f | 5b8cf355396b0ecfb3a891a0d2fd884081734f0a | /parseByCol.py | 9a3f90a36b009b7451105250ae64ecf276f04a22 | [] | no_license | rahulprabhakaran/GraphSpectrometer | 4ac327588c1b4689c305dfec97a07c1ac43cb1ef | ee55a60b4b273bf16ad88a239e25362952251dbe | refs/heads/master | 2021-05-29T07:11:20.172251 | 2014-02-04T02:47:58 | 2014-02-04T02:47:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,411 | py | import fiedler
import os.path
import sys
import json
def filename_parse(fn, filter_min=.001, col=2, filter_col=2):
    """Wraps fiedler.file_parse, opening the file by name.

    Takes:
        fn: filename.
            ".out" files will be treated as rf-ace output and filtered by
            importance; all other files will be treated as sif files.
        filter_min: minimum value (in filter_col) for an edge to be kept.
        col: column holding the edge value.
        filter_col: column the filter threshold is applied to.
    Returns:
        The tuple returned by fiedler.file_parse
        (used by main() as (adj_list, iByn, nByi)).
    """
    fo = open(fn)
    try:
        # FIX: the original assigned a throwaway `out = ()` and leaked the
        # file handle if file_parse raised; try/finally closes it always.
        return fiedler.file_parse(fo, node2=1, filter_col=filter_col,
                                  filter_min=filter_min, val_col=col)
    finally:
        fo.close()
def main():
    """Command-line entry point (Python 2 source).

    Usage: parseByCol.py <file> <filter_min> <val_col> [<filter_col>]
    Parses the input file, runs the fiedler computation and dumps the
    result (plus adjacency list and node index maps) to a JSON file named
    <basename>.cutoff.<filter_min>.json.
    """
    fn = sys.argv[1]
    # NOTE(review): this "" assignment is immediately overwritten below and
    # appears to be dead code.
    filter_min = ""
    filter_min = float(sys.argv[2])
    col = int(sys.argv[3])
    filter_col = col
    # Optional fourth argument overrides the filter column.
    if len(sys.argv)>4:
        filter_col=int(sys.argv[4])
    print "Parseing %s min %s val_col %s filter_col %s"%(os.path.abspath(fn),filter_min,col,filter_col)
    (adj_list, iByn, nByi) = filename_parse(fn, filter_min, col, filter_col)
    fn = os.path.basename(fn)
    # Two fiedler vectors (n_fied=2) computed on the parsed adjacency list.
    fied = fiedler.fiedler(adj_list, fn=fn + str(filter_min), plot=False, n_fied=2)
    # Bundle the inputs with the result so the JSON dump is self-contained.
    fied["adj"] = adj_list
    fied["iByn"] = iByn
    fied["nByi"] = nByi
    outfn=os.path.basename(fn) +".cutoff."+ str(filter_min) + ".json"
    fo = open(outfn, "w")
    print "Outputing fiedler results for %s to %s"%(os.path.abspath(fn),os.path.abspath(outfn))
    json.dump(fied, fo)
    fo.close()
# Script entry point.
if __name__ == '__main__':
    main()
"ryanbressler@gmail.com"
] | ryanbressler@gmail.com |
4d853b31930bbf00def3c148a4c933d6555a3fed | d9637f39666ea0bcd9a0b7120492132bc71b0d07 | /Part 3/Config_1.py | a799a24e546d39966b07c9e13e37b596f22730bc | [] | no_license | karanajith2096/SML-Project | c06266a0caaa7465a9752b28ce56af5b6f7cc701 | 95f042ebcde1420ad35343e6da1c9f8d17ae7518 | refs/heads/master | 2020-08-02T04:20:57.577047 | 2019-12-09T04:41:45 | 2019-12-09T04:41:45 | 211,232,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | # -*- coding: utf-8 -*-
"""Untitled1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1HIkogrW1RAGPJ8Xt96PftX7BYBulvMOJ
"""
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import matplotlib.pyplot as plt
# Training hyper-parameters.
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
#data pre-processing
# Reshape to the channel layout expected by the active Keras backend.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
#creating CNN model
# Layer sizes (6/16 conv filters, 120/84 dense units) follow a LeNet-5-like
# layout -- presumably intentional; confirm with the exercise statement.
model = Sequential()
model.add(Conv2D(6, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(120, activation='relu'))
model.add(Dense(84, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# https://keras.io/optimizers/
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(lr=0.1, rho=0.95, epsilon=None, decay=0.0),
              metrics=['accuracy'])
# NOTE(review): the test set is reused as validation data here, so
# "validation loss" below is really test loss monitored per epoch.
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
#print(score)
#storing values for training and validation loss
train_loss = history.history['loss']
val_loss = history.history['val_loss']
#print(history.history.keys())

#plotting graphs
# FIX: derive the x axis from `epochs` instead of the hard-coded range(1, 13)
# so the plots stay correct if the epoch count changes.
epoch_axis = range(1, epochs + 1)

plt.plot(epoch_axis, train_loss)
plt.xlabel("Epochs")
plt.ylabel("Training loss")
plt.title("Training Loss")
plt.show()

plt.plot(epoch_axis, val_loss)
plt.xlabel("Epochs")
plt.ylabel("Validation loss")
plt.title("Validation Loss")
plt.show()
"karanajith@gmail.com"
] | karanajith@gmail.com |
d6133e030913d6f52d6462bdb35d3f36d7348abf | dd861ad8a33e1ec43a969746ec58efbbd877ca58 | /telusko/urls.py | 607c4907b6abde60fe968f17fa05fe98bf638a5c | [] | no_license | sajibuzzaman/telusko-DjangoProject | 54c74b1136f4d69dda092fe4ab03958214bc4e60 | c4a8cadfa18544bbfe4c359c730cbc4e2ef318e8 | refs/heads/master | 2023-03-05T22:10:58.583721 | 2021-02-14T20:39:09 | 2021-02-14T20:39:09 | 338,895,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | """telusko URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('travello.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('accounts.urls'))
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| [
"muhammadsajibuzzaman1998@gmail.com"
] | muhammadsajibuzzaman1998@gmail.com |
426484e082f5347af323f2f8c92f2c61f1ad7259 | 83be30d71563db5ca25d1173cb8e46dadcbf8ebb | /src/GFM_MLC_labelwise.py | 7f9ac97c7ff8f66abee2e8d772a012a9296dbe5b | [] | no_license | tfmortie/f-measure | 270f4aec2e121407628a4cde0d6f479e716324ba | ceccdcb9888c5fa544a5f78ca12ec83557c84ed4 | refs/heads/master | 2020-05-02T21:59:25.560069 | 2018-07-05T13:30:23 | 2018-07-05T13:30:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,366 | py | """
GFM with multiclass classification for multilabel classification tasks
Both F1 and F2 scores are calculated
"""
import os
import ast
import sys
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
# TF - Keras imports
import tensorflow as tf
import keras.backend as K
from keras.optimizers import Adam, SGD
from keras.callbacks import EarlyStopping, ModelCheckpoint
# Custom modules
import utils.generators as gn
from utils import csv_helpers
from classifiers.nn import GFM_labelwise_classifier
from classifiers.F_score import compute_F_score
from classifiers.gfm import GeneralFMaximizer, complete_matrix_columns_with_zeros
# Let TF see only one GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def GFM_MLC(args):
    """Run the labelwise GFM multilabel experiment on one dataset.

    Loads precomputed features and label matrices for args.dataset, fits one
    per-label classifier (Keras by default, sklearn logistic regression when
    the `sklearn` flag is flipped), assembles the per-label probability
    matrices, runs the General F-measure Maximizer for beta in {1, 2} and
    prints the resulting F-scores. For KAGGLE_PLANET and beta=2 it also
    writes a submission file.

    Args:
        args: argparse namespace with `dataset` (str) and `c` (float,
            regularization strength).
    """
    # Parameters
    batch_size = 32
    dataset = args.dataset
    epochs = 1000  # early stopping on validation data
    verbosity = 0
    # Hard-coded switch: False means the Keras branch below is used.
    sklearn = False
    c = args.c
    print('Amount of regularization= {:.3f}'.format(c))
    features_train = np.load('../data/{}/features/features_train_max.npy'.format(dataset))
    features_validation = np.load('../data/{}/features/features_validation_max.npy'.format(dataset))
    features_test = np.load('../data/{}/features/features_test_max.npy'.format(dataset))
    n_features = features_train.shape[1]
    # rescale (scaler fit on train only, applied to all splits)
    from sklearn.preprocessing import StandardScaler
    featurescaler = StandardScaler().fit(features_train)
    features_train = featurescaler.transform(features_train)
    features_validation = featurescaler.transform(features_validation)
    features_test = featurescaler.transform(features_test)
    csv_path_train = '../data/{}/TRAIN.csv'.format(dataset)
    csv_path_validation = '../data/{}/VALIDATION.csv'.format(dataset)
    csv_path_test = '../data/{}/TEST.csv'.format(dataset)
    df_train = pd.read_csv(csv_path_train)
    df_validation = pd.read_csv(csv_path_validation)
    df_test = pd.read_csv(csv_path_test)
    # NOTE(review): the *_steps values are computed but never used below.
    train_steps = np.ceil(len(df_train) / batch_size)
    validation_steps = np.ceil(len(df_validation) / batch_size)
    test_steps = np.ceil(len(df_test) / batch_size)
    # Extract ground truth labels (stored as stringified Python lists).
    y_true_train = np.array([ast.literal_eval(df_train['marginal_labels'][i])
                             for i in range(len(df_train))])
    y_true_validation = np.array([ast.literal_eval(df_validation['marginal_labels'][i])
                                  for i in range(len(df_validation))])
    y_true_test = np.array([ast.literal_eval(df_test['marginal_labels'][i])
                            for i in range(len(df_test))])
    n_labels = y_true_train.shape[1]
    y_gfm_train = np.array([ast.literal_eval(df_train['gfm_labels'][i])
                            for i in range(len(df_train))])
    y_gfm_validation = np.array([ast.literal_eval(df_validation['gfm_labels'][i])
                                 for i in range(len(df_validation))])
    # Compute max_s: the maximum number of positive labels for a single
    # instance across all three splits (sets the multinomial output size).
    max_s = np.max(np.array([np.max(np.sum(y_true_train, axis=1)),
                             np.max(np.sum(y_true_validation, axis=1)),
                             np.max(np.sum(y_true_test, axis=1))]))
    # Containers for the per-label probability matrices.
    GFM_train_entries = []
    GFM_validation_entries = []
    GFM_test_entries = []
    for label in range(n_labels):
        # print('Label {} of {}...'.format(label, n_labels))
        # extract one multinomial regression problem
        if sklearn:
            y_label_train = np.argmax(y_gfm_train[:, label, :], axis=1)
            y_label_validation = np.argmax(y_gfm_validation[:, label, :], axis=1)
        else:
            y_label_train = y_gfm_train[:, label, :]
            y_label_validation = y_gfm_validation[:, label, :]
        # print(y_label_train.shape)
        if sklearn:
            from sklearn.linear_model import LogisticRegression
            model = LogisticRegression(multi_class='ovr', solver='lbfgs', C=c)
        else:
            # One small Keras head per label, early-stopped on val loss and
            # checkpointed to disk so the best epoch can be restored.
            model = GFM_labelwise_classifier(n_features, max_s + 1, c).model
            optimizer = Adam()
            model.compile(loss='categorical_crossentropy', optimizer=optimizer)
            callbacks = [
                EarlyStopping(monitor='val_loss', min_delta=0,
                              patience=3, verbose=verbosity, mode='auto'),
                ModelCheckpoint('../models/GFMMLC_labelwise_{}.h5'.format(dataset),
                                monitor='val_loss', save_best_only=True, verbose=verbosity)
            ]
            model.fit(x=features_train, y=y_label_train,
                      batch_size=batch_size, epochs=epochs,
                      verbose=verbosity, callbacks=callbacks, validation_data=(features_validation, y_label_validation))
            # Load best model
            model.load_weights('../models/GFMMLC_labelwise_{}.h5'.format(dataset))
            model.compile(loss='categorical_crossentropy', optimizer=optimizer)
        pis_train = model.predict(features_train)
        pis_validation = model.predict(features_validation)
        pis_test = model.predict(features_test)
        if sklearn:
            # sklearn predicts class ids; one-hot encode them to match the
            # probability-matrix shape produced by the Keras branch.
            from sklearn.preprocessing import OneHotEncoder
            enc = OneHotEncoder()
            enc.fit(np.argmax(np.argmax(y_gfm_train[:, :, :], axis=1), axis=1).reshape(-1, 1))
            pis_train = enc.transform(pis_train.reshape(-1, 1)).toarray()
            pis_validation = enc.transform(pis_validation.reshape(-1, 1)).toarray()
            pis_test = enc.transform(pis_test.reshape(-1, 1)).toarray()
        GFM_train_entries.append(pis_train)
        GFM_validation_entries.append(pis_validation)
        GFM_test_entries.append(pis_test)
    # Combine all the predictions: (instances, labels, classes) per split.
    pis_train = np.stack(GFM_train_entries).transpose(1, 0, 2)
    pis_validation = np.stack(GFM_validation_entries).transpose(1, 0, 2)
    pis_test = np.stack(GFM_test_entries).transpose(1, 0, 2)
    # Drop the "zero positives" column and pad each matrix to n_labels columns.
    pis_train_final = [complete_matrix_columns_with_zeros(
        mat[:, 1:], len=n_labels) for mat in pis_train]
    pis_validation_final = [complete_matrix_columns_with_zeros(
        mat[:, 1:], len=n_labels) for mat in pis_validation]
    pis_test_final = [complete_matrix_columns_with_zeros(
        mat[:, 1:], len=n_labels) for mat in pis_test]
    # Compute optimal predictions for F1 and F2.
    for beta in [1, 2]:
        GFM = GeneralFMaximizer(beta, n_labels)
        # Run GFM algo on this output
        (optimal_predictions_train, E_F_train) = GFM.get_predictions(predictions=pis_train_final)
        (optimal_predictions_validation, E_F_validation) = GFM.get_predictions(
            predictions=pis_validation_final)
        (optimal_predictions_test, E_F_test) = GFM.get_predictions(predictions=pis_test_final)
        # Evaluate F score
        F_train = compute_F_score(y_true_train, optimal_predictions_train, t=0.5, beta=beta)
        F_validation = compute_F_score(
            y_true_validation, optimal_predictions_validation, t=0.5, beta=beta)
        F_test = compute_F_score(y_true_test, optimal_predictions_test, t=0.5, beta=beta)
        print('GFM_MLC ({})'.format(dataset))
        print('-' * 50)
        # print('F{} score on training data: {:.4f}'.format(beta, F_train))
        # print('F{} score on validation data: {:.4f}'.format(beta, F_validation))
        print('F{} score on test data: {:.4f}'.format(beta, F_test))
        # Store test set predictions to submit to Kaggle
        if (dataset == 'KAGGLE_PLANET') and (beta == 2):
            # Map predictions to filenames
            def filepath_to_filename(s): return os.path.basename(
                os.path.normpath(s)).split('.')[0]
            test_filenames = [filepath_to_filename(f) for f in df_test['full_path']]
            GFM_predictions_mapping = dict(
                zip(test_filenames, [csv_helpers.decode_label_vector(f) for f in optimal_predictions_test]))
            # Create submission file
            csv_helpers.create_submission_file(
                GFM_predictions_mapping, name='Planet_GFM_MC_labelwise')
def main():
    """Parse command-line options and launch the GFM-MLC experiment."""
    cli = argparse.ArgumentParser(description='GFM_MLC')
    cli.add_argument('dataset', type=str, help='dataset')
    cli.add_argument('c', type=float, help='amount of regularization')
    GFM_MLC(cli.parse_args())
if __name__ == "__main__":
sys.exit(main())
# main()
| [
"stijn.decubber@ugent.be"
] | stijn.decubber@ugent.be |
5ad042bd73c818fb6e254df6b4cf72c179ab9b10 | 87b904ebf11d416567a7e49b91b8e9934f67c6f3 | /insert_row_simple.py | e642a918cd16c4942e211712d5181e4c9c09765b | [
"MIT"
] | permissive | NathanKr/pandas-playground | a701f524aa48f22f6680e48c597206e10f8222e5 | a5355c59cb61ca3a7dcce590ed42d56a6b943783 | refs/heads/main | 2023-06-05T11:07:52.061327 | 2021-07-02T02:35:15 | 2021-07-02T02:35:15 | 328,917,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import pandas as pd
df = pd.DataFrame([])
# most simple insert row
# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported way to add rows.
df = pd.concat([df, pd.DataFrame([[1, 2, 3]])], ignore_index=True)  # insert at the end
df = pd.concat([df, pd.DataFrame([[4, 5, 6]])], ignore_index=True)  # insert at the end
print(df)
"natankrasney@gmail.com"
] | natankrasney@gmail.com |
7972415cb517ef3bfc66323af6cb5649b2c53fb7 | c705252e5368efab6324f2c1716d50002ad22e80 | /1-2 first_exam_2.py | 7898110c7f46dc85665e8b2452fb488bb3fa3e84 | [] | no_license | younkyounghwan/python-exam | dd93124b91bc1d4f2690e9e3e9cb58ff8ef5623d | c0ed718d5dfcde65320da7c30f9a014b00e35151 | refs/heads/master | 2020-04-05T03:48:50.878125 | 2018-11-07T10:20:38 | 2018-11-07T10:20:38 | 156,528,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | """
day 2
"""
print("day 2")
"""
"""
#
f = 3.4
print(f)
i = 1
print(i)
b = True
print(b)
s ="1"
print(i+f)
#print(s+i)
print(int(s)+i)
print(s+int(s))
i = 57
j = 28
| [
"youn7117@naver.com"
] | youn7117@naver.com |
31d025fb82a70ab3eec3800b715a265d2df76bf7 | 5301656114df4d80c3353536d85e1d15829b9839 | /Frontales/.metadata/.plugins/org.eclipse.core.resources/.history/4b/3004f7adcb4500111f76ccb337b0ec7c | 6cadea6b43bd7bbc821b8e36d65d68fbe7346c30 | [] | no_license | Ducatel/TeXloud | 5f383c3fa2c27b01555574fd513d6f551e302b81 | 38f8be8857e1c6de2e103bbbe39707b49e1375aa | refs/heads/master | 2021-01-01T19:34:51.563829 | 2012-03-01T12:05:13 | 2012-03-01T12:05:13 | 2,885,054 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,575 | #!/usr/bin/python
# -*-coding:utf-8 -*
'''
Created on 10 dec. 2011
@author: Davis Ducatel
'''
import socket
from re import match
import threading
import json
import Ordonnanceur
class Frontal(object):
    """Compilation front-end server.

    Listens for JSON requests from the web server and dispatches them to the
    back-end servers. Holds:
      - its IP address and listening port,
      - its server socket,
      - a scheduler (Ordonnanceur) for the compilation servers,
      - a scheduler (Ordonnanceur) for the data servers.
    """

    def __init__(self, adresse, port):
        """Build the front-end server.

        @param adresse: IP address the server binds to (dotted-quad string)
        @param port: listening port number (int)
        @raise ValueError: if the port or the address is malformed
        """
        regexAdresse = "^[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}$"
        regexPort = "^[0-9]{1,5}$"
        if match(regexAdresse, adresse) and match(regexPort, str(port)):
            if isinstance(port, int):
                self._adresse = adresse
                self._port = port
            else:
                raise ValueError
        else:
            raise ValueError
        self._ordonnanceurData = Ordonnanceur.Ordonnanceur("./../fichierServeur.xml", "data")
        self._ordonnanceurCompilation = Ordonnanceur.Ordonnanceur("./../fichierServeur.xml", "compilation")
        # FIX: the original snapshot used self._messageEnd in routeRequest
        # without ever defining it. An empty terminator keeps the payload
        # unchanged -- TODO confirm the real end-of-message marker.
        self._messageEnd = b""

    def lanceServeur(self):
        """Put the socket in listening mode, then accept connections and
        handle each one in a new thread."""
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock.bind((self._adresse, self._port))
        self._sock.listen(5)
        while 1:
            client, addr = self._sock.accept()
            # FIX: the snapshot targeted self.getTrameOfHTTPServer, which does
            # not exist; the actual handler is getRequestOfHTTPServer.
            threading.Thread(target=self.getRequestOfHTTPServer, args=(client, addr)).start()

    def getRequestOfHTTPServer(self, client, addr):
        """Read the complete request sent by the web server, then process it."""
        taille = 1
        messageComplet = ""
        while taille > 0:
            message = client.recv(1024)
            message = message.decode()
            messageComplet += message
            taille = len(message)
        client.close()
        # FIX: the snapshot called the non-existent self.examineRequete;
        # routeRequest is the dispatcher.
        self.routeRequest(messageComplet)

    def routeRequest(self, requeteJSON):
        """Route the request to the matching handler so it is reformatted,
        then forward it to the selected back-end (data) server.

        @param requeteJSON: the request to examine (JSON string)
        """
        requete = json.loads(requeteJSON)
        if requete['label'] == "create":
            adresseIP, port, req = self.requestCreateNewUserDataSpace(requete)
        elif requete['label'] == "getProject":
            adresseIP, port, req = self.requestGetProject(requete)
        elif requete['label'] == "compile":
            adresseIP, port, req = self.requestCompile(requete)
        elif requete['label'] == "getFile":
            adresseIP, port, req = self.requestGetFile(requete)
        elif requete['label'] == "deleteFile":
            adresseIP, port, req = self.requestDeleteFile(requete)
        elif requete['label'] == "deleteProject":
            adresseIP, port, req = self.requestDeleteProject(requete)
        elif requete['label'] == "sync":
            adresseIP, port, req = self.requestSync(requete)
        # NOTE(review): an unknown label leaves adresseIP/port/req unbound and
        # raises here, as in the original snapshot -- confirm intended policy.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((adresseIP, port))
        # FIX: sockets require bytes on Python 3; json.dumps returns str.
        s.send(json.dumps(req).encode())
        s.send(self._messageEnd)
        s.close()

    def requestCreateNewUserDataSpace(self, requete):
        """Ask a data server to create a new storage space for the user.

        @param requete: request to reformat and route (Python dict)
        @return: (data-server IP, data-server port, request dict)
        """
        # Pick a data server from the scheduler.
        serveur = self._ordonnanceurData.getServeur()
        return serveur.adresseIP, serveur.port, requete

    def requestGetProject(self, requete):
        """Ask the data server named in the request to send back a project.

        @param requete: request to reformat and route (Python dict)
        @return: (data-server IP, data-server port, request dict)
        """
        adresseIP = requete.pop('servDataIp')
        port = requete.pop('servDataPort')
        return adresseIP, port, requete

    def requestCompile(self, requete):
        """Ask the data server to launch a compilation through a compilation
        server picked by the compilation scheduler.

        @param requete: request to reformat and route (Python dict)
        @return: (data-server IP, data-server port, request dict)
        """
        adresseIP = requete.pop('servDataIp')
        port = requete.pop('servDataPort')
        serveur = self._ordonnanceurCompilation.getServeur()
        requete['servCompileIP'] = serveur.adresseIP
        requete['servCompilePort'] = serveur.port
        return adresseIP, port, requete

    def requestGetFile(self, requete):
        """Ask the data server to send back a file.

        @param requete: request to reformat and route (Python dict)
        @return: (data-server IP, data-server port, request dict)
        """
        adresseIP = requete.pop('servDataIp')
        port = requete.pop('servDataPort')
        return adresseIP, port, requete

    def requestDeleteFile(self, requete):
        """Ask the data server to delete a file.

        @param requete: request to reformat and route (Python dict)
        @return: (data-server IP, data-server port, request dict)
        """
        adresseIP = requete.pop('servDataIp')
        port = requete.pop('servDataPort')
        return adresseIP, port, requete

    def requestDeleteProject(self, requete):
        """Ask the data server to delete a project.

        @param requete: request to reformat and route (Python dict)
        @return: (data-server IP, data-server port, request dict)
        """
        adresseIP = requete.pop('servDataIp')
        port = requete.pop('servDataPort')
        return adresseIP, port, requete

    def requestSync(self, requete):
        """Ask the data server to run a synchronisation.

        @param requete: request to reformat and route (Python dict)
        @return: (data-server IP, data-server port, request dict)
        """
        adresseIP = requete.pop('servDataIp')
        port = requete.pop('servDataPort')
        return adresseIP, port, requete
| [
"hannibal@hannibal-laptop.(none)"
] | hannibal@hannibal-laptop.(none) | |
c116c570c2a56abc674c9fb63d86381c90576e47 | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /leetcode/1484.linked-list-in-binary-tree/1484.linked-list-in-binary-tree.py | 9077aff3937204041003e1371d38f9ce9273451b | [] | no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def isSubPath(self, head: 'ListNode', root: 'TreeNode') -> bool:
        """Return True if the linked list starting at `head` appears as a
        downward path (parent to child) somewhere in the binary tree.

        The original file left this method as an empty stub; this implements
        the standard recursive solution: try to match the list starting at
        every tree node.
        """

        def _matches_from(node: 'ListNode', tree: 'TreeNode') -> bool:
            # The whole remaining list has been matched.
            if node is None:
                return True
            # Ran out of tree before the list ended.
            if tree is None:
                return False
            return (tree.val == node.val
                    and (_matches_from(node.next, tree.left)
                         or _matches_from(node.next, tree.right)))

        if root is None:
            return False
        # Match anchored at this node, or anywhere in either subtree.
        return (_matches_from(head, root)
                or self.isSubPath(head, root.left)
                or self.isSubPath(head, root.right))
"guibasconti@gmail.com"
] | guibasconti@gmail.com |
2e81ea805b01eef0c74165836d792b342caab99c | 32d5a407f68d28929f06d63e81896c087ac8f730 | /tests/firsttest_linear.py | f056fa0fdcd649e0ef47c1b6fa21687f10ed0898 | [] | no_license | Brossollet-Antonin/Ultrametric-benchmark | 5e9d8ed8d79cbfff3f2ed831e66488c582654cc9 | 74d3a00e4a7412ea4e058b7fd0c1f0b873a8bf8f | refs/heads/master | 2022-08-29T21:25:50.430748 | 2021-02-02T11:11:43 | 2021-02-02T11:11:43 | 232,113,332 | 1 | 0 | null | 2022-08-10T19:11:55 | 2020-01-06T14:04:49 | Jupyter Notebook | UTF-8 | Python | false | false | 3,768 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 15:35:15 2019
@author: Antonin
"""
import artificial_dataset
import algo
import torch
import neuralnet
import testing
import control
import random
from copy import deepcopy
import sequence_generator_temporal_noself as sequence_generator_temporal
device = torch.device("cpu")
print(device)
#dataset = artificial_dataset.artificial_dataset(depth=depth, branching=data_branching, data_sz=200, class_sz_train=20000,
# class_sz_test=1000, ratio_type='exponnential', ratio_value=2)
#dataset.create()
trainer = algo.training('temporal correlation', 'reservoir sampling', dataset=dataset,
task_sz_nbr=10,
tree_depth=depth, preprocessing=False, device=device, sequence_length=sequence_length, energy_step=step, T=T,
tree_branching=tree_branching, proba_transition=proba_transition)
netfc_original = neuralnet.Net_FCRU()
netfc_original.to(device)
original_accuracy = [testing.testing_final(netfc_original, dataset, device)[0]]
train_data_rates=trainer.train_data() #Stock rates (if not a random process) and data for training
train_data, rates, train_sequence = train_data_rates[0], train_data_rates[1], train_data_rates[2]
for i in range(test_nbr):
training_range = (i*test_stride, (i+1)*test_stride)
algo.learning_ER(netfc_original, trainer, train_data_rates, mem_sz=memory_sz,
batch_sz=10, lr=0.01, momentum=0.5, training_range=training_range)
original_accuracy_current, test_data = testing.testing_final(netfc_original, dataset, device)
original_accuracy.append(original_accuracy_current)
print("--- Start shuffle training ---")
netfc_shuffle = neuralnet.Net_FCRU()
netfc_shuffle.to(device)
shuffle_accuracy = [testing.testing_final(netfc_shuffle, dataset, device)[0]]
control_data_shuffle = deepcopy(train_data)
random.shuffle(control_data_shuffle)
for i in range(test_nbr):
training_range = (i*test_stride, (i+1)*test_stride)
control.shuffle_sequence(netfc_shuffle, trainer, control_data_shuffle, mem_sz=memory_sz,
batch_sz=10, lr=0.01, momentum=0.5, training_range=training_range)
shuffle_accuracy_current, _ = testing.testing_final(netfc_shuffle, dataset, device)
shuffle_accuracy.append(shuffle_accuracy_current)
print("--- Start labels training ---")
netfc_labels = neuralnet.Net_FCRU()
netfc_labels.to(device)
labels_accuracy = [testing.testing_final(netfc_labels, dataset, device)[0]]
permutation = [i for i in range(max(train_sequence)+1)]
random.shuffle(permutation)
control_sequence = [permutation[i] for i in train_sequence]
control_data_labels = sequence_generator_temporal.training_sequence(control_sequence, trainer.dataset)
for i in range(test_nbr):
training_range = (i*test_stride, (i+1)*test_stride)
control.shuffle_labels(netfc_labels, trainer, control_data_labels, mem_sz=memory_sz,
batch_sz=10, lr=0.01, momentum=0.5, training_range=training_range)
labels_accuracy_current, _ = testing.testing_final(netfc_labels, dataset, device)
labels_accuracy.append(labels_accuracy_current)
#
#netfc_labels2 = neuralnet.Net_FCL()
#netfc_labels2.to(device)
#control_sequence_labels2, labels_accuracy2 = control.shuffle_labels(netfc_labels2, trainer, train_sequence, mem_sz=memory_sz,
# batch_sz=10, lr=0.01, momentum=0.5)
compteur = [0 for k in range(len(dataset.train_data))]
for k in train_sequence:
compteur[k] += 1
exec(open("./sequence_plot.py").read()) | [
"noreply@github.com"
] | noreply@github.com |
28cd6eb7e066d5beef36ebf0143c8b5c570d2b49 | ff0b413d9c72b7eb30ce3092a4a3693134844f32 | /display-departure-times.py | 625a8edfe036213e5ea1c31b247af09f66261395 | [] | no_license | kastrom/departure-board | 4c6f6a67b0ca2b887745bc8c24211a79ef3257e1 | bd9e3fea5775ade8e9c524679a6543952c8b4d1d | refs/heads/master | 2020-03-06T19:17:15.827497 | 2018-05-03T15:54:04 | 2018-05-03T15:54:04 | 127,024,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,430 | py | #!/usr/bin/env python
# Display a runtext with double-buffering.
from samplebase import SampleBase
from rgbmatrix import graphics
import time
class DisplayDepartureTimes(SampleBase):
    """Render a fixed list of departures on an RGB LED matrix."""
    # class DisplayDepartureTimes(SampleBase, DepartureTimes):

    def __init__(self, *args, **kwargs):
        super(DisplayDepartureTimes, self).__init__(*args, **kwargs)
        # Extra CLI option: which BDF font file (under fonts/) to draw with.
        self.parser.add_argument("-font", "--font-input", help="The font to pass in", default="tom-thumb.bdf")

    def run(self):
        """Draw four departure rows, then hold the display for 100 seconds."""
        # DepartureTimes.getDepartureTimes()
        canvas = self.matrix
        font = graphics.Font()
        font.LoadFont("fonts/" + self.args.font_input)
        # Layout constants (pixels): row height, name width budget, glyph
        # width, and the column where a single-digit time starts.
        lineHeight = 8
        maxLength = 11
        charWidth = 4
        baseTimePosition = 47
        # NOTE(review): these colors are defined but never used below.
        trainColor = graphics.Color(241, 171, 0)
        boatColor = graphics.Color(0, 171, 255)
        # Hard-coded sample data: [destination, minutes-until-departure].
        departures = [["Waterloo", "3"], ["Embankment", "5"], ["Waterloo", "9"], ["Waterloo", "17"]]
        for n in range(4):
            # test to see if there is a single digit time
            if len(departures[n][1]) == 1:
                # display the time component in the appropriate position
                timePosition = baseTimePosition
                # if the departure name length is above the maximum
                if len(departures[n][0]) >= maxLength:
                    # truncate the name
                    departures[n][0] = departures[n][0][:maxLength-1] + "_"
            else:
                # if there is more than one digit, move the time 4 digits to the left
                timePosition = baseTimePosition - charWidth
                # this time test for a shorter truncation length
                if len(departures[n][0]) >= maxLength - 1:
                    departures[n][0] = departures[n][0][:maxLength-2] + "_"
            textColor = graphics.Color(100, 100, 100)
            y = (lineHeight*(n+1))-2
            # Draw name, time and "min" suffix on this row.
            graphics.DrawText(canvas, font, 1, y, textColor, departures[n][0])
            graphics.DrawText(canvas, font, timePosition, y, textColor, departures[n][1])
            graphics.DrawText(canvas, font, 52, y, textColor, "min")
        time.sleep(100)   # show display for 100 seconds before exit
# Main function
if __name__ == "__main__":
    #departure_times = DepartureTimes()
    display_departure_times = DisplayDepartureTimes()
    # process() comes from SampleBase; when it returns falsy, show usage.
    if (not display_departure_times.process()):
        display_departure_times.print_help()
| [
"krisfields@gmail.com"
] | krisfields@gmail.com |
ac35789c652d5665603fa9ac264c13af26d33213 | 10988682861172a52dcb2a5d25e6f4d61f628124 | /users/migrations/0001_initial.py | 4d529e84b10c4d8c84a3c1de3ed261144582b2db | [] | no_license | whereisyourpet/petfinder_be | 99b433939c3ccca6a6b79d44f56d51098437a797 | aa853c8a3dfa9652b965199b2606ee8c3eeb568b | refs/heads/master | 2020-04-25T13:42:49.130137 | 2019-03-13T12:41:14 | 2019-03-13T12:41:14 | 172,817,855 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,800 | py | # Generated by Django 2.1.7 on 2019-03-11 02:08
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('nickname', models.CharField(max_length=50, verbose_name='昵称')),
('location', models.CharField(blank=True, max_length=200, verbose_name='地址')),
('has_pet', models.BooleanField(default=False, verbose_name='是否有宠物')),
('willing', models.BooleanField(default=False, verbose_name='收养意愿')),
('number', models.IntegerField(default=0, verbose_name='收养个数')),
('married', models.BooleanField(default=False, verbose_name='是否已婚')),
('phone', models.CharField(blank=True, max_length=12, verbose_name='电话')),
('gender', models.BooleanField(choices=[(0, '男'), (1, '女')], default=0, max_length=1, verbose_name='性别')),
('description', models.CharField(default='这个人很懒,什么都没有留下', max_length=300, verbose_name='个性签名')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"yan_sy@126.com"
] | yan_sy@126.com |
3136dc4439820a55acbd7a518b4016742c59474d | 31cc157b0167e4878309b89bab04b72c318e5406 | /566. Reshape the Matrix.py | 449cd2d6df2a75d803abc1f68214f8091d4cade5 | [] | no_license | juneepa/LeetCode-Algorithms | b026b8100fc05642fa4e1c03f873ee1d661b766b | 37cbb5351634c2fb6e384c0d116581c63f41caa8 | refs/heads/master | 2021-09-07T20:51:59.111843 | 2018-02-28T23:32:24 | 2018-02-28T23:32:24 | 113,538,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | ## https://leetcode.com/problems/reshape-the-matrix/description/
## Check website above for more details
## Easy, 165ms, 42.75%
## Not a great work but very straightforward
class Solution(object):
def matrixReshape(self, nums, r, c):
## check if r and c make sense
if r * c != len(sum(nums, [])):
return nums
flat = sum(nums, [])
new = [[None] * c for _ in range(r)]
a = 0
for i in range(r):
for j in range(c):
new[i][j] = flat[a]
a += 1
return new
| [
"noreply@github.com"
] | noreply@github.com |
5b149dcddea3f85d1c78a37975bfe212ed99dabc | 5372114813c1812e8bdcbe9122fd96ea8403cc4b | /blog/urls.py | 5302338f0518839940f6df052f3563598130c89b | [] | no_license | EduardoLopezRodriguez/socnet | b3f59dab5d8db5b09a480fa562214e33e42a3f55 | 22c79f0acc0d2d71f376799ab09fb5b9b3a360f6 | refs/heads/master | 2023-04-25T20:53:19.976551 | 2021-05-09T20:40:03 | 2021-05-09T20:40:03 | 365,840,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.HomeListView.as_view(), name='home'),
path('post/<int:pk>/', views.PostDetailView.as_view(), name='post_detail'),
path('post/new/', views.PostCreateView.as_view(), name='post_create'),
path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post_update'),
path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post_delete'),
path('user/<str:username>/', views.UserPostListView.as_view(), name='user_posts'),
]
| [
"e94129@outlook.com"
] | e94129@outlook.com |
7e8bd342fc0bb96f6ba727e866bcc18731ac5afa | d5735851b605e8960ca321c0e332c7ed810f3e6d | /Python/files/eatenApples.py | 213e017a02f984a27f4332e28fc80d8f3097ee15 | [] | no_license | pyj4104/LeetCode-Practice | 6ed0cffd3605be6e187bedeb99e3b4b430604913 | 6a7d033bfd687ad2a0d79ac6a7f50ace1625f631 | refs/heads/master | 2023-03-12T00:23:50.913239 | 2021-02-25T03:38:11 | 2021-02-25T03:38:11 | 306,699,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | import heapq as h
class Solution:
def eatenApples(self, apples: [int], days: [int]) -> int:
pQueue = []
dateToday = 0
numEaten = 0
for i in range(len(days)):
h.heappush(pQueue, [days[dateToday]+dateToday, apples[dateToday]])
foundStoredApple = False
while pQueue and not foundStoredApple:
thingsToEat = h.heappop(pQueue)
if thingsToEat[0] > dateToday and thingsToEat[1] > 0:
foundStoredApple = True
if foundStoredApple:
numEaten += 1
thingsToEat[1] -= 1
if thingsToEat[0] > dateToday and thingsToEat[1] > 0:
h.heappush(pQueue, thingsToEat)
dateToday += 1
while pQueue:
thingsToEat = h.heappop(pQueue)
if thingsToEat[0] > dateToday and thingsToEat[1] > 0:
numEaten += 1
thingsToEat[1] -= 1
else:
continue
h.heappush(pQueue, thingsToEat)
dateToday += 1
return numEaten
| [
"pyj4104@hotmail.com"
] | pyj4104@hotmail.com |
ed4cfb6f7353b9a3d34d98921ab407e804cfbdbb | 982336a4698cf0fedbdd7b24c4aa8064339bba19 | /plmn.py | 1b12088b5a36791b3cededb2b5c29a7084a233c6 | [] | no_license | blackyreviewsyt/privet | 47a3fe238682743be74cf4889eaf887aacfe91c3 | cf43767df87d7a3b04b06a27d7643c57ad923d14 | refs/heads/main | 2023-01-12T13:44:11.451642 | 2020-11-15T13:43:09 | 2020-11-15T13:43:09 | 313,040,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | str = input("Введите строку ")
pl = 0
mn = 0
po = 0
mo = 0
for i in range(len(str) ):
if str[i] == '+':
pl += 1
if str[i] == '-':
mn += 1
if str[i : i + 2] == "+0":
po += 1
if str[i : i + 2] == "-0":
mo += 1
print("В это строке ", pl, " плюс(ов)")
print("В это строке ", mn, " минус(ов)")
print("В это строке ", po, " плюс(ов) после которы идёт 0")
print("В это строке ", mo, " минус(ов) после которы идёт 0")
| [
"blackreviewsyt@gmail.com"
] | blackreviewsyt@gmail.com |
5eaa73066b3c9d35699c4b176d32305dd1832ac5 | 5e6155ad914b8bf67c0b8eb07baf12dccdcc0dbb | /email_definitions/culture.py | 007fc0614c9430ad94a3ebad9366e3e8e82079fb | [
"Apache-2.0"
] | permissive | fabio-pintodacosta/gu-email-renderer | 08cc85b06580768ba89c262f1633ffcbbfbf02a5 | f9f62f6e8048cc0a6f79df734b6217f3dbcec8fa | refs/heads/master | 2020-12-27T21:16:41.510316 | 2015-11-03T13:36:57 | 2015-11-03T13:36:57 | 39,622,245 | 0 | 0 | null | 2015-07-24T09:02:11 | 2015-07-24T09:02:11 | null | UTF-8 | Python | false | false | 4,786 | py | import pysistence as immutable
import mail_renderer as mr
import handlers
import data_source as ds
import data_sources as dss
from guardianapi.apiClient import ApiClient
from container_api import container
client = ApiClient(mr.base_url, mr.api_key, edition="uk")
class FilmToday(handlers.EmailTemplate):
recognized_versions = ['v1', 'v2']
ad_tag = 'email-film-today'
ad_config = immutable.make_dict({
'leaderboard': 'Top'
})
data_sources = immutable.make_dict({
'v1': {
'film_today_latest': ds.FilmTodayLatestDataSource(client)
},
'v2': {
'film_front': container.for_id('6d84cd8d-d159-4e9a-ba2f-8852528d2d03')
}
})
priority_list = immutable.make_dict({
'v1': [('film_today_latest', 10)],
'v2': [('film_front', 10)],
})
template_names = immutable.make_dict({
'v1': 'culture/film-today/v1',
'v2': 'culture/film-today/v2',
})
class SleeveNotes(handlers.EmailTemplate):
recognized_versions = ['v1', 'v2', 'v3']
ad_tag = 'email-sleeve-notes'
ad_config = {
'leaderboard': 'Top',
'leaderboard2': 'Bottom',
}
music_editors_picks = dss.general.ItemDataSource(content_id='music', show_editors_picks=True, tags=['-tone/news'])
base_data_sources = immutable.make_dict({
'music_most_viewed': dss.general.ItemDataSource(content_id='music', show_most_viewed=True),
'music_picks': music_editors_picks,
'music_blog': dss.general.ItemDataSource(content_id='music/musicblog'),
'music_watch_listen': dss.general.ItemDataSource(content_id='music', tags=['(type/video|type/audio)']),
'music_further': music_editors_picks,
})
data_sources = immutable.make_dict({
'v1' : base_data_sources,
'v2' : base_data_sources,
'v3' : base_data_sources,
})
priorities = immutable.make_list(('music_most_viewed', 3),
('music_picks', 5),
('music_blog', 5),
('music_watch_listen', 5),
('music_further', 3))
priority_list = immutable.make_dict({
'v1': priorities,
'v2': priorities,
'v3': priorities
})
template_names = immutable.make_dict({
'v1': 'culture/sleeve-notes/v1',
'v2': 'culture/sleeve-notes/v2',
'v3': 'culture/sleeve-notes/v3',
})
class CloseUp(handlers.EmailTemplate):
recognized_versions = ['v1', 'v2', 'v3']
ad_tag = 'email-close-up'
ad_config = {
'leaderboard_v1': 'Top',
'leaderboard_v2': 'Bottom'
}
base_data_sources = immutable.make_dict({
'film_week': ds.FilmOfTheWeekDataSource(client),
'film_picks': ds.FilmEditorsPicksDataSource(client),
'film_show': ds.FilmShowDataSource(client),
'film_most_viewed': ds.FilmMostViewedDataSource(client),
'film_interviews': ds.FilmInterviewsDataSource(client),
'film_blogs': ds.FilmBlogsDataSource(client),
'film_quiz': ds.FilmQuizDataSource(client)
})
data_sources = immutable.make_dict({
'v1': base_data_sources,
'v2': base_data_sources,
'v3': base_data_sources,
})
priority_list = {}
priority_list['v1'] = [('film_week', 1), ('film_show', 1), ('film_interviews', 3),
('film_blogs', 5), ('film_quiz', 1), ('film_picks', 2), ('film_most_viewed', 3)]
priority_list['v2'] = priority_list['v1']
priority_list['v3'] = priority_list['v1']
template_names = immutable.make_dict({
'v1': 'culture/close-up/v1',
'v2': 'culture/close-up/v2',
'v3': 'culture/close-up/v3'
})
class Bookmarks(handlers.EmailTemplate):
recognized_versions = immutable.make_list('v1')
ad_tag = 'email-bookmarks'
ad_config = immutable.make_dict({
'leaderboard': 'Top',
'leaderboard2': 'Bottom'
})
base_data_sources = immutable.make_dict({
'books_picks': dss.culture.BooksEditorsPicks(client),
'book_reviews': dss.culture.BookReviews(client),
'books_blog': dss.culture.BooksBlog(client),
'book_podcasts': dss.culture.BookPodcasts(client),
'books_most_viewed': dss.culture.BooksMostViewed(client),
'how_to_draw': dss.culture.HowToDraw(client),
})
data_sources = immutable.make_dict({
'v1' : base_data_sources,
})
priority_list = immutable.make_dict({
'v1': immutable.make_list(
('books_picks', 5),
('books_most_viewed', 3),
('book_reviews', 3),
('books_blog', 3),
('book_podcasts', 1),
('how_to_draw', 1))
})
template_names = immutable.make_dict({
'v1': 'culture/bookmarks/v1',
})
| [
"robert.rees@guardian.co.uk"
] | robert.rees@guardian.co.uk |
26436c7f8ca5002a0efc19ff4ad758d7c00b6fb1 | 2f919b97aea7f0698830658073ab939a0e84133f | /Alexey/Numbers/fast_exponention.py | bbbec64e211d2fde85ce476602466f1676cc13eb | [] | no_license | glock3/Learning- | 1efcf9174d77363b2ecf84a688dcda82ae8ee405 | f80c8d8ac1bf6eb6472726fe86369149bb5f86ad | refs/heads/master | 2023-01-08T23:21:22.665561 | 2020-11-08T23:54:53 | 2020-11-08T23:54:53 | 290,705,463 | 0 | 2 | null | 2020-11-08T23:54:54 | 2020-08-27T07:18:49 | Python | UTF-8 | Python | false | false | 506 | py | def main():
a = int(input("Enter number a: "))
b = int(input("Enter number b: "))
o_log_n(power_of_nums(a, b))
def o_log_n(result):
temp = result
big_o = 0
while temp > 1:
temp = temp // 2
big_o += 1
print('Complexity is: ' + str(big_o))
def power_of_nums(a, b):
result = 1
if b != 0:
for index in range(b):
result = result * a
print('Power of nums is ' + str(result))
return result
if __name__ == '__main__':
main() | [
"work@localhost.localdomain"
] | work@localhost.localdomain |
08a8ecac2c1bbc0eabcd430943c305af664fc09c | ce1cf00a7e8d8b45701f5bc575434958dcc53c7e | /plugins/oauth/server/providers/globus.py | f821642694ff8d7f1d0fe3654bc4b97233f042c5 | [
"Apache-2.0"
] | permissive | lslab/girder | 1fd4cdffcbc4e581d3103dd552262921a29ab540 | b40f7f1cd3e6a516bf858b026f040c93d0928bbe | refs/heads/master | 2020-12-24T12:12:37.399019 | 2017-03-21T21:56:15 | 2017-03-21T21:56:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,567 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from six.moves import urllib
from girder.api.rest import getApiUrl, RestException
from .base import ProviderBase
from .. import constants
class Globus(ProviderBase):
_AUTH_URL = 'https://auth.globus.org/v2/oauth2/authorize'
_AUTH_SCOPES = ('urn:globus:auth:scope:auth.globus.org:view_identities',
'openid', 'profile', 'email')
_TOKEN_URL = 'https://auth.globus.org/v2/oauth2/token'
_API_USER_URL = 'https://auth.globus.org/v2/oauth2/userinfo'
def getClientIdSetting(self):
return self.model('setting').get(
constants.PluginSettings.GLOBUS_CLIENT_ID)
def getClientSecretSetting(self):
return self.model('setting').get(
constants.PluginSettings.GLOBUS_CLIENT_SECRET)
@classmethod
def getUrl(cls, state):
clientId = cls.model('setting').get(
constants.PluginSettings.GLOBUS_CLIENT_ID)
if clientId is None:
raise Exception('No Globus client ID setting is present.')
callbackUrl = '/'.join((getApiUrl(), 'oauth', 'globus', 'callback'))
query = urllib.parse.urlencode({
'response_type': 'code',
'access_type': 'online',
'client_id': clientId,
'redirect_uri': callbackUrl,
'state': state,
'scope': ' '.join(cls._AUTH_SCOPES)
})
return '%s?%s' % (cls._AUTH_URL, query)
def getToken(self, code):
params = {
'grant_type': 'authorization_code',
'code': code,
'client_id': self.clientId,
'client_secret': self.clientSecret,
'redirect_uri': self.redirectUri
}
resp = self._getJson(method='POST', url=self._TOKEN_URL,
data=params,
headers={'Accept': 'application/json'})
if 'error' in resp:
raise RestException(
'Got an error exchanging token from provider: "%s".' % resp,
code=502)
return resp
def getUser(self, token):
headers = {
'Authorization': 'Bearer {}'.format(token['access_token'])
}
resp = self._getJson(method='GET', url=self._API_USER_URL,
headers=headers)
oauthId = resp.get('sub')
if not oauthId:
raise RestException(
'Globus identity did not return a valid ID.', code=502)
email = resp.get('email')
if not email:
raise RestException(
'Globus identity did not return a valid email.', code=502)
name = resp['name'].split()
firstName = name[0]
lastName = name[-1]
return self._createOrReuseUser(oauthId, email, firstName, lastName)
| [
"xarthisius.kk@gmail.com"
] | xarthisius.kk@gmail.com |
ed7b014f42fe52ce025c3ac73b90a0614d8c5b49 | 688fb8b735401e3653078e6e8d0047376dea9398 | /pizzaapp/migrations/0005_alter_ordermodel_ordereditems.py | 9cc30eab39d96c245b2822a5d28a8798891fac18 | [
"MIT"
] | permissive | vivekx01/pizzaapp-v2 | bbf2592580fc2cf1bf76db25f9aadd739046bc2a | 71c028af35955c53564b96ade4ff93c4b59799fe | refs/heads/main | 2023-06-15T00:13:25.874602 | 2021-07-09T06:04:17 | 2021-07-09T06:04:17 | 371,899,282 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | # Generated by Django 3.2 on 2021-05-03 07:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pizzaapp', '0004_rename_email_ordermodel_phone'),
]
operations = [
migrations.AlterField(
model_name='ordermodel',
name='ordereditems',
field=models.CharField(max_length=50),
),
]
| [
"noreply@github.com"
] | noreply@github.com |
a8f8eb18de3d54d6229386e896ac579b2dc8f9a9 | a6fd8b3fefa2654cb8bb188734eb587b6051d743 | /awssaml/api/ADFSService.py | 21c772612ee87f21f4628059f93133f138c41dc9 | [
"MIT"
] | permissive | piotrplenik/awssaml | 7fc0dd51ef07b46989d7a57559953b0b935a85d4 | bebf96260bba47a8ec160b97f806bc7f4a209748 | refs/heads/master | 2023-05-28T19:58:10.716918 | 2019-10-30T20:32:28 | 2019-10-30T20:32:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | import requests
import base64
from requests_ntlm import HttpNtlmAuth
from bs4 import BeautifulSoup
import xml.etree.ElementTree as ET
from .ADFSConnection import ConnectionType, ADFSConnection, NtlmADFSConnection, WebFormADFSConnection
class ADFSService:
def __init__(self, identity_url):
# type: (str) -> None
self.identity_url = identity_url
self.connection = None
def connect(self, type_name, username, password):
# type: (str, str, str) -> None
if type_name == ConnectionType.web_form():
self.connection = WebFormADFSConnection(self.identity_url, username, password)
else:
self.connection = NtlmADFSConnection(self.identity_url, username, password)
self.connection.connect()
def get_aws_roles(self):
# Parse the returned assertion and extract the authorized roles
awsroles = []
root = ET.fromstring(base64.b64decode(self.connection.get_assertion()))
for saml2attribute in root.iter('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'):
if saml2attribute.get('Name') == 'https://aws.amazon.com/SAML/Attributes/Role':
for saml2attributevalue in saml2attribute.iter('{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'):
awsroles.append(saml2attributevalue.text)
# Note the format of the attribute value should be role_arn,principal_arn
# but lots of blogs list it as principal_arn,role_arn so let's reverse
# them if needed
for awsrole in awsroles:
chunks = awsrole.split(',')
if'saml-provider' in chunks[0]:
newawsrole = chunks[1] + ',' + chunks[0]
index = awsroles.index(awsrole)
awsroles.insert(index, newawsrole)
awsroles.remove(awsrole)
return awsroles
| [
"piotr.plenik.consultant@nielsen.com"
] | piotr.plenik.consultant@nielsen.com |
43ba18e1ca65424bedf81bd23bc6740a4005af6f | 70876e11e6a102c34c9d7a626a33bda26fcfbf58 | /gino/schema.py | 192cbe80271c90ff2ee13d025cc0c85d70923146 | [
"BSD-3-Clause"
] | permissive | Urielable/gino | c275ad52b108d9ff3cd24a532d348a32cf3b3cb2 | 226d6df131ee32534fd37849a83e2513df5b5f37 | refs/heads/master | 2020-03-25T03:42:23.093065 | 2018-08-02T08:45:37 | 2018-08-02T08:45:37 | 143,355,539 | 1 | 0 | null | 2018-08-02T23:48:54 | 2018-08-02T23:48:54 | null | UTF-8 | Python | false | false | 14,910 | py | # noinspection PyProtectedMember
from sqlalchemy import exc, util
from sqlalchemy.sql.base import _bind_or_error
from sqlalchemy.sql.ddl import (
AddConstraint,
CreateIndex,
CreateSequence,
CreateTable,
DropConstraint,
DropIndex,
DropSequence,
DropTable,
SchemaDropper,
SchemaGenerator,
SetColumnComment,
SetTableComment,
sort_tables_and_constraints,
)
from sqlalchemy.types import SchemaType
class AsyncVisitor:
async def traverse_single(self, obj, **kw):
# noinspection PyUnresolvedReferences
for v in self._visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return await meth(obj, **kw)
class AsyncSchemaGenerator(AsyncVisitor, SchemaGenerator):
async def _can_create_table(self, table):
self.dialect.validate_identifier(table.name)
effective_schema = self.connection.schema_for_object(table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return (not self.checkfirst or
not (await self.dialect.has_table(self.connection,
table.name,
schema=effective_schema)))
async def _can_create_sequence(self, sequence):
effective_schema = self.connection.schema_for_object(sequence)
return self.dialect.supports_sequences and (
(not self.dialect.sequences_optional or
not sequence.optional) and
(
not self.checkfirst or
not await self.dialect.has_sequence(
self.connection,
sequence.name,
schema=effective_schema)
)
)
async def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
tables_create = []
for t in tables:
if await self._can_create_table(t):
tables_create.append(t)
collection = sort_tables_and_constraints(tables_create)
seq_coll = []
# noinspection PyProtectedMember
for s in metadata._sequences.values():
if s.column is None and await self._can_create_sequence(s):
seq_coll.append(s)
event_collection = [
t for (t, fks) in collection if t is not None
]
await _Async(metadata.dispatch.before_create)(
metadata, self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self)
for seq in seq_coll:
await self.traverse_single(seq, create_ok=True)
for table, fkcs in collection:
if table is not None:
await self.traverse_single(
table, create_ok=True,
include_foreign_key_constraints=fkcs,
_is_metadata_operation=True)
else:
for fkc in fkcs:
await self.traverse_single(fkc)
await _Async(metadata.dispatch.after_create)(
metadata, self.connection,
tables=event_collection,
checkfirst=self.checkfirst,
_ddl_runner=self)
async def visit_table(
self, table, create_ok=False,
include_foreign_key_constraints=None,
_is_metadata_operation=False):
if not create_ok and not await self._can_create_table(table):
return
await _Async(table.dispatch.before_create)(
table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation)
for column in table.columns:
if column.default is not None:
await self.traverse_single(column.default)
if not self.dialect.supports_alter:
# e.g., don't omit any foreign key constraints
include_foreign_key_constraints = None
await self.connection.status(
CreateTable(
table,
include_foreign_key_constraints=include_foreign_key_constraints
))
if hasattr(table, 'indexes'):
for index in table.indexes:
await self.traverse_single(index)
if self.dialect.supports_comments and not self.dialect.inline_comments:
if table.comment is not None:
await self.connection.status(SetTableComment(table))
for column in table.columns:
if column.comment is not None:
await self.connection.status(SetColumnComment(column))
await _Async(table.dispatch.after_create)(
table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation)
async def visit_foreign_key_constraint(self, constraint):
if not self.dialect.supports_alter:
return
await self.connection.status(AddConstraint(constraint))
async def visit_sequence(self, sequence, create_ok=False):
if not create_ok and not await self._can_create_sequence(sequence):
return
await self.connection.status(CreateSequence(sequence))
async def visit_index(self, index):
await self.connection.status(CreateIndex(index))
class AsyncSchemaDropper(AsyncVisitor, SchemaDropper):
async def visit_metadata(self, metadata):
if self.tables is not None:
tables = self.tables
else:
tables = list(metadata.tables.values())
try:
unsorted_tables = []
for t in tables:
if await self._can_drop_table(t):
unsorted_tables.append(t)
collection = list(reversed(
sort_tables_and_constraints(
unsorted_tables,
filter_fn=lambda constraint: False
if not self.dialect.supports_alter
or constraint.name is None else None
)
))
except exc.CircularDependencyError as err2:
if not self.dialect.supports_alter:
util.warn(
"Can't sort tables for DROP; an "
"unresolvable foreign key "
"dependency exists between tables: %s, and backend does "
"not support ALTER. To restore at least a partial sort, "
"apply use_alter=True to ForeignKey and "
"ForeignKeyConstraint "
"objects involved in the cycle to mark these as known "
"cycles that will be ignored."
% (
", ".join(sorted([t.fullname for t in err2.cycles]))
)
)
collection = [(t, ()) for t in unsorted_tables]
else:
util.raise_from_cause(
exc.CircularDependencyError(
err2.args[0],
err2.cycles, err2.edges,
msg="Can't sort tables for DROP; an unresolvable "
"foreign key dependency exists between tables: %s."
" Please ensure that the ForeignKey and "
"ForeignKeyConstraint objects involved in the "
"cycle have names so that they can be dropped "
"using DROP CONSTRAINT."
% (
", ".join(
sorted([t.fullname for t in err2.cycles]))
)
)
)
seq_coll = []
for s in metadata._sequences.values():
if s.column is None and await self._can_drop_sequence(s):
seq_coll.append(s)
event_collection = [
t for (t, fks) in collection if t is not None
]
await _Async(metadata.dispatch.before_drop)(
metadata, self.connection, tables=event_collection,
checkfirst=self.checkfirst, _ddl_runner=self)
for table, fkcs in collection:
if table is not None:
await self.traverse_single(
table, drop_ok=True, _is_metadata_operation=True)
else:
for fkc in fkcs:
await self.traverse_single(fkc)
for seq in seq_coll:
await self.traverse_single(seq, drop_ok=True)
await _Async(metadata.dispatch.after_drop)(
metadata, self.connection, tables=event_collection,
checkfirst=self.checkfirst, _ddl_runner=self)
async def _can_drop_table(self, table):
self.dialect.validate_identifier(table.name)
effective_schema = self.connection.schema_for_object(table)
if effective_schema:
self.dialect.validate_identifier(effective_schema)
return not self.checkfirst or (await self.dialect.has_table(
self.connection, table.name, schema=effective_schema))
async def _can_drop_sequence(self, sequence):
effective_schema = self.connection.schema_for_object(sequence)
return self.dialect.supports_sequences and (
(not self.dialect.sequences_optional or
not sequence.optional) and
(not self.checkfirst or
await self.dialect.has_sequence(
self.connection,
sequence.name,
schema=effective_schema))
)
async def visit_index(self, index):
await self.connection.status(DropIndex(index))
async def visit_table(self, table, drop_ok=False,
_is_metadata_operation=False):
if not drop_ok and not await self._can_drop_table(table):
return
await _Async(table.dispatch.before_drop)(
table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation)
for column in table.columns:
if column.default is not None:
await self.traverse_single(column.default)
await self.connection.status(DropTable(table))
await _Async(table.dispatch.after_drop)(
table, self.connection,
checkfirst=self.checkfirst,
_ddl_runner=self,
_is_metadata_operation=_is_metadata_operation)
async def visit_foreign_key_constraint(self, constraint):
if not self.dialect.supports_alter:
return
await self.connection.status(DropConstraint(constraint))
async def visit_sequence(self, sequence, drop_ok=False):
if not drop_ok and not await self._can_drop_sequence(sequence):
return
await self.connection.status(DropSequence(sequence))
class GinoSchemaVisitor:
__slots__ = ('_item',)
def __init__(self, item):
self._item = item
async def create(self, bind=None, *args, **kwargs):
if bind is None:
bind = _bind_or_error(self._item)
await getattr(bind, '_run_visitor')(AsyncSchemaGenerator,
self._item, *args, **kwargs)
return self._item
async def drop(self, bind=None, *args, **kwargs):
if bind is None:
bind = _bind_or_error(self._item)
await getattr(bind, '_run_visitor')(AsyncSchemaDropper,
self._item, *args, **kwargs)
async def create_all(self, bind=None, tables=None, checkfirst=True):
await self.create(bind=bind, tables=tables, checkfirst=checkfirst)
async def drop_all(self, bind=None, tables=None, checkfirst=True):
await self.drop(bind=bind, tables=tables, checkfirst=checkfirst)
class AsyncSchemaTypeMixin:
async def create_async(self, bind=None, checkfirst=False):
if bind is None:
bind = _bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
await t.create_async(bind=bind, checkfirst=checkfirst)
async def drop_async(self, bind=None, checkfirst=False):
if bind is None:
bind = _bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
await t.drop_async(bind=bind, checkfirst=checkfirst)
async def _on_table_create_async(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
await getattr(t, '_on_table_create_async')(target, bind, **kw)
async def _on_table_drop_async(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
await getattr(t, '_on_table_drop_async')(target, bind, **kw)
async def _on_metadata_create_async(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
await getattr(t, '_on_metadata_create_async')(target, bind, **kw)
async def _on_metadata_drop_async(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
await getattr(t, '_on_metadata_drop_async')(target, bind, **kw)
async def _call_portable_instancemethod(fn, args, kw):
m = getattr(fn.target, fn.name + '_async', None)
if m is None:
return fn(*args, **kw)
else:
kw.update(fn.kwargs)
return await m(*args, **kw)
class _Async:
def __init__(self, listener):
self._listener = listener
async def call(self, *args, **kw):
for fn in self._listener.parent_listeners:
await _call_portable_instancemethod(fn, args, kw)
for fn in self._listener.listeners:
await _call_portable_instancemethod(fn, args, kw)
def __call__(self, *args, **kwargs):
return self.call(*args, **kwargs)
def patch_schema(db):
for st in {'Enum'}:
setattr(db, st, type(st, (getattr(db, st), AsyncSchemaTypeMixin), {}))
| [
"fantix.king@gmail.com"
] | fantix.king@gmail.com |
5a32dd55cd15e3b4e6e7ad9559c5b39183ee2359 | fb2021d19c97b297cfeefaa5e62a51873cb7e5ef | /geekshop/settings.py | 0f817c077b458d450313b8abdef257ee0c9fe28d | [] | no_license | Katerina27/Django_1 | 7ae78d4fd978d2f398daad9bafc8edadf9a67612 | 6c0651e37d16117a4587e8abf1f348886b0c938e | refs/heads/main | 2023-06-26T20:54:55.481720 | 2021-07-19T11:06:58 | 2021-07-19T11:06:58 | 387,422,196 | 0 | 0 | null | 2021-07-31T15:26:39 | 2021-07-19T10:15:58 | CSS | UTF-8 | Python | false | false | 3,505 | py | """
Django settings for geekshop project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-fhn6f%7ac19=zjls7r=gv=nj*im&@(61ud74)1#dos_zg1sgv6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mainapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'geekshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['geekshop/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'geekshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'geekshop','static'),
)
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"noreply@github.com"
] | noreply@github.com |
8d9a07e99f4b4fe1788ee9b53f7d83e9c80abff8 | 9ed71fc3a63610cfe3fd25ba7b6b282e1755386c | /onlineShop/Order/urls.py | 68da976654d0fc42f10fb4643691cf9b2964fa23 | [
"MIT"
] | permissive | alirezaryahi/django-onlineShop | e3fca5ee2535c96411742511982623f6d33b4755 | b36c4a37ac98977862b83f646c2303ec4bb1a6ab | refs/heads/main | 2023-02-02T12:14:47.089604 | 2020-12-21T11:15:08 | 2020-12-21T11:15:08 | 322,706,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | from django.urls import path
from .views import Open_order, add_user_order, remove_order_detail
urlpatterns = [
path('openOrder', Open_order),
path('add_user_order', add_user_order),
path('remove_order_detail/<detail_id>', remove_order_detail),
]
| [
"noreply@github.com"
] | noreply@github.com |
c3ade7808d49bedaf6b0ea9994a731efdeca9974 | 7f9de62b2952a086ea1004d81868a393449cd569 | /calc.py | 9904302736ae5dfe50aa3d78cf8ab89884e3673d | [] | no_license | fastudio4/flask-mill | abbd7b5aea8368a49d21e1c3294a9363210ed9b0 | 2729ba14165a8176919db76a77a2b5d32c87f5fa | refs/heads/master | 2021-06-28T06:05:23.365758 | 2017-09-18T18:22:57 | 2017-09-18T18:22:57 | 103,895,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,560 | py | from math import pi
from statistics import mean
from db import db_milling
class Milling(object):
def __init__(self, material, d_cut, t_cut, max_speed_spindle, max_frequency):
self.material = material
self.d_cut = d_cut
self.t_cut = t_cut
self.max_speed_spindle = max_speed_spindle
self.max_frequency = max_frequency
self.setting = self.min_max_speed()
def spindle_speed(self, cutting_speed):
return int(round((1000 * cutting_speed)/(pi * self.d_cut), 0))
def frequency_speed(self, speed):
return int((speed * self.max_frequency) / self.max_speed_spindle)
def fz_materials(self):
val = int(self.d_cut)
if 0 < val <= 1:
return 0
elif 1 <= val <= 2:
return 1
elif 3 <= val <= 4:
return 2
elif 5 <= val <= 6:
return 3
elif 8 <= val <= 10:
return 4
elif 12 <= val <= 16:
return 5
else:
return None
def feed(self, spindle_speed):
return int(round((spindle_speed * db_milling[self.material]['fz'][self.fz_materials()] * self.t_cut)/60, 0))
def min_max_speed(self):
min_speed = self.spindle_speed(db_milling[self.material]['min_cut'])
max_speed = self.spindle_speed(db_milling[self.material]['max_cut'])
if max_speed > self.max_speed_spindle:
max_speed_cut = int(round((self.max_frequency * self.max_speed_spindle)/max_speed, 0))
min_speed_cut = db_milling[self.material]['min_cut'] - (db_milling[self.material]['max_cut'] - max_speed_cut)
min_speed_spindle = self.spindle_speed(min_speed_cut)
max_speed_spindle = self.spindle_speed(max_speed_cut)
return {
'min_cut': min_speed_cut,
'mean_cut': mean([min_speed_cut, max_speed_cut]),
'max_cut': max_speed_cut,
'min_speed': min_speed_spindle,
'mean_speed': mean([min_speed_spindle, max_speed_spindle]),
'max_speed': max_speed_spindle,
'min_feed': self.feed(min_speed_spindle),
'mean_feed': mean([self.feed(min_speed_spindle), self.feed(max_speed_spindle)]),
'max_feed': self.feed(max_speed_spindle),
'fz': db_milling[self.material]['fz'][self.fz_materials()] * self.t_cut,
'frequency_min': self.frequency_speed(min_speed_spindle),
'frequency_mean': mean([self.frequency_speed(min_speed_spindle), self.frequency_speed(max_speed_spindle)]),
'frequency_max': self.frequency_speed(max_speed_spindle)
}
return {
'min_cut': db_milling[self.material]['min_cut'],
'mean_cut': mean([db_milling[self.material]['min_cut'], db_milling[self.material]['max_cut']]),
'max_cut': db_milling[self.material]['max_cut'],
'min_speed': min_speed,
'mean_speed': mean([min_speed, max_speed]),
'max_speed': max_speed,
'min_feed': self.feed(min_speed),
'mean_feed': mean([self.feed(min_speed), self.feed(max_speed)]),
'max_feed': self.feed(max_speed),
'fz': db_milling[self.material]['fz'][self.fz_materials()] * self.t_cut,
'frequency_min': self.frequency_speed(min_speed),
'frequency_mean': mean([self.frequency_speed(min_speed), self.frequency_speed(max_speed)]),
'frequency_max': self.frequency_speed(max_speed)
}
| [
"fastudio4@gmail.com"
] | fastudio4@gmail.com |
899db2725bd391edbca36a7cb68b75ffd4e74c4a | d3fefacc4e74f2ba6fdae3a03318dbdd9d43d09d | /lib/imdb_util.py | e75c0ef05c34b8f2063dbc0939be48f92cb2238f | [] | no_license | susanmosha/M3D-RPN | dabc9bc0ad084cdc1185c73e532157778d58de43 | d7354226a73eba692eb6455c0ac734ffc1aada47 | refs/heads/master | 2020-12-12T06:17:44.291207 | 2020-02-06T06:41:33 | 2020-02-06T06:41:33 | 234,062,183 | 0 | 0 | null | 2020-01-15T11:07:58 | 2020-01-15T11:07:57 | null | UTF-8 | Python | false | false | 20,578 | py | """
This file contains all image database (imdb) functionality,
such as loading and reading information from a dataset.
Generally, this file is meant to read in a dataset from disk into a
simple custom format for the detetive framework.
"""
# -----------------------------------------
# modules
# -----------------------------------------
import torch
import torch.utils.data as data
import sys
import re
from PIL import Image
from copy import deepcopy
sys.dont_write_bytecode = True
# -----------------------------------------
# custom
# -----------------------------------------
from lib.rpn_util import *
from lib.util import *
from lib.augmentations import *
from lib.core import *
class Dataset(torch.utils.data.Dataset):
"""
A single Dataset class is used for the whole project,
which implements the __init__ and __get__ functions from PyTorch.
"""
def __init__(self, conf, root, cache_folder=None):
"""
This function reads in all datasets to be used in training and stores ANY relevant
information which may be needed during training as a list of edict()
(referred to commonly as 'imobj').
The function also optionally stores the image database (imdb) file into a cache.
"""
imdb = []
self.video_det = False if not ('video_det' in conf) else conf.video_det
self.video_count = 1 if not ('video_count' in conf) else conf.video_count
self.use_3d_for_2d = ('use_3d_for_2d' in conf) and conf.use_3d_for_2d
# use cache?
if (cache_folder is not None) and os.path.exists(os.path.join(cache_folder, 'imdb.pkl')):
logging.info('Preloading imdb.')
imdb = pickle_read(os.path.join(cache_folder, 'imdb.pkl'))
else:
# cycle through each dataset
for dbind, db in enumerate(conf.datasets_train):
logging.info('Loading imdb {}'.format(db['name']))
# single imdb
imdb_single_db = []
# kitti formatting
if db['anno_fmt'].lower() == 'kitti_det':
train_folder = os.path.join(root, db['name'], 'training')
ann_folder = os.path.join(train_folder, 'label_2', '')
cal_folder = os.path.join(train_folder, 'calib', '')
im_folder = os.path.join(train_folder, 'image_2', '')
# get sorted filepaths
annlist = sorted(glob(ann_folder + '*.txt'))
imdb_start = time()
self.affine_size = None if not ('affine_size' in conf) else conf.affine_size
for annind, annpath in enumerate(annlist):
# get file parts
base = os.path.basename(annpath)
id, ext = os.path.splitext(base)
calpath = os.path.join(cal_folder, id + '.txt')
impath = os.path.join(im_folder, id + db['im_ext'])
impath_pre = os.path.join(train_folder, 'prev_2', id + '_01' + db['im_ext'])
impath_pre2 = os.path.join(train_folder, 'prev_2', id + '_02' + db['im_ext'])
impath_pre3 = os.path.join(train_folder, 'prev_2', id + '_03' + db['im_ext'])
# read gts
p2 = read_kitti_cal(calpath)
p2_inv = np.linalg.inv(p2)
gts = read_kitti_label(annpath, p2, self.use_3d_for_2d)
if not self.affine_size is None:
# filter relevant classes
gts_plane = [deepcopy(gt) for gt in gts if gt.cls in conf.lbls and not gt.ign]
if len(gts_plane) > 0:
KITTI_H = 1.65
# compute ray traces for default projection
for gtind in range(len(gts_plane)):
gt = gts_plane[gtind]
#cx2d = gt.bbox_3d[0]
#cy2d = gt.bbox_3d[1]
cy2d = gt.bbox_full[1] + gt.bbox_full[3]
cx2d = gt.bbox_full[0] + gt.bbox_full[2] / 2
z2d, coord3d = projection_ray_trace(p2, p2_inv, cx2d, cy2d, KITTI_H)
gts_plane[gtind].center_in = coord3d[0:3, 0]
gts_plane[gtind].center_3d = np.array(gt.center_3d)
prelim_tra = np.array([gt.center_in for gtind, gt in enumerate(gts_plane)])
target_tra = np.array([gt.center_3d for gtind, gt in enumerate(gts_plane)])
if self.affine_size == 4:
prelim_tra = np.pad(prelim_tra, [(0, 0), (0, 1)], mode='constant', constant_values=1)
target_tra = np.pad(target_tra, [(0, 0), (0, 1)], mode='constant', constant_values=1)
affine_gt, err = solve_transform(prelim_tra, target_tra, compute_error=True)
a = 1
obj = edict()
# did not compute transformer
if (self.affine_size is None) or len(gts_plane) < 1:
obj.affine_gt = None
else:
obj.affine_gt = affine_gt
# store gts
obj.id = id
obj.gts = gts
obj.p2 = p2
obj.p2_inv = p2_inv
# im properties
im = Image.open(impath)
obj.path = impath
obj.path_pre = impath_pre
obj.path_pre2 = impath_pre2
obj.path_pre3 = impath_pre3
obj.imW, obj.imH = im.size
# database properties
obj.dbname = db.name
obj.scale = db.scale
obj.dbind = dbind
# store
imdb_single_db.append(obj)
if (annind % 1000) == 0 and annind > 0:
time_str, dt = compute_eta(imdb_start, annind, len(annlist))
logging.info('{}/{}, dt: {:0.4f}, eta: {}'.format(annind, len(annlist), dt, time_str))
# concatenate single imdb into full imdb
imdb += imdb_single_db
imdb = np.array(imdb)
# cache off the imdb?
if cache_folder is not None:
pickle_write(os.path.join(cache_folder, 'imdb.pkl'), imdb)
# store more information
self.datasets_train = conf.datasets_train
self.len = len(imdb)
self.imdb = imdb
# setup data augmentation transforms
self.transform = Augmentation(conf)
# setup sampler and data loader for this dataset
self.sampler = torch.utils.data.sampler.WeightedRandomSampler(balance_samples(conf, imdb), self.len)
self.loader = torch.utils.data.DataLoader(self, conf.batch_size, sampler=self.sampler, collate_fn=self.collate)
# check classes
cls_not_used = []
for imobj in imdb:
for gt in imobj.gts:
cls = gt.cls
if not(cls in conf.lbls or cls in conf.ilbls) and (cls not in cls_not_used):
cls_not_used.append(cls)
if len(cls_not_used) > 0:
logging.info('Labels not used in training.. {}'.format(cls_not_used))
def __getitem__(self, index):
"""
Grabs the item at the given index. Specifically,
- read the image from disk
- read the imobj from RAM
- applies data augmentation to (im, imobj)
- converts image to RGB and [B C W H]
"""
if not self.video_det:
# read image
im = cv2.imread(self.imdb[index].path)
else:
# read images
im = cv2.imread(self.imdb[index].path)
video_count = 1 if self.video_count is None else self.video_count
if video_count >= 2:
im_pre = cv2.imread(self.imdb[index].path_pre)
if not im_pre.shape == im.shape:
im_pre = cv2.resize(im_pre, (im.shape[1], im.shape[0]))
im = np.concatenate((im, im_pre), axis=2)
if video_count >= 3:
im_pre2 = cv2.imread(self.imdb[index].path_pre2)
if im_pre2 is None:
im_pre2 = im_pre
if not im_pre2.shape == im.shape:
im_pre2 = cv2.resize(im_pre2, (im.shape[1], im.shape[0]))
im = np.concatenate((im, im_pre2), axis=2)
if video_count >= 4:
im_pre3 = cv2.imread(self.imdb[index].path_pre3)
if im_pre3 is None:
im_pre3 = im_pre2
if not im_pre3.shape == im.shape:
im_pre3 = cv2.resize(im_pre3, (im.shape[1], im.shape[0]))
im = np.concatenate((im, im_pre3), axis=2)
# transform / data augmentation
im, imobj = self.transform(im, deepcopy(self.imdb[index]))
for i in range(int(im.shape[2]/3)):
# convert to RGB then permute to be [B C H W]
im[:, :, (i*3):(i*3) + 3] = im[:, :, (i*3+2, i*3+1, i*3)]
im = np.transpose(im, [2, 0, 1])
return im, imobj
@staticmethod
def collate(batch):
"""
Defines the methodology for PyTorch to collate the objects
of a batch together, for some reason PyTorch doesn't function
this way by default.
"""
imgs = []
imobjs = []
# go through each batch
for sample in batch:
# append images and object dictionaries
imgs.append(sample[0])
imobjs.append(sample[1])
# stack images
imgs = np.array(imgs)
imgs = torch.from_numpy(imgs).cuda()
return imgs, imobjs
def __len__(self):
"""
Simply return the length of the dataset.
"""
return self.len
def read_kitti_cal(calfile):
"""
Reads the kitti calibration projection matrix (p2) file from disc.
Args:
calfile (str): path to single calibration file
"""
text_file = open(calfile, 'r')
p2pat = re.compile(('(P2:)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)' +
'\s+(fpat)\s+(fpat)\s+(fpat)\s*\n').replace('fpat', '[-+]?[\d]+\.?[\d]*[Ee](?:[-+]?[\d]+)?'))
for line in text_file:
parsed = p2pat.fullmatch(line)
# bbGt annotation in text format of:
# cls x y w h occ x y w h ign ang
if parsed is not None:
p2 = np.zeros([4, 4], dtype=float)
p2[0, 0] = parsed.group(2)
p2[0, 1] = parsed.group(3)
p2[0, 2] = parsed.group(4)
p2[0, 3] = parsed.group(5)
p2[1, 0] = parsed.group(6)
p2[1, 1] = parsed.group(7)
p2[1, 2] = parsed.group(8)
p2[1, 3] = parsed.group(9)
p2[2, 0] = parsed.group(10)
p2[2, 1] = parsed.group(11)
p2[2, 2] = parsed.group(12)
p2[2, 3] = parsed.group(13)
p2[3, 3] = 1
text_file.close()
return p2
def read_kitti_poses(posefile):
text_file = open(posefile, 'r')
ppat1 = re.compile(('(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)' +
'\s+(fpat)\s+(fpat)\s+(fpat)\s*\n').replace('fpat', '[-+]?[\d]+\.?[\d]*[Ee](?:[-+]?[\d]+)?'))
ppat2 = re.compile(('(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)' +
'\s+(fpat)\s+(fpat)\s+(fpat)\s*\n').replace('fpat', '[-+]?[\d]+\.?[\d]*'));
ps = []
for line in text_file:
parsed1 = ppat1.fullmatch(line)
parsed2 = ppat2.fullmatch(line)
if parsed1 is not None:
p = np.zeros([4, 4], dtype=float)
p[0, 0] = parsed1.group(1)
p[0, 1] = parsed1.group(2)
p[0, 2] = parsed1.group(3)
p[0, 3] = parsed1.group(4)
p[1, 0] = parsed1.group(5)
p[1, 1] = parsed1.group(6)
p[1, 2] = parsed1.group(7)
p[1, 3] = parsed1.group(8)
p[2, 0] = parsed1.group(9)
p[2, 1] = parsed1.group(10)
p[2, 2] = parsed1.group(11)
p[2, 3] = parsed1.group(12)
p[3, 3] = 1
ps.append(p)
elif parsed2 is not None:
p = np.zeros([4, 4], dtype=float)
p[0, 0] = parsed2.group(1)
p[0, 1] = parsed2.group(2)
p[0, 2] = parsed2.group(3)
p[0, 3] = parsed2.group(4)
p[1, 0] = parsed2.group(5)
p[1, 1] = parsed2.group(6)
p[1, 2] = parsed2.group(7)
p[1, 3] = parsed2.group(8)
p[2, 0] = parsed2.group(9)
p[2, 1] = parsed2.group(10)
p[2, 2] = parsed2.group(11)
p[2, 3] = parsed2.group(12)
p[3, 3] = 1
ps.append(p)
text_file.close()
return ps
def read_kitti_label(file, p2, use_3d_for_2d=False):
"""
Reads the kitti label file from disc.
Args:
file (str): path to single label file for an image
p2 (ndarray): projection matrix for the given image
"""
gts = []
text_file = open(file, 'r')
'''
Values Name Description
----------------------------------------------------------------------------
1 type Describes the type of object: 'Car', 'Van', 'Truck',
'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',
'Misc' or 'DontCare'
1 truncated Float from 0 (non-truncated) to 1 (truncated), where
truncated refers to the object leaving image boundaries
1 occluded Integer (0,1,2,3) indicating occlusion state:
0 = fully visible, 1 = partly occluded
2 = largely occluded, 3 = unknown
1 alpha Observation angle of object, ranging [-pi..pi]
4 bbox 2D bounding box of object in the image (0-based index):
contains left, top, right, bottom pixel coordinates
3 dimensions 3D object dimensions: height, width, length (in meters)
3 location 3D object location x,y,z in camera coordinates (in meters)
1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]
1 score Only for results: Float, indicating confidence in
detection, needed for p/r curves, higher is better.
'''
pattern = re.compile(('([a-zA-Z\-\?\_]+)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+'
+ '(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s+(fpat)\s*((fpat)?)\n')
.replace('fpat', '[-+]?\d*\.\d+|[-+]?\d+'))
for line in text_file:
parsed = pattern.fullmatch(line)
# bbGt annotation in text format of:
# cls x y w h occ x y w h ign ang
if parsed is not None:
obj = edict()
ign = False
cls = parsed.group(1)
trunc = float(parsed.group(2))
occ = float(parsed.group(3))
alpha = float(parsed.group(4))
x = float(parsed.group(5))
y = float(parsed.group(6))
x2 = float(parsed.group(7))
y2 = float(parsed.group(8))
width = x2 - x + 1
height = y2 - y + 1
h3d = float(parsed.group(9))
w3d = float(parsed.group(10))
l3d = float(parsed.group(11))
cx3d = float(parsed.group(12)) # center of car in 3d
cy3d = float(parsed.group(13)) # bottom of car in 3d
cz3d = float(parsed.group(14)) # center of car in 3d
rotY = float(parsed.group(15))
# actually center the box
cy3d -= (h3d / 2)
elevation = (1.65 - cy3d)
if use_3d_for_2d and h3d > 0 and w3d > 0 and l3d > 0:
# re-compute the 2D box using 3D (finally, avoids clipped boxes)
verts3d, corners_3d = project_3d(p2, cx3d, cy3d, cz3d, w3d, h3d, l3d, rotY, return_3d=True)
# any boxes behind camera plane?
if np.any(corners_3d[2, :] <= 0):
ign = True
else:
x = min(verts3d[:, 0])
y = min(verts3d[:, 1])
x2 = max(verts3d[:, 0])
y2 = max(verts3d[:, 1])
width = x2 - x + 1
height = y2 - y + 1
# project cx, cy, cz
coord3d = p2.dot(np.array([cx3d, cy3d, cz3d, 1]))
# store the projected instead
cx3d_2d = coord3d[0]
cy3d_2d = coord3d[1]
cz3d_2d = coord3d[2]
cx = cx3d_2d / cz3d_2d
cy = cy3d_2d / cz3d_2d
# encode occlusion with range estimation
# 0 = fully visible, 1 = partly occluded
# 2 = largely occluded, 3 = unknown
if occ == 0: vis = 1
elif occ == 1: vis = 0.66
elif occ == 2: vis = 0.33
else: vis = 0.0
while rotY > math.pi: rotY -= math.pi * 2
while rotY < (-math.pi): rotY += math.pi * 2
# recompute alpha
alpha = convertRot2Alpha(rotY, cz3d, cx3d)
obj.elevation = elevation
obj.cls = cls
obj.occ = occ > 0
obj.ign = ign
obj.visibility = vis
obj.trunc = trunc
obj.alpha = alpha
obj.rotY = rotY
# is there an extra field? (assume to be track)
if len(parsed.groups()) >= 16 and parsed.group(16).isdigit(): obj.track = int(parsed.group(16))
obj.bbox_full = np.array([x, y, width, height])
obj.bbox_3d = [cx, cy, cz3d_2d, w3d, h3d, l3d, alpha, cx3d, cy3d, cz3d, rotY]
obj.center_3d = [cx3d, cy3d, cz3d]
gts.append(obj)
text_file.close()
return gts
def balance_samples(conf, imdb):
"""
Balances the samples in an image dataset according to the given configuration.
Basically we check which images have relevant foreground samples and which are empty,
then we compute the sampling weights according to a desired fg_image_ratio.
This is primarily useful in datasets which have a lot of empty (background) images, which may
cause instability during training if not properly balanced against.
"""
sample_weights = np.ones(len(imdb))
if conf.fg_image_ratio >= 0:
empty_inds = []
valid_inds = []
for imind, imobj in enumerate(imdb):
valid = 0
scale = conf.test_scale / imobj.imH
igns, rmvs = determine_ignores(imobj.gts, conf.lbls, conf.ilbls, conf.min_gt_vis,
conf.min_gt_h, conf.max_gt_h, scale)
for gtind, gt in enumerate(imobj.gts):
if (not igns[gtind]) and (not rmvs[gtind]):
valid += 1
sample_weights[imind] = valid
if valid>0:
valid_inds.append(imind)
else:
empty_inds.append(imind)
if not (conf.fg_image_ratio == 2):
fg_weight = len(imdb) * conf.fg_image_ratio / len(valid_inds)
bg_weight = len(imdb) * (1 - conf.fg_image_ratio) / len(empty_inds)
sample_weights[valid_inds] = fg_weight
sample_weights[empty_inds] = bg_weight
logging.info('weighted respectively as {:.2f} and {:.2f}'.format(fg_weight, bg_weight))
logging.info('Found {} foreground and {} empty images'.format(np.sum(sample_weights > 0), np.sum(sample_weights <= 0)))
# force sampling weights to sum to 1
sample_weights /= np.sum(sample_weights)
return sample_weights
| [
"brazilga@msu.edu"
] | brazilga@msu.edu |
f382edecf942c8b06185993898f5cb08ef96e580 | dbbc56b58af216b923b38e7efbefd7a597945498 | /week1_alle.py | 0c1fd43667c1309a1eefb7de65b7521c15c5466f | [] | no_license | erikfsk/matin1105 | 684c68412da4699040728bd1e8c30a19efcdadec | b1c2f7908b2ff06447fe8fce6f7d12dfabe73517 | refs/heads/master | 2020-03-27T08:30:04.624643 | 2018-11-23T19:04:26 | 2018-11-23T19:04:26 | 146,261,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py |
# -*- coding: utf-8 -*-
"""
**TIL ALLE**
Så norske tegn er ikke støttet i python!
HVIS det funker for deg, så er det fordi du bruker et program som fikser det!
Jeg vet ikke hvilke program som fikser det, men atom og sublime, som jeg bruker,
fikser det ikke!
HVIS du absolutt må skrive norsk i programmet ditt, så må du skrive denne linjen
øverst i programmet:
"""
# -*- coding: utf-8 -*-
# ENKELTE av dere greier ikke å bruke % (eventuelt .format):
a = 1.123456789
print("%.2f" % a,"euro") #NEI
print("%g euro" % a) #NEI, fordi dette ikke gir riktig antall desimaler
print("%.2f euro" % a) #JA, tallet to kan dere bytte med så mange desimaler dere vil ha
"""
tips til neste uke:
https://github.com/erikfsk/in1900/blob/master/week2_lister.py
""" | [
"erikfsk@student.matnat.uio.no"
] | erikfsk@student.matnat.uio.no |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.