blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c87488b7851b45888bf821b022a61e1ba46019a0 | 9c6ede34ef2027259924a231c805d36b22ab6a76 | /mainTest.py | b4faf0ac52f74c46e3c1b8fe4f62802941786251 | [] | no_license | Chanaka-Sandeepa/MultiLevel-K-way-partitioning-FYP | 3810197e367784c500aabc7332c2f24e9117e4d1 | 5becf1456f27f366778e5b8fc08cdbb40626b659 | refs/heads/master | 2020-03-15T21:17:00.624112 | 2018-05-12T04:47:40 | 2018-05-12T04:47:40 | 132,351,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,306 | py | import networkx as nx
from CSVHandlers import ColomboNodesMarker
from GraphControllers import RoadGraph
from CSVHandlers import CSVReader
from GraphControllers.Graph import Graph
from PartitionHandler import Partition
from Utils import MapMarker
if __name__ == '__main__':
# Creating initial graphs
# colNodes = ColomboNodesMarker.read_file("Colombo_Nodes.csv")
# node_list = colNodes[1]
# node_coordinates = colNodes[0]
############################################
# road_graph = RoadGraph.read_json_file('Resources/RoadJsons/export_colombo_district.geojson', node_list, node_coordinates)
# nx.write_graphml(road_graph, "Test_Graphs/minimizedDistrictRoadGraph.graphml")
# road_graph = nx.read_graphml("Test_Graphs/minimizedDistrictRoadGraph.graphml")
############################################
# trip_data = CSVReader.read_file('Resources/Datasets/PickmeTrips.csv')
# ## # trips= [['a','b'],['c','d'],['e','f'],['g','h'],['i','j'],['a','e'],['c','h']]
# trip_graph = Graph.create_graph(trip_data)
# nx.write_graphml(trip_graph, "Test_Graphs/TripGraph1.graphml")
# trip_graph = nx.read_graphml("Test_Graphs/TripGraph1.graphml")
# road_graph = nx.read_graphml("fullRoadGraph.graphml")
# Graph.draw_graph(trip_graph)
# combinedFullGraph = Graph.combineWithNearestRoadGraphNodes(road_graph, trip_data)
###########################################################
# nx.write_graphml(combinedFullGraph, "Test_Graphs/CombinedMinimizedGraph.graphml")
############################################################
# nx.write_graphml(trip_graph, "Test_Graphs/TripGraph1.graphml")
# print("Combining graphs...")
# cg = Graph.combine_graphs(road_graph, trip_graph)
# nx.write_graphml(cg, "combinedFullGraph.graphml")
#
# # combined_graph = nx.compose(road_graph, trip_graph)
# nx.write_graphml(cg, "testCombinedGraph2.graphml")
# Graph.draw_graph(trip_graph)
# Graph.draw_graph(trip_graph)
# print("Drawing combined graph...")
# print("road graph nodes :-", road_graph.number_of_nodes())
# print("trip graph nodes :-", trip_graph.number_of_nodes())
# print("combined graph nodes :-", cg.number_of_nodes())
#
# Graph.draw_graph(trip_graph)
######################################################################
# # Partitioning graphs
# g = nx.read_graphml("fullRoadGraph.graphml")
# print(g.nodes(data=True))
# g = GraphReduction.random_match(g)
# g = TripGraphReduction.random_match(trip_graph)
# print(g.nodes(data=True))
# nx.write_graphml(g, "reducedFullCombinedGraph2.graphml")
# g = nx.read_graphml("testReducedRoadGraph2.graphml")
# Graph.draw_graph(trip_graph)
# gs = nx.connected_component_subgraphs(g)
# c =0
# for a in gs:
# c += 1
# print('sub in reducedG', c)
# print('nodes in reducedG', g.number_of_nodes())
####################################################################
# g = GraphReduction.random_match(trip_graph)
    # Active pipeline stage: load the pre-built combined road/trip graph,
    # partition it with recursive bisection, then refine the partitions.
    g = nx.read_graphml("Test_Graphs/CombinedMinimizedGraph.graphml")
    # # gs = nx.connected_component_subgraphs(g)
    # # c =0
    # # for a in gs:
    # # c += 1
    # # print('sub in reducedG', c)
    # nx.write_graphml(g, "testReducedRoadGraph2.graphml")
    ##############################################################
    # Untouched copy of the same graph, used below for coordinate averaging
    # and refinement while `g` is consumed by the partitioner.
    orig_graph = nx.read_graphml("Test_Graphs/CombinedMinimizedGraph.graphml")
    # NOTE(review): second argument is presumably the starting recursion
    # depth/level -- confirm against Partition.recursive_bisection.
    partitioned_graphs = Partition.recursive_bisection(g, 0)
    for p in partitioned_graphs:
        print(p.number_of_nodes())
        # Graph.draw_graph(p)
    partition_means_list = Partition.calculatePartitionAvgCoordinate(orig_graph, partitioned_graphs)
    refined_graphs = Partition.refinePartitionedGraphs(partitioned_graphs, orig_graph)
    # MapMarker.initiate_artitions(refined_graphs, orig_graph)
####################################################################
# Graph.draw_graph(trip_graph)
# cg = Graph.combine_graphs(road_graph, trip_graph)
# Graph.check_graph(road_graph)
# print(trip_graph.nodes(data=True))
#
# gs = nx.connected_component_subgraphs(cg)
# c =0
# for a in gs:
# c += 1
# print('sub in reducedG', c)
# col_nodes = ColomboNodesMarker.read_file('Colombo_Nodes.csv') | [
"chanaka.14@cse.mrt.ac.lk"
] | chanaka.14@cse.mrt.ac.lk |
5b1898ad002efc03e96baeb9e1bc3aefda60bc4f | 8bda65732ed8afb45a9eabe602474c29ecf3100e | /makeDS.py | 682bc8a2230984cbf709237b85577da3f5b473c5 | [] | no_license | epsln/chiner_models | 0c05237c58a752e53df27c4f858f7b7fa84c312d | aff6ae055683bf6310b0cccfe5f9795b4a67e5c4 | refs/heads/main | 2023-07-07T00:54:37.934109 | 2021-08-10T14:00:03 | 2021-08-10T14:00:03 | 394,417,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | #Create a dataset of normalised spectrograms from files
import os
import numpy as np
import librosa
import random
import audiofile as af
import configparser
from utils.audioTools import getSpectro
debugFlag = False
def main():
    """Build a dataset of spectrograms from the configured music folder.

    Reads config.cfg (or configTest.cfg when the module-level debugFlag is
    set), collects music files with acceptable extensions, and writes one
    spectrogram (.npy) per file into <dsName>/train or <dsName>/test using
    a roughly 80/20 random split.

    Raises:
        ValueError: if no music file is found under musicFilesDir.
    """
    config = configparser.ConfigParser()
    if debugFlag:
        config.read(r'configTest.cfg')
    else:
        config.read(r'config.cfg')
    dsName = config.get('Dataset', 'name')
    fftLength = int(config.get('Dataset', 'fftLength'))
    nFreq = int(config.get('Dataset', 'nFreq'))
    numFeatures = int(config.get('Dataset', 'numFeatures'))
    numEx = int(config.get('Dataset', 'numEx'))
    musicFilesDir = config.get('Dataset', 'musicFilesDir')
    # Might want to expand this somewhat
    acceptableFormats = [".wav", ".flac", ".mp3"]
    musicFiles = [os.path.join(path, name) for path, subdirs, files in os.walk(os.path.expanduser(musicFilesDir)) for name in files]
    random.shuffle(musicFiles)
    # Keep only the acceptable formats.
    # Bug fix: the original removed items from musicFiles while iterating it,
    # which silently skips the element following each removal.
    musicFiles = [music for music in musicFiles
                  if os.path.splitext(music)[1] in acceptableFormats]
    if len(musicFiles) == 0:
        raise ValueError("No music file detected...")
    # If the output folder already exists, warn and fall through (files for
    # songs not yet processed will still be written into it).
    if os.path.exists(dsName):
        # TODO: Raise an actual (appropriate) error
        print("ERROR: The folder '" + dsName + "' already exists ! either delete it or rename it and try again")
    else:
        os.makedirs(dsName)
        os.makedirs(dsName + "/train")
        os.makedirs(dsName + "/test/")
    # Finally create the dataset: one .npy spectrogram per song,
    # randomly assigned to test (~20%) or train (~80%).
    total = min(numEx, len(musicFiles))
    for i in range(total):
        song = musicFiles[i]
        S = getSpectro(song, fftLength)
        subset = "test" if np.random.uniform(0, 1) > 0.8 else "train"
        out = dsName + "/" + subset + "/" + os.path.basename(song)[:-4] + ".npy"
        print("Saving " + out)
        print("[", i + 1, "/", total, "]")
        np.save(out, S)

if __name__ == "__main__":
    main()
| [
"martin.olivier1997@gmail.com"
] | martin.olivier1997@gmail.com |
2252b90ba8deb223db2d75fe3370861ede934d35 | bcc5ebe8c5d0b78c43087f0c292e329cb8d78e6a | /venv/bin/pasteurize | fbc9fe65369d9648f41dfaf9c689c7a6bbe36855 | [
"MIT"
] | permissive | RaymondDashWu/generative-structures-dapp | 1ce0fc0028c97de49c2843d4b3e30c84e92fb769 | 06819e9333c663a8be3eca444b55dd244d31f87b | refs/heads/master | 2022-12-26T22:59:14.699771 | 2019-08-09T20:13:59 | 2019-08-09T20:13:59 | 193,174,106 | 0 | 0 | MIT | 2022-12-03T14:43:11 | 2019-06-22T00:27:31 | Python | UTF-8 | Python | false | false | 320 | #!/Users/raymondmbp/makeschool/BEW-2.4-Decentralized-Apps-Distributed-Protocols/generative-structures/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from libpasteurize.main import main
if __name__ == '__main__':
    # setuptools console-script convention: strip any "-script.py(w)"/".exe"
    # wrapper suffix from argv[0] before delegating to the pasteurize CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"raymond31670@hotmail.com"
] | raymond31670@hotmail.com | |
f1c3b687cccb87797fcf6751fc883f9f3bb75051 | db89a13a9194e7a0ee48192720084eb2453fa7c5 | /lib/LangCombo_Stats.py | c3aa004f922629b26b93a35772d113097839b501 | [] | no_license | www-jrtorres042-github-enterprise-org/MlsTool | 33c24b8cbfb133e573e081adfc18a98cffbfdebd | 1d7553211a9ec53b1860842c03457eda9cb7502b | refs/heads/master | 2023-08-05T11:16:47.356117 | 2021-10-02T11:08:32 | 2021-10-02T11:08:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | ##### Language_Stats.py #####
from lib.Process_Data import Process_Data
class LangCombo_Stats():
    """Usage statistics for one combination of programming languages."""

    # Column order used when serialising an instance to a list or dict.
    Header_Names = ["id", "combination", "count", "distribution"]

    def __init__(self, id, combination):
        # `combination` is a space-separated string of language names,
        # e.g. "python java".
        self.combo_id = id
        self.combination = combination
        self.count = 0
        self.distribution = 0.0
        self.language_used = len(list(combination.split(" ")))

    def update(self):
        """Record one more occurrence of this combination."""
        self.count = self.count + 1

    def update_distribution(self, total_combination):
        """Recompute this combination's share of `total_combination` total occurrences."""
        self.distribution = self.count / total_combination

    def object_to_list(self, key):
        """Return the field values in Header_Names order.

        Bug fixes vs. the original: the id is no longer overwritten by the
        combination, and the list is actually returned (the original fell
        off the end and returned None, breaking object_to_dict).
        """
        return [self.combo_id, self.combination, self.count, self.distribution]

    def object_to_dict(self, key):
        """Return {header_name: field_value} for this instance."""
        keys = LangCombo_Stats.Header_Names
        values = self.object_to_list(key)
        return {key: value for key, value in zip(keys, values)}
| [
"li.wen@wsu.edu"
] | li.wen@wsu.edu |
ff7bf13a043bf04e722acae623f0deffee48f7b3 | 6bdffbfa7656bbc9d200f7609894385c86f8d6b5 | /Demo/Exercise05/ex05.py | bb0136ef227e169e71002358d226357df805906a | [] | no_license | jackyin68/Business-Intelligence-1641 | 21f65ea54816f1671c9d87add76a3337ed26fb94 | e89ca7e5dc98252057e8a7b9632e293ce9412395 | refs/heads/master | 2021-03-10T11:37:47.126486 | 2019-08-21T09:11:24 | 2019-08-21T09:11:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | import numpy as np
# Create a dictionary with keys as student names and values as student GPA (float)
students = {'John': 7.5, 'Paul': 6.5, 'Ringo': 8.0, 'George': 6.0}

# Add two new students entered by the user.
for _ in range(2):
    name = input("Enter new student name: ")
    gpa = float(input("Enter student GPA: "))
    students[name] = gpa
print(students)

# Bug fix: max()/min() on a dict compare the KEYS (names, alphabetically).
# Use key=students.get so the comparison is done on the GPA values instead.
print("Student who has max GPA: ", max(students, key=students.get))
print("Student who has min GPA: ", min(students, key=students.get))
print("Mean of GPA: ", np.mean(list(students.values())))

# Look up one student's GPA.
name = input("Enter student name: ")
print("Student GPA = ", students[name])

# Update one student's GPA.
name = input("Enter student name: ")
gpa = float(input("Enter student new GPA: "))
students[name] = gpa

# Remove one student.
name = input("Enter student name: ")
students.pop(name)
print(students)
"49477836+Duytv081298@users.noreply.github.com"
] | 49477836+Duytv081298@users.noreply.github.com |
69b5d17e2073ac65996477fd224f24a330a7b6ed | 04bffeaf52f193800b84363ce3cff5b6afef0893 | /user/models.py | 7f0d38708eded5aeeb378078b642607de7bab1e8 | [] | no_license | lumpy6494/guapstudent- | a7b2c1eaf30cd558a3bfd44426728981612b4d62 | 20dcd25a0d758a2ea6e4b2e763363332d2b7f5c5 | refs/heads/master | 2023-07-26T22:06:14.069961 | 2021-09-09T18:16:01 | 2021-09-09T18:16:01 | 395,648,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | import uuid
from django.contrib.auth.models import AbstractUser, Group
from django.db import models
# Create your models here.
class CastomUser(AbstractUser):
    """Custom user model extending Django's AbstractUser.

    Adds a patronymic, an optional birthday, a soft-activation flag, a
    unique e-mail address and a stable non-editable UUID.

    NOTE(review): "Castom" is presumably a misspelling of "Custom";
    renaming would require a migration, so it is only flagged here.
    """
    # Patronymic (verbose_name is Russian for "Patronymic").
    two_name = models.CharField(max_length=255, verbose_name='Отчество')
    # Optional date of birth ("Birthday").
    birthday = models.DateField(blank=True, null=True, verbose_name='День Рождения')
    # Soft-activation flag, indexed for filtering ("Activated?").
    is_activated = models.BooleanField(default=True, db_index=True, verbose_name='Активирован?')
    # Unique e-mail, usable as a login/contact address.
    email = models.EmailField(unique=True)
    # Stable external identifier, generated once, not editable in admin.
    uuid = models.UUIDField(unique=True, editable=False, default=uuid.uuid4)
    class Meta:
        verbose_name = 'Пользователь'
        verbose_name_plural = 'Пользователи'
class Promokod(models.Model):
    """Invitation/promo code mapping a code string to a user group."""
    # The code itself (verbose_name is Russian for "Invitation").
    promo = models.CharField(max_length=50, verbose_name='Пригласительный', blank=True, null=True)
    # Free-form description of the code ("Description").
    description_promo = models.TextField(max_length=500, verbose_name='Описание', blank=True, null=True)
    # Group assigned to users registering with this code; PROTECT prevents
    # deleting a Group that codes still reference.
    # NOTE(review): "gpoup_user" looks like a typo for "group_user";
    # renaming would require a migration, so it is only flagged here.
    gpoup_user = models.ForeignKey(Group, on_delete=models.PROTECT, verbose_name='Группа для пользователя',
                                   related_name='groups_user', blank=True, null=True)
    def __str__(self):
        # Admin display: the code string itself.
        return self.promo
    class Meta:
        verbose_name = 'Пригласительный '
        verbose_name_plural = 'Пригласительные'
| [
"superpolicay@gmail.com"
] | superpolicay@gmail.com |
8d06121e1ae462fafa60e6eb568930d5ff08b018 | 0fd71e96aa665d6600d5d1ef584bff0f65989c8c | /setup.py | dc7cd4b62780472c45df78187745a21f3034a184 | [] | no_license | alacrity2001/TSB-UAD | 07d6a77e9a30d7a2c333a3b60b0c0f0711127322 | b61e91efc80be2d1766cc44f6a121c1b662ba67e | refs/heads/main | 2023-08-31T01:40:22.927314 | 2021-10-03T19:37:19 | 2021-10-03T19:37:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | from setuptools import setup, find_packages
# Minimal package metadata; all sub-packages are discovered automatically.
setup(name='TSB-AD',
      version='0.1',
      author='Yuhao Kang',
      author_email='yuhaok@uchicago.edu',
      url='https://github.com/yuhao12345/TSB-AD',
      packages = find_packages()
      )
| [
"john@paparrizos.org"
] | john@paparrizos.org |
515f571aec0c41aa280a7ad4f155a691de756151 | e7efae2b83216d9621bd93390959d652de779c3d | /datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/__init__.py | 8153698063f3e5affe147fad62925b3a12cfa3e0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] | permissive | DataDog/integrations-core | ee1886cc7655972b2791e6ab8a1c62ab35afdb47 | 406072e4294edff5b46b513f0cdf7c2c00fac9d2 | refs/heads/master | 2023-08-31T04:08:06.243593 | 2023-08-30T18:22:10 | 2023-08-30T18:22:10 | 47,203,045 | 852 | 1,548 | BSD-3-Clause | 2023-09-14T16:39:54 | 2015-12-01T16:41:45 | Python | UTF-8 | Python | false | false | 617 | py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import click
from ..console import CONTEXT_SETTINGS
from .changelog import changelog
from .integrations import integrations
from .integrations_changelog import integrations_changelog
from .requirements import requirements
# Every agent-related subcommand exposed under the `agent` group.
ALL_COMMANDS = (changelog, requirements, integrations, integrations_changelog)
@click.group(context_settings=CONTEXT_SETTINGS, short_help='A collection of tasks related to the Datadog Agent')
def agent():
    # Group container only; behaviour lives in the registered subcommands.
    pass
# Register each subcommand on the group.
for command in ALL_COMMANDS:
    agent.add_command(command)
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
11373250ede4c3b0baff36ef1700e6e7a0c70db1 | 06cda71707e5f4ac040ae89f63f00cdab48d2577 | /one-isp-fairness/ispNet.py | 2ff8c8e458631228b49cfc0cfb01f9eec9cd0d32 | [] | no_license | YimengZhao/interdomain-TE | 9c4a1ef19549a39c8a7c9fecbd6a277c7d2073d4 | cc1a000c8c967a8608a0fa8bf740c5c200eb358e | refs/heads/master | 2020-03-08T11:44:08.136833 | 2018-04-04T18:54:33 | 2018-04-04T18:54:33 | 128,106,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,041 | py | from topology import Topology
from traffic import TrafficMatrix
from generatePath import *
from initopt import *
from predicates import nullPredicate
from provisioning import generateTrafficClasses, provisionLinks
from optHelper import *
import copy
import math
import networkx
CITY_TRAFFIC_VOLUME = 10
class IspNetwork:
def __init__(self, topo_name, topo_file, traffic_file=None):
self.topo = Topology(topo_name, topo_file)
if traffic_file:
self.trafficMatrix = TrafficMatrix.load(traffic_file)
self.linkcaps = []
def get_link_util(self):
link_util_dict = {}
for tc, paths in self.pptc.iteritems():
for path in paths:
if path.bw == 0:
continue
links = path.getLinks()
for link in links:
if link in link_util_dict:
link_util_dict[link] += path.bw
else:
link_util_dict[link] = path.bw
for link in self.topo.edges():
if link not in link_util_dict:
link_util_dict[link] = 0
return link_util_dict
def set_traffic(self, trafficMatrix, topo, path_num=4):
self.trafficMatrix = trafficMatrix
self.trafficClasses = []
self.ie_path_map = {}
base_index = 0
for key in trafficMatrix.keys():
self.ie_path_map[key] = generatePath(self.trafficMatrix[key].keys(), topo, nullPredicate, "shortest", maxPaths=path_num)
print 'test'
print self.trafficMatrix[key].keys()
tcs = generateTrafficClasses(key, self.trafficMatrix[key].keys(), self.trafficMatrix[key], {'a':1}, {'a':100}, index_base = base_index)
base_index += len(tcs)
self.trafficClasses.extend(tcs)
#for tc in self.trafficClasses:
#print tc
#self.linkcaps = provisionLinks(self.topo, self.trafficClasses, 1)
self.norm_list = get_norm_weight(self.trafficClasses)
self.network_norm_list = get_network_norm_weight(self.trafficClasses)
def calc_path_singleinput(self, fake_node, trafficMatrix, cp_num):
#add fake node
self.fake_topo = copy.deepcopy(self.topo)
self.fake_topo._graph.add_node(fake_node)
#self.topo._graph.add_edge(0, fake_node)
self.fake_topo._graph.add_edge(fake_node, 0)
#self.topo._graph.add_edge(1, fake_node)
self.fake_topo._graph.add_edge(fake_node, 1)
(pptc, throughput) = self.calc_path_maxminfair(trafficMatrix, self.fake_topo)
self.pptc = pptc
ingress_bw_dict = {}
for i in range(cp_num):
ingress_bw_dict[i] = {}
print 'single input'
for tc, paths in pptc.iteritems():
for path in paths:
nodes = path.getNodes()
print 'nodes:{}'.format(nodes)
print 'bw:{}'.format(path.bw)
ingress = nodes[1]
if ingress in ingress_bw_dict[tc.network_id]:
ingress_bw_dict[tc.network_id][ingress] += path.bw
else:
ingress_bw_dict[tc.network_id][ingress] = path.bw
return (ingress_bw_dict, throughput)
def calc_path_maxminfair(self, trafficMatrix, topo = None, network_level = False, weighted = True, max_throughput = False):
if topo == None:
topo = self.topo
self.set_traffic(trafficMatrix, topo, path_num = 10)
ie_path_map = {}
for path_map in self.ie_path_map.itervalues():
ie_path_map.update(path_map)
'''print 'testing'
for ie, paths in ie_path_map.iteritems():
print ie
for path in paths:
print path.getNodes()'''
pptc = initOptimization(ie_path_map, topo, self.trafficClasses)
'''self.linkcaps[(0,2)] = 10.0
self.linkcaps[(2,0)] = 10.0
self.linkcaps[(0,1)] = 10.0
self.linkcaps[(1,0)] = 10.0'''
throughput = 0
if network_level:
ret, throughput = MCF_network(self.linkcaps, pptc, self.network_norm_list, 50, max_throughput)
else:
if weighted == False:
self.norm_list = dict((x, 1) for (x, y) in self.norm_list.iteritems())
throughput = maxmin_fair_allocate(self.trafficClasses, self.linkcaps, pptc, self.norm_list, max_throughput)
self.pptc = pptc
return (pptc, throughput)
def calc_path_shortest(self, trafficMatrix):
self.set_traffic(trafficMatrix, self.topo, path_num = 1)
ie_path_map = {}
for path_map in self.ie_path_map.itervalues():
ie_path_map.update(path_map)
pptc = initOptimization(ie_path_map, self.topo, self.trafficClasses)
self.linkcaps[(0,2)] = 10.0
self.linkcaps[(2,0)] = 10.0
self.linkcaps[(0,1)] = 10.0
self.linkcaps[(1,0)] = 10.0
throughput = maxmin_fair_allocate(self.trafficClasses, self.linkcaps, pptc, self.norm_list, False)
self.pptc = pptc
return (pptc, throughput)
| [
"yzhao389@eros.cc.gatech.edu"
] | yzhao389@eros.cc.gatech.edu |
3dcb93fd4c95d948f08630db348b27d9952302ab | 82c7bdcfba82dc7755e24860da75c881ee9e4809 | /scalpr/database/depth.py | dde72560cd524e0c3a6bfa50384ce8e01c0fe6ad | [
"MIT"
] | permissive | ersineser/scalpr | acc043841d8a001223572cf56f6d260a1591d81a | b6864c20a3772e5ea253b3db8361cffb732c4e8c | refs/heads/main | 2023-08-25T10:02:44.942282 | 2021-10-30T07:46:10 | 2021-10-30T07:46:10 | 459,350,863 | 1 | 0 | MIT | 2022-02-14T22:49:01 | 2022-02-14T22:49:00 | null | UTF-8 | Python | false | false | 799 | py | # pylint: disable=no-name-in-module
from collections import deque
from typing import Deque
from pydantic import BaseModel
class Bid(BaseModel):
    # One bid level in the orderbook: price and the quantity bid at it.
    price: float
    quantity: float
class Ask(BaseModel):
    # One ask level in the orderbook: price and the quantity offered at it.
    price: float
    quantity: float
class Depth(BaseModel):
    """Depthcache, showing the best bids and asks in the orderbook.
    Timeframe.depth.bids[0] is the best bid, Timeframe.depth.bids[1]
    the second best etc.
    The bids and asks are a snapshot of the depthcache at close time
    of the timeframe.
    The value Options._depthcache_size represents the number of bids
    and asks saved.
    E.g. if Options._depthcache_size is equal to 5,
    the top 5 best bids and asks will be saved.
    """
    # Best-first queues: bids[0] is the highest bid, asks[0] the lowest ask.
    # NOTE(review): pydantic copies mutable defaults per instance, so the
    # shared deque() default is presumably safe here -- confirm.
    bids: Deque[Bid] = deque()
    asks: Deque[Ask] = deque()
| [
"tvanmeer123@gmail.com"
] | tvanmeer123@gmail.com |
cb04b6b46e60fcdf9bd5b7f6dadcbdfb3e65af0e | 1edaf48fb81223513b386e81927d51c70f09b8f9 | /problems_30s/problem_32.py | 481614641baa21fa94eed126d4350bda4a8de98f | [] | no_license | famalhaut/ProjectEuler | 4392cc50ac88a168e9a2d7889e4a8da201afaf72 | 9c6be56f0fed472472d08bd35f488d8b94f684ff | refs/heads/master | 2020-05-26T11:50:52.711715 | 2017-04-21T16:01:58 | 2017-04-21T16:01:58 | 84,996,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | """
Pandigital products
Problem 32
We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly
once; for example, the 5-digit number, 15234, is 1 through 5 pandigital.
The product 7254 is unusual, as the identity, 39 × 186 = 7254, containing multiplicand, multiplier,
and product is 1 through 9 pandigital.
Find the sum of all products whose multiplicand/multiplier/product identity can be written
as a 1 through 9 pandigital.
HINT: Some products can be obtained in more than one way so be sure to only include it once
in your sum.
"""
def problem():
    """Project Euler 32: sum all products c = a * b whose combined digits of
    a, b and c use each of 1-9 exactly once (1-9 pandigital identities).

    Only two digit-length splits can yield nine digits in total:
    1-digit x 4-digit and 2-digit x 3-digit, each with a 4-digit product.
    """
    pandigits = set('123456789')
    products = set()

    def record_if_pandigital(x, y):
        # Keep x * y when "x y x*y" concatenates to a 1-9 pandigital string.
        prod = x * y
        if set('%d%d%d' % (x, y, prod)) == pandigits:
            print('{a} * {b} = {c}'.format(a=x, b=y, c=prod))
            products.add(prod)

    # 1-digit * 4-digit = 4-digit
    for x in range(1, 10):
        for y in range(1000, 10000 // x):
            record_if_pandigital(x, y)
    # 2-digit * 3-digit = 4-digit
    for x in range(10, 100):
        for y in range(100, 10000 // x):
            record_if_pandigital(x, y)
    # `products` is a set, so duplicate identities contribute only once.
    return sum(products)

if __name__ == '__main__':
    print('Answer:', problem())
| [
"famalhaut.ru@gmail.com"
] | famalhaut.ru@gmail.com |
459ecb5a6a85606d81ff832ccbdfa3e50996919d | 3c004cf2138de007160320ef61b9581fb242a861 | /gettime.py | 34a115776d7bf8387080e6cb431fed4d6fc546c5 | [] | no_license | deepforce/pythonscript | e41c23ccc64cd875f7dcccfe95b1509e7eacf431 | 15764faac13a2ebbc52a1b513317e373ebb58be9 | refs/heads/master | 2021-01-10T21:36:09.453267 | 2015-09-24T02:09:30 | 2015-09-24T02:09:30 | 42,761,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | # coding = utf-8
import time
import datetime

# Current local time as a C-style timestamp string.
print(time.ctime())

# Capture "now" once so the derived timestamps below are consistent.
now_time = datetime.datetime.now()
print(now_time)

# The same moment yesterday.
yesterday = now_time - datetime.timedelta(days=1)
print(yesterday)

# One second before "now".
now_old = now_time - datetime.timedelta(seconds=1)
print(now_old)
"zyq81678593@126.com"
] | zyq81678593@126.com |
99aed070a6cf826c3b0eee7ddd0310c2e249e97d | b72d35ff8980ea49b9d3e4c081ce0743361b5f1c | /settings/development.py | 66ba55d7590977bcd4a3058ed546ddaf4c68b406 | [] | no_license | miguelguzmanr/django-project-template | 7d770ccda8463abb47f4be59da554dc6ca966b8d | 2df5fccaada417804ea199e0f672b6353ee2de8a | refs/heads/master | 2020-06-07T13:21:34.478579 | 2019-08-18T22:54:13 | 2019-08-18T22:54:13 | 193,031,647 | 0 | 0 | null | 2019-12-04T23:23:30 | 2019-06-21T04:41:20 | Python | UTF-8 | Python | false | false | 866 | py | import os
# Re-export the shared base settings; the values below are the
# development-only overrides.
from settings.base import (
    BASE_DIR, INSTALLED_APPS, MIDDLEWARE, ROOT_URLCONF, TEMPLATES,
    WSGI_APPLICATION, AUTH_PASSWORD_VALIDATORS, LANGUAGE_CODE, TIME_ZONE,
    USE_I18N, USE_L10N, USE_TZ, STATIC_URL)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# setdefault(): the environment variable wins when set; otherwise this
# hard-coded development-only key is installed and used.
SECRET_KEY = os.environ.setdefault('DJANGO_SETTINGS_SECRET_KEY', '@-79#*u6(541vm#&67a_08sc7v$*0e!loiiiqgng2@jj#6%h%a')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is fine in development (localhost is always allowed when DEBUG=True).
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Local SQLite database stored next to the project sources.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
| [
"miguelguzman@protonmail.com"
] | miguelguzman@protonmail.com |
fc4c311d5bc2234566bf6da99f3a670bf2e645f9 | f20e965e19b749e84281cb35baea6787f815f777 | /Phys/Phys/Swimming/example/DecayTreeTuple.py | a4ae76a442a7beeeb7f791472b3bd885ae9713a1 | [] | no_license | marromlam/lhcb-software | f677abc9c6a27aa82a9b68c062eab587e6883906 | f3a80ecab090d9ec1b33e12b987d3d743884dc24 | refs/heads/master | 2020-12-23T15:26:01.606128 | 2016-04-08T15:48:59 | 2016-04-08T15:48:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,490 | py | ##
##
## Static Configuration
##
## DaVinci/Gaudi options file: builds a DecayTreeTuple for swimming studies
## of Bs -> J/psi(mu mu) phi(K K) candidates from a (micro)DST and writes
## an ntuple plus histograms.  Order of statements matters to the framework.
from Gaudi.Configuration import *
from Configurables import ( ApplicationMgr,
                            CondDB,
                            HistogramPersistencySvc,
                            DaVinci,
                            MessageSvc,
                            GaudiSequencer,
                            ANNDispatchSvc,
                            HltDecReportsDecoder,
                            HltSelReportsDecoder )
CondDB( UseOracle = False,DisableLFC=True )
## More space for output
MessageSvc().Format = "% F%30W%S%15W%R%T %0W%M"
## mDST paths
locationRoot = '/Event'
selectionPath = 'SingleCandidate'
particlePath = selectionPath + '/Particles'
#pvLocation = 'Rec/Vertex/Primary'
p2PVLocation = selectionPath +'/Particle2VertexRelations'
## Toggle between microDST (True) and full DST (False) input layouts.
mDST = True
if mDST:
    mDSTName = 'SwimmingMDST'
    p2PVLocation = '%s/BestPV_%s_P2PV' % ( selectionPath, mDSTName )
    from MicroDSTConf.TriggerConfUtils import configureL0AndHltDecoding
    locationRoot += '/' + mDSTName
    configureL0AndHltDecoding(locationRoot)
from Gaudi.Configuration import *
from Configurables import ( DaVinci, MessageSvc, FilterDesktop )
# Make the DecayTreeTuple
from Configurables import DecayTreeTuple
dtt = DecayTreeTuple (
    'SwimmingDTT',
    ## print histos
    HistoPrint = True,
    ## N-tuple LUN
    NTupleLUN = "DTT",
    ## input particles from selection:
    Inputs = [ particlePath ],
    ## Primary vertices from mDST
    P2PVInputLocations = [ p2PVLocation ],
    UseP2PVRelations = True,
    WriteP2PVRelations = False,
    )
## Decay descriptor; ^ marks the particles to be tupled.
dtt.Decay = "B_s0 -> (^J/psi(1S) => ^mu+ ^mu-) (^phi(1020) -> ^K+ ^K-)"
if mDST: dtt.RootInTES = locationRoot
from DecayTreeTuple.Configuration import *
## Add appropriate tools
## Branch "B" selects the head Bs candidate for the tools below.
dtt.addBranches({
    "B" : "B_s0 : B_s0 -> (J/psi(1S) => mu+ mu-) (phi(1020) -> K+ K-)"
    })
dtt.B.addTupleTool('TupleToolPropertime')
## Swimming info for both the trigger and stripping stages.
ttsi = dtt.B.addTupleTool('TupleToolSwimmingInfo/TriggerInfo')
ttsis = dtt.B.addTupleTool('TupleToolSwimmingInfo/StrippingInfo')
ttsi.ReportsLocation = selectionPath + '/P2TPRelations'
ttsis.ReportsLocation = selectionPath + '/P2TPRelations'
ttsis.ReportStage = "Stripping"
## TISTOS flags for the listed HLT1/HLT2 lines.
tttt = dtt.B.addTupleTool('TupleToolTISTOS')
tttt.TriggerList = ['Hlt1TrackAllL0Decision', 'Hlt1TrackMuonDecision', 'Hlt1DiMuonHighMassDecision',
                    'Hlt2DiMuonDetachedJpsiDecision', 'Hlt2DiMuonJpsiDecision']
tttt.VerboseHlt1 = True
tttt.VerboseHlt2 = True
## Main DaVinci application configuration (2011 data conditions).
dv = DaVinci()
dv.DDDBtag = 'head-20110914'
dv.CondDBtag = 'head-20110914'
dv.DataType = '2011'
dv.Lumi = False
dv.InputType = "MDST" if mDST else "DST"
dv.UserAlgorithms = [ dtt ]
dv.EvtMax = -1
## Output files: histograms and the ntuple on logical unit "DTT".
ApplicationMgr().HistogramPersistency = "ROOT"
from Configurables import HistogramPersistencySvc
HistogramPersistencySvc ( OutputFile = 'histos.root' )
from Configurables import NTupleSvc
NTupleSvc().Output += [ "DTT DATAFILE='tuples.root' TYPE='ROOT' OPT='NEW'"]
NTupleSvc().OutputLevel = 1
## Point the EventClockSvc to the RootInTES
## from Configurables import EventClockSvc, OdinTimeDecoder, TimeDecoderList
## EventClockSvc().addTool( TimeDecoderList, name = "EventTimeDecoder" )
## EventClockSvc().EventTimeDecoder.RootInTES = locationRoot
## Input data, chosen to match the mDST flag above.
from GaudiConf import IOHelper
if mDST:
    ## IOHelper().inputFiles(['/castor/cern.ch/user/r/raaij/test/Swimming.SwimmingMicroDST.mdst'])
    IOHelper().inputFiles(['Swimming.SwimmingMDST.mdst'])
else:
    IOHelper().inputFiles(['/project/bfys/raaij/cmtuser/Moore_v12r8/scripts/SwimTrigDST.dst'])
| [
"rlambert@cern.ch"
] | rlambert@cern.ch |
fff4f9cefdd1de227ec424125d5f987dbf243342 | 19ea7c14a53506d2138ccb2dd1f0fc5588f8d935 | /itcload.py | c3522a61f2a54c9c6c1e164e90ff6809e7605848 | [] | no_license | cpatrickking/python_itunesconnect | 046223e6e45c57e665ea67f81c9838064070034a | 45a328e0cbbc30a8637a805c0fb250c17b39064d | refs/heads/master | 2016-09-11T03:21:22.887474 | 2013-08-28T18:48:26 | 2013-08-28T18:48:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,778 | py | # Insert this script into the folder to which you want to download the files along with config.cfg,
# Autoingestion.properties and Autoingestion.class (java file downloadable from Apple)
# Call from the CMD prompt
import glob
import re
import os
import datetime
import ConfigParser
import MySQLdb as mysql
#Configuration file including vendorid, database credentials
config = ConfigParser.RawConfigParser()
config.read('config.cfg')
vendorid = config.get('creds', 'vendorid')
user = config.get('dbcreds', 'user')
password = config.get('dbcreds', 'password')
host = config.get('dbcreds', 'host')
database = config.get('dbcreds', 'database')
mypath = config.get('creds', 'mypath')
#connect to database,get most recent date loaded from itc_daily table
# local_infile=1 is required for the LOAD DATA LOCAL INFILE statements below.
cnx = mysql.connect( host, user, password, database, local_infile=1)
query = ("SELECT MAX(`begin date`) FROM ITC_DAILY")
cursor = cnx.cursor()
cursor.execute(query)
results = cursor.fetchall()
resulttup = results[0]
result = resulttup[0]
# Latest loaded report date, reformatted like the YYYYMMDD in the file names.
max_date_in_table = result.strftime("%Y%m%d")
cursor.connection.autocommit(True)
# This is the block of code that creates the list of files already in the folder
def max_report_date():
    """Return the latest report date (YYYYMMDD string) among the already
    downloaded daily report files in the working directory."""
    current_daily = [f for f in glob.glob('S_D_*') if '.gz' not in f]
    current_daily_dates = []
    for daily in current_daily:
        daily_file = os.path.split(daily)[1]
        # Slice the date out of "S_D_<vendorid>_<YYYYMMDD>.txt".
        # NOTE(review): the fixed offset 13 assumes an 8-character vendor id
        # ("S_D_" + 8 + "_" = 13) -- confirm for other vendor id lengths.
        daily_file_date = daily_file[13:len(daily_file)-4]
        current_daily_dates.append(daily_file_date)
    max_file_date = max(current_daily_dates)
    return max_file_date
maxreportdate = max_report_date()
#now what days do you need to download, returns a list of the itunes style filedate distinction
def days_to_download():
    """Return the list of report dates (YYYYMMDD strings) newer than the
    most recent report already downloaded, up to yesterday.

    Bug fix: the day count is now taken from timedelta.days instead of
    slicing str(timedelta) at the first space -- the string form has no
    "N days," prefix when the gap is under one day, which made int() fail,
    and the parsing was fragile in general.
    """
    today = datetime.datetime.now()
    days = (today - datetime.datetime.strptime(maxreportdate, "%Y%m%d")).days - 1
    days_to_dwn = []
    if days == 0:
        print("There are no more reports to download yet, %s is the most recent report available" % maxreportdate)
    while days > 0:
        day_to_add = today - datetime.timedelta(days)
        days_to_dwn.append(day_to_add.strftime("%Y%m%d"))
        days -= 1
    return days_to_dwn
#need to put new line in FRONT of text
print '...downloading and processing files from Apple' + "\n"
#this actually downloads the files according to the list you just created (python wrapper for java .class file in folder provided by apple)
def report_downloader(days_to_download):
for d in days_to_download:
cmd = ["java", "Autoingestion", "autoingestion.properties", vendorid, "Sales", "Daily", "Summary", d]
cmdstring = ' '.join(cmd)
os.popen(cmdstring)
report_downloader(days_to_download())
#Unzip the gz files
files = glob.glob('S_D_*')
def unzip_files(files):
for f in files:
if f.find('.gz') != -1:
zipcmd = ["gzip", "-d", f ]
zipcmdstring = ' '.join(zipcmd)
os.popen(zipcmdstring)
unzip_files(files)
files = glob.glob('S_D_' + vendorid + '*.txt')
#determine which of the files in the folder to upload to the database
files_to_load = []
def add_files():
for f in files:
z = f[13:len(f)-4]
y = ("S_D_%s_%s") % (vendorid, z)
if z > max_date_in_table:
files_to_load.append(y)
return files_to_load
add_files()
#insert file in itc_daily database
def load_itc_daily(loadfile):
filename = "%s%s.txt" % (mypath,loadfile)
droptable1 = ("drop table if exists newitcdaily2;")
# creates a table (could be temporary) for the data to be loaded into -- to handle dates
# could refactor this to say for all text files, load into this table first THEN load into DB (non-priority)
createtable = ("""create table newitcdaily2 (
Provider varchar(150),
`Provider Country` varchar(150),
SKU varchar(150),
Developer varchar(150),
Title varchar(150),
Version varchar(150),
`Product Type Identifier` varchar(150),
Units varchar(150),
`Developer Proceeds` varchar(150),
`Begin Date` varchar(64),
`End Date` varchar(64),
`Customer Currency` varchar(150),
`Country Code` varchar(150),
`Currency of Proceeds` varchar(150),
`Apple Identifier` varchar(150),
`Customer Price` varchar(150),
`Promo Code` varchar(150),
`Parent Identifier` varchar(150),
`Subscription` varchar(150),
`Period` varchar(150));""")
load_query = ("LOAD DATA LOCAL INFILE '%s' INTO TABLE newitcdaily2 IGNORE 1 LINES;" %(filename))
insert_query = ("""insert into ITC_Daily
SELECT
Provider,
`Provider Country`,
SKU,
Developer,
Title,
Version,
`Product Type Identifier`,
Units,
`Developer Proceeds`,
CONCAT(right(`begin date`,4),'-',left(`begin date`,2),'-',mid(`begin date`,4,2)),
CONCAT(right(`end date`,4),'-',left(`end date`,2),'-',mid(`end date`,4,2)),
`Customer Currency`,
`Country Code`,
`Currency of Proceeds`,
`Apple Identifier`,
`Customer Price`,
`Promo Code`,
`Parent Identifier`,
`Subscription`,
`Period`
from newitcdaily2;""")
cursor.execute(droptable1)
cnx.commit()
cursor.execute(createtable)
cnx.commit()
cursor.execute(load_query)
cnx.commit()
cursor.execute(insert_query)
cnx.commit()
#for files to be uploaded, execute upload function
def load_to_db():
for f in files_to_load:
load_itc_daily(f)
load_to_db()
#final message, what has been added to the table
# maybe put a line count in here and do line count for files to load, line count for new files in DB for Match check
#something like WC files - n(accounting for title lines) = count(*) files added greater than original maxdate
cursor.execute("SELECT MAX(`BEGIN DATE`) from ITC_DAILY")
final_results_fetch = (cursor.fetchall())
final_results_tup = final_results_fetch[0]
final_results = final_results_tup[0]
string_result = final_results.strftime("%m/%d/%Y")
print "...these files have been added:"
for f in files_to_load:
print f
print "Your data has been uploaded through %s" % (string_result)
cursor.close()
cnx.close()
| [
"cpatrick.king@gmail.com"
] | cpatrick.king@gmail.com |
5c45beebc5a8b8fbca99765efaec7ff97e62dca0 | 156dbd988cf76f1566812915536c87797f470beb | /Estruturas repetitivas/For/Problema_divisao.py | 7cdc69dfdc7d9b5981f392ff3b55746a23d0d5fa | [] | no_license | RAFAELSPAULA/Exercicios-introdutorio-Python | b3cf5332adffa7e051e4114bbc480e3f67f9d2cd | 1880144e7394ceda5af85a4323a132ff92eb581d | refs/heads/main | 2023-08-16T18:30:06.265684 | 2021-09-23T18:54:39 | 2021-09-23T18:54:39 | 407,629,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # Escreva um algoritmo que leia dois números e imprima o resultado da divisão do primeiro pelo
# segundo. Caso não for possível, mostre a mensagem “DIVISAO IMPOSSIVEL”.
x = int(input('Quantos casos voce vai digitar?'))
for i in range(0,x):
a = float(input('Entre com o numerador:'))
b = float(input('Entre com o numerador:'))
if b == 0:
print('Divisão Impossível')
else:
divisao = a / b
print(f'DIVISAO = {divisao:.2f}')
| [
"noreply@github.com"
] | RAFAELSPAULA.noreply@github.com |
70926b02978529fa9edc66e2ea1a2862ddad1222 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_074/ch153_2020_04_13_20_39_02_324902.py | 7c41c4092f5c5dbd3653ad6e6e0f0b55a83b2984 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | def agrupa_por_idade(dicio):
dicio={[nome]:idade}
key=nome
dic={}
if idade<=11:
dic.update={criança:[nome]}
return dic
if idade>=12 and idade>=17:
dic.update={adolescente:[nome]}
return dic
if idade>=18 and idade<=59:
dic.update={adulto:[nome]}
return dic
else:
dic.update={idoso:[nome]}
return dic | [
"you@example.com"
] | you@example.com |
83fec668e56fcdff66e94ad5af3d22793aba1ac8 | 2e67bdd45c0427490880ca02f913a923a0890cdf | /foodcartapp/migrations/0043_order_products.py | 759a18dabb4d27be55133c80f21cd2960ebab509 | [] | no_license | KozhevnikovM/devman-star-burger | 5ed72c2a8a99bee12770bd2d28aa35c92be0cff8 | 54836d0216ea1117ea12ddfff11afbef15e7a3b5 | refs/heads/master | 2023-04-12T23:23:28.862134 | 2021-04-19T13:17:15 | 2021-04-19T13:17:15 | 355,147,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # Generated by Django 3.0.7 on 2021-03-23 12:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foodcartapp', '0042_auto_20210317_1251'),
]
operations = [
migrations.AddField(
model_name='order',
name='products',
field=models.ManyToManyField(through='foodcartapp.OrderPosition', to='foodcartapp.Product'),
),
]
| [
"admin@example.com"
] | admin@example.com |
3701f72a30e5f04542e1240b6580880b2851145c | 63898f79a6ed0bebf9f2fed134d80a15dc46d513 | /lib/comandos.py | a33c10e874644f5d59554794bee969ccfaa9ef00 | [] | no_license | seblaz/fiuba-algo2-tp3 | ba7f280698fea76da58c3d2028e5fac07a68b4fa | 39f761759ceb9237ae8d0d502547065d916aaf6a | refs/heads/master | 2021-06-10T14:32:18.491468 | 2016-12-13T21:28:27 | 2016-12-13T21:28:27 | 74,316,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | from lib.grafo import Grafo
class Comandos(object):
"""Clase que ejecuta los comandos del grafo."""
def __init__(self, grafo):
self.grafo = grafo
def similares(self, vertice, cantidad):
return self.grafo.n_similares(vertice, cantidad)
def recomendar(self, vertice, cantidad):
return self.grafo.n_recomendar(vertice, cantidad)
def camino(self, origen, destino):
return self.grafo.camino(origen, destino)
def centralidad(self, cantidad):
return self.grafo.centralidad_exacta(cantidad)
| [
"seby_1996@hotmail.com"
] | seby_1996@hotmail.com |
66ed557c3999fc31103d9c4d0ddcf63768b354cd | 45de13a618813455a3ea1e65c5dd31066b311cd7 | /Jaccorot/0020/0020.py | 336126ebf442c8574f662d06677b3203fc16022b | [] | permissive | luhralive/python | bbee0a3e7d0ac9845db484595362bba41923c2a4 | b74bdc4c7bc8e75aee9530c27d621a773a71ac67 | refs/heads/master | 2020-05-19T12:48:17.144882 | 2019-05-05T11:53:46 | 2019-05-05T11:53:46 | 185,023,995 | 1 | 0 | MIT | 2019-05-05T11:45:42 | 2019-05-05T11:45:41 | null | UTF-8 | Python | false | false | 834 | py | #!/usr/bin/python
# coding=utf-8
"""
第 0020 题: 登陆中国联通网上营业厅 后选择「自助服务」 --> 「详单查询」,然后选择你要查询的时间段,
点击「查询」按钮,查询结果页面的最下方,点击「导出」,就会生成类似于 2014年10月01日~2014年10月31日
通话详单.xls 文件。写代码,对每月通话时间做个统计。
"""
import xlrd
def count_the_dail_time(filename):
excel = xlrd.open_workbook(filename)
sheet = excel.sheet_by_index(0)
row_nums = sheet.nrows
col_nums = sheet.ncols
total_time = 0
for i in range(1,row_nums):
total_time += int(sheet.cell_value(i, 3))
return total_time
if __name__ == "__main__":
total_len = count_the_dail_time("src.xls")
print "本月通话时长为" + total_len + "秒"
| [
"caozijun007@163.com"
] | caozijun007@163.com |
31210d8e9ca34adeac2eb884389740c085812bb4 | 377fc6e13101a2a45826cd118110c790f396a805 | /joi2006yo-a.py | 76c9acea6c793943ca033f4c33a8547e23ddc800 | [] | no_license | number09/atcoder | 4076e7223f424b9923754e73992d6442e0bb0de7 | f521ca1205b254d99744abaf6a7a5bfe69845fe0 | refs/heads/master | 2021-06-04T23:16:39.021645 | 2021-01-19T08:30:39 | 2021-01-19T08:30:39 | 132,128,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | n = int(input())
li_cards = []
for _ in range(n):
li_cards.append(list(map(int, input().split())))
score_a = 0
score_b = 0
for c in li_cards:
if c[0] > c[1]:
score_a += sum(c)
elif c[0] < c[1]:
score_b += sum(c)
else:
score_a += c[0]
score_b += c[1]
print(score_a, score_b) | [
"12413803+number09@users.noreply.github.com"
] | 12413803+number09@users.noreply.github.com |
fd79e1ac0a286ca45b1d4946256e9a1e4aba4141 | a1a7e8c2f086f7706b82d7d4206efa6a3bd8befa | /coin/exchange/mtgox.py | a65b27736c22a15976cd0ed1b85589b71a32e59d | [
"MIT"
] | permissive | aitoraznar/coinprice-indicator | 1d29aa99e68cf2eb0f58f52d97396ba6dfd7bc5a | c8f6021bed7832dc7251386bbaeb907af245e6aa | refs/heads/master | 2021-01-17T18:08:15.329411 | 2017-06-27T09:40:08 | 2017-06-27T09:40:08 | 95,542,549 | 0 | 0 | null | 2017-06-27T09:39:11 | 2017-06-27T09:39:11 | null | UTF-8 | Python | false | false | 1,966 | py | # -*- coding: utf-8 -*-
# MtGox
# https://en.bitcoin.it/wiki/MtGox/API/HTTP/v2
# Legacy code
__author__ = "nil.gradisnik@gmail.com"
from gi.repository import GLib
import requests
import utils
from exchange.error import Error
from alarm import Alarm
CONFIG = {
'ticker': 'http://data.mtgox.com/api/2/',
'ticker_suffix': '/money/ticker',
'asset_pairs': [
{
'code': 'BTCUSD',
'name': 'BTC to USD'
},
{
'code': 'BTCEUR',
'name': 'BTC to EUR'
}
]
}
class MtGox:
def __init__(self, config, indicator):
self.indicator = indicator
self.timeout_id = 0
self.alarm = Alarm(config['app']['name'])
self.error = Error(self)
def start(self, error_refresh=None):
refresh = error_refresh if error_refresh else self.indicator.refresh_frequency
self.timeout_id = GLib.timeout_add_seconds(refresh, self.check_price)
def stop(self):
if self.timeout_id:
GLib.source_remove(self.timeout_id)
def check_price(self):
self.asset_pair = self.indicator.active_asset_pair
try:
res = requests.get(CONFIG['ticker'] + self.asset_pair + CONFIG['ticker_suffix'])
data = res.json()
if data:
self._parse_result(data['data'])
except Exception as e:
print(e)
self.error.increment()
return self.error.is_ok()
def _parse_result(self, data):
self.error.clear()
label = data['last']['display_short']
bid = utils.category['bid'] + data['buy']['display_short']
high = utils.category['high'] + data['high']['display_short']
low = utils.category['low'] + data['low']['display_short']
ask = utils.category['ask'] + data['sell']['display_short']
volume = utils.category['volume'] + data['vol']['display_short']
# if self.alarm:
# self.alarm.check(float(data["last"]))
self.indicator.set_data(label, bid, high, low, ask, volume)
def _handle_error(self, error):
print("MtGox API error: " + error[0])
| [
"nil.gradisnik@gmail.com"
] | nil.gradisnik@gmail.com |
4961fb73566b13cf8d008c649a41c4206e23a2cb | b9f2b8478e84ab78b93faa703433f8034fd65444 | /crystalyzation.py | 29d63276ed62d9d7e5bbf20d5108baab47a312d6 | [] | no_license | leloulight/inasra | 301d3b87f66010474ab4c95c54b2fd01fb3450e8 | fd5a3201919fd61f5f4d3667607292f8678cf48b | refs/heads/master | 2021-01-23T20:44:08.867791 | 2016-01-15T17:12:36 | 2016-01-15T17:12:36 | 49,732,933 | 0 | 0 | null | 2016-01-15T17:07:12 | 2016-01-15T17:07:11 | null | UTF-8 | Python | false | false | 1,683 | py | #!/usr/bin/env python
import json
import re
import pdb
from glob import glob as glob
import random
import socket
board = json.loads(open("xwordspine.json").read())
def boardtrim(board):
destroy = 1
for each in board[-1]:
if each is ' ':
pass
else:
destroy = 0
if destroy == 1:
board.pop(-1)
boardtrim(board)
elif destroy == 0:
print('trimmed')
boardtrim(board)
board = list(zip(*board))
for each in board:
each = list(each)
boardtrim(board)
board = list(zip(*board))
for each in range(len(board)):
board[each] = list(board[each])
depants = open('visualyze3d/thepants.txt','w')
for each in range(len(board)):
for space in range(len(board[each])):
if board[each][space] == ' ':
print('')
else:
goods = board[each][space]+' 0 '+str(.4*each)+' '+str(-.4*space)+';\n'
depants.write(goods)
depants.close()
pdb.set_trace()
#place 1 horizontal
wordbones = []
for each_square in board[0]:
wordbones.append(each_square.replace(' ', '.'))
for each_square in range(len(board[1])):
if board[1][each_square] is not ' ':
print(wordbones[each_square])
wordbones[each_square] = board[0][each_square]
print(''.join(wordbones))
mystery_word = re.compile(''.join(wordbones))
acroglob = glob('acro_dicts/*')
maybe_bone = []
for each in acroglob:
maybe_bone.append(json.loads(open(each).read()))
flat_list_of_maybe_bones = []
for each in maybe_bone:
for every in each:
for single in every:
flat_list_of_maybe_bones.append(single)
random.shuffle(flat_list_of_maybe_bones)
pdb.set_trace()
#Why are my for loops broken?
#place -1 horizontal
| [
"deifius@github.com"
] | deifius@github.com |
363e3d0afb2659005eb948006061e1de157752ed | e1fc3c9332122b43664ef64ed7e3b03848a9f5ba | /IMAPTest.py | 13114084fb1b150406262e0577bbd5371d6584ea | [
"MIT"
] | permissive | ModischFabrications/ReMailer | 3adea28e27cab1bd7e488450826ba7483a6b9628 | 68cf1ad6f5d2aca67832810a196f43c2cf610fda | refs/heads/master | 2020-03-27T05:19:56.160939 | 2019-03-05T19:27:33 | 2019-03-05T19:27:33 | 146,011,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | """" simple test cases for receiving messages """
import unittest
from main import *
domain = "mailrobot@mail.xing.com"
class IMAPTEST(unittest.TestCase):
def test_loading(self):
logger = get_logger()
mail_address, password = read_login()
imap_client = connect_imap(logger, mail_address, password)
imap_client.select_folder("INBOX", readonly=save_mode)
mail_UIDs = imap_client.gmail_search("in: inbox, " + domain)
part_to_fetch = "BODY[]"
mail_id = mail_UIDs[0]
mail = imap_client.fetch(mail_id, [part_to_fetch])[mail_id][part_to_fetch.encode()]
print("You've got Mail!")
self.assertGreater(len(mail), 0) # more like > 1k
# end
if __name__ == '__main__':
unittest.main()
| [
"magicmanfoli@gmail.com"
] | magicmanfoli@gmail.com |
c8b010348985198cd32bc04897b0599058a05f38 | 4635ee166ff05ca3d1c85de0d1bf90fb1707dd3c | /mango/metrics/confusion_matrix.py | 80b75888cb9f049ac553e8e4b3b397dbdb8fe3e9 | [] | no_license | Pedrexus/MangoFIL | be9a8018ba2ab7e4086f6414e71f8d7a9eceb820 | 8f6b3f64d2469cbd69fa381425f1d756969b416a | refs/heads/master | 2022-11-21T13:12:57.710178 | 2020-06-26T18:17:59 | 2020-06-26T18:17:59 | 255,701,407 | 0 | 0 | null | 2020-04-17T02:50:35 | 2020-04-14T19:08:59 | Python | UTF-8 | Python | false | false | 1,911 | py | import tensorflow as tf
# make confusion matrix metric
# make sparse categorical Precision, Recall and F1
class SparseCategoricalTruePositives(tf.keras.metrics.Metric):
def __init__(self, print_tensor=False, name="sparse_categorical_true_positives", **kwargs):
super().__init__(name=name, **kwargs)
self.cat_true_positives = self.add_weight(name="ctp", initializer="zeros")
self.print_tensor = print_tensor
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.argmax(y_pred, axis=-1)
y_true = tf.reshape(tf.argmax(y_true, axis=-1), [-1])
if self.print_tensor:
tf.print(y_pred, y_true, summarize=-1)
equal = tf.equal(y_true, y_pred)
equal_int = tf.cast(equal, dtype=tf.float32)
true_poss = tf.reduce_sum(equal_int)
true_float = tf.cast(true_poss, dtype=tf.float32)
self.cat_true_positives.assign_add(true_float)
def result(self):
return self.cat_true_positives
class SparseCategoricalTrueNegatives(tf.keras.metrics.Metric):
def __init__(self, print_tensor=False, name="sparse_categorical_true_positives", **kwargs):
super().__init__(name=name, **kwargs)
self.cat_true_positives = self.add_weight(name="ctp", initializer="zeros")
self.print_tensor = print_tensor
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.argmax(y_pred, axis=-1)
y_true = tf.reshape(tf.argmax(y_true, axis=-1), [-1])
if self.print_tensor:
tf.print(y_pred, y_true, summarize=-1)
equal = tf.equal(y_true, y_pred)
equal_int = tf.cast(equal, dtype=tf.float32)
true_poss = tf.reduce_sum(equal_int)
true_float = tf.cast(true_poss, dtype=tf.float32)
self.cat_true_positives.assign_add(true_float)
def result(self):
return self.cat_true_positives
| [
"vaz.valois@df.ufscar.br"
] | vaz.valois@df.ufscar.br |
35672d6ef46b07f7dea34b4daa56da0c887a86fb | e874c16913e9554786c75d80884533e1db4dbd49 | /job/quality/width.py | 3f32b78b0f457f77a888e1e56679466eb59650ac | [] | no_license | lamsh/misc | f307c34526173766631d0f50b128d3bd7e8724dc | a426a231a008aa1b2b803955f18460e8b0b358ab | refs/heads/master | 2021-01-20T09:21:49.969220 | 2017-11-25T14:00:37 | 2017-11-25T14:00:37 | 90,244,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py | #!/usr/bin/env python3
# coding: utf-8
# (File name: width.py)
# Author: SENOO, Ken
# License: MIT
# (Last update: 2015-01-26T15:44+09:00)
"""
Excelの地形データからwidth.prnを作成する
入力:input.xls
出力:width.prn
"""
import sys
import xlrd
FR = "./input.xls"
START_ROW = 5
START_COL = 4
MAX_NO = 35 # No.の個数
MAX_HEIGHT_INDEX = 32 # 標高の個数
BLOCK_ID = 1
## ファイル取得
wb = xlrd.open_workbook(FR)
sheet_name = wb.sheet_names()
ws = wb.sheet_by_index(0)
width_list = [] # 書き込み用データの格納
for row in range(MAX_HEIGHT_INDEX):
width_list.append([-999.0] + ws.row_values(row + START_ROW, START_COL-1))
width_list.append([-999.0]*(len(width_list[0])))
width_list[:] = width_list[::-1]
## ファイル出力
FW = "./width.prn"
header = [
["block-no", "mi", "mj"],
[BLOCK_ID, MAX_NO+1, MAX_HEIGHT_INDEX+1],
["k", "i", "j", "width"],
]
whead = "\n".join(["\t".join(map(str,row)) for row in header])+"\n"
## 転置
width_list = list(map(list, zip(*width_list)))
## 0の値は-999.0に置換
for row in range(len(width_list)):
for col in range(len(width_list[row])):
if width_list[row][col] == 0:
width_list[row][col] = -999.0
wval = []
for ri, row in enumerate(width_list, start=1):
for ci, col in enumerate(row, start=1):
wval.append("{k}\t{x}\t{y}\t{width}".format(k=BLOCK_ID, x=ri, y=ci, width=col))
with open(FW, "w", encoding="utf-8", newline="\n") as fw:
fw.write(whead)
fw.write("\n".join(wval))
## 5以下の値を-999.0、5-10を10に mask
for row in range(len(width_list)):
for col in range(len(width_list[row])):
if width_list[row][col] <= 5:
width_list[row][col] = -999.0
if 5 < width_list[row][col] < 10:
width_list[row][col] = 10
wval = []
for ri, row in enumerate(width_list, start=1):
for ci, col in enumerate(row, start=1):
wval.append("{k}\t{x}\t{y}\t{width}".format(k=BLOCK_ID, x=ri, y=ci, width=col))
FW = "./width-masked.prn"
with open(FW, "w", encoding="utf-8", newline="\n") as fw:
fw.write(whead)
fw.write("\n".join(wval))
| [
"mslamsh20131029@outlook.jp"
] | mslamsh20131029@outlook.jp |
849bc3bb90ec4d300eed4c9ce126e2b3ed2aeef5 | b483c598fa375e9af02348960f210b9f482bd655 | /pythonbrasil/exercicios/listas/LT resp 06.py | 4a956fe6704cf8f89b2b9ac2bdcf1bef84176545 | [
"MIT"
] | permissive | brunofonsousa/python | 6f766d08bf193180ea9a4903cb93ffd167db588d | 8f2f26c77015c0baaa76174e004406b4115272c7 | refs/heads/master | 2022-09-30T14:58:01.080749 | 2020-06-08T09:55:35 | 2020-06-08T09:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | '''
Faça um Programa que peça as quatro notas de 10 alunos, calcule e
armazene num vetor a média de cada aluno, imprima o número de alunos
com média maior ou igual a 7.0.
'''
alunos = 2
nota = 0
soma = 0
for i in range(1,3):
notas = []
for j in range(1,3):
nota += float(input("Digite a %iª nota do aluno %i: " %(i, j)))
nota /= 2
notas.append(nota)
for media in notas:
if media > 7:
soma += 1
print("O número de alunos com média maior que 7.00 foi de %i." %soma)
| [
"brunofonsousa@gmail.com"
] | brunofonsousa@gmail.com |
683033b34e5ba82571bedabf75dda4cfedc1e88c | bb62f4738e32b82904b61d4be9d21b41d05ed694 | /motion_planners/rrt_connect.py | bb1702b156f42c9066f8eda37cc052634eb5eeba | [
"MIT"
] | permissive | yhome22/motion-planners | 34049b1f65cb8f45d656ce61d94e4a605d861615 | 891423418a9c6ac5d6fbe2bbc9c51087ae7d9b03 | refs/heads/master | 2023-06-11T10:38:10.807421 | 2021-06-15T23:54:51 | 2021-06-15T23:54:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,461 | py | import time
from .primitives import extend_towards
from .rrt import TreeNode, configs
from .utils import irange, RRT_ITERATIONS, INF, elapsed_time
def wrap_collision_fn(collision_fn):
# TODO: joint limits
# import inspect
# print(inspect.getargspec(collision_fn))
# print(dir(collision_fn))
def fn(q1, q2):
try:
return collision_fn(q1, q2)
except TypeError:
return collision_fn(q2)
return fn
def rrt_connect(start, goal, distance_fn, sample_fn, extend_fn, collision_fn,
max_iterations=RRT_ITERATIONS, max_time=INF, **kwargs):
"""
:param start: Start configuration - conf
:param goal: End configuration - conf
:param distance_fn: Distance function - distance_fn(q1, q2)->float
:param sample_fn: Sample function - sample_fn()->conf
:param extend_fn: Extension function - extend_fn(q1, q2)->[q', ..., q"]
:param collision_fn: Collision function - collision_fn(q)->bool
:param max_iterations: Maximum number of iterations - int
:param max_time: Maximum runtime - float
:param kwargs: Keyword arguments
:return: Path [q', ..., q"] or None if unable to find a solution
"""
# TODO: goal sampling function connected to a None node
start_time = time.time()
if collision_fn(start) or collision_fn(goal):
return None
# TODO: support continuous collision_fn with two arguments
#collision_fn = wrap_collision_fn(collision_fn)
nodes1, nodes2 = [TreeNode(start)], [TreeNode(goal)] # TODO: allow a tree to be prespecified (possibly as start)
for iteration in irange(max_iterations):
if elapsed_time(start_time) >= max_time:
break
swap = len(nodes1) > len(nodes2)
tree1, tree2 = nodes1, nodes2
if swap:
tree1, tree2 = nodes2, nodes1
target = sample_fn()
last1, _ = extend_towards(tree1, target, distance_fn, extend_fn, collision_fn,
swap, **kwargs)
last2, success = extend_towards(tree2, last1.config, distance_fn, extend_fn, collision_fn,
not swap, **kwargs)
if success:
path1, path2 = last1.retrace(), last2.retrace()
if swap:
path1, path2 = path2, path1
#print('{} max_iterations, {} nodes'.format(iteration, len(nodes1) + len(nodes2)))
path = configs(path1[:-1] + path2[::-1])
# TODO: return the trees
return path
return None
#################################################################
def birrt(start, goal, distance_fn, sample_fn, extend_fn, collision_fn, **kwargs):
"""
:param start: Start configuration - conf
:param goal: End configuration - conf
:param distance_fn: Distance function - distance_fn(q1, q2)->float
:param sample_fn: Sample function - sample_fn()->conf
:param extend_fn: Extension function - extend_fn(q1, q2)->[q', ..., q"]
:param collision_fn: Collision function - collision_fn(q)->bool
:param kwargs: Keyword arguments
:return: Path [q', ..., q"] or None if unable to find a solution
"""
# TODO: deprecate
from .meta import random_restarts
solutions = random_restarts(rrt_connect, start, goal, distance_fn, sample_fn, extend_fn, collision_fn,
max_solutions=1, **kwargs)
if not solutions:
return None
return solutions[0]
| [
"caelan@mit.edu"
] | caelan@mit.edu |
a113f792beca5c6c69a940ec55db1bb98da0b3e2 | c2186b2d1c3853a5f3bd964738d5be042b34fe02 | /Plot/Plot/Plot.py | 49763173ab1d2ea76f0ceaff28f90aeb3ac39e60 | [] | no_license | TwentyO/plot_test | 666336d1feb83f22b19c783de4a2f7a11d2d35ba | e345f886705b9820ca8715a1731906161c7fb85a | refs/heads/master | 2020-12-02T05:18:38.667673 | 2019-12-30T11:27:02 | 2019-12-30T11:27:02 | 230,902,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | import matplotlib.pyplot as plt
X=range(-100,101)
Y=[x**2 for x in X]
plt.plot(X,Y)
plt.show() | [
"934733443@qq.com"
] | 934733443@qq.com |
c4d6c81822cc229ff05f3173a338729020ce79ce | 7f6790144891fcd48d0394a5272b2a8a6bf17d02 | /warmups/api_call.py | d0eecdca4da90babb0e8c63c0c959ae489a24015 | [] | no_license | Ismaelleyva/Project2 | 47e53cac242ac0593c1e2da1f94b243348c5a21d | 241517015e417a95c35da9b755720cf51fb0a1af | refs/heads/master | 2020-04-04T11:23:08.564971 | 2018-11-02T15:58:57 | 2018-11-02T15:58:57 | 155,889,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | api_token = '6iEWZ7RYlmZ2XGj56Q38GiymIDVkG61WlrSR7SLn'
url= "https://api.nasa.gov/mars-photos/api/v1/rovers/curiosity/photos?sol=1000&camera=fhaz&"
for api_token in url:
print(url+api_token)
| [
"2020ileyva@01889.dwight.edu"
] | 2020ileyva@01889.dwight.edu |
28e707de27e0fe1f7588660aebc250a2a516e8ce | a70d42ddfc84715e1e8ee680d349c49956508ca4 | /accounts/urls.py | c66a0da314140da6b4f3f77093b8ab3e6c669e9d | [] | no_license | SinghSujitkumar/Art-De-Galler | 5bf1814d8342ca6e9009b13090a29bc0c7551007 | 92b76b79aa639f7c1097340278362975c8c0076a | refs/heads/master | 2020-05-18T13:39:20.997094 | 2019-05-01T16:25:21 | 2019-05-01T16:25:21 | 184,445,399 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from django.urls import path
from . import views
urlpatterns = [
path('login', views.login, name='login'),
path('register', views.register, name="register"),
path('logout', views.logout, name="logout"),
path('dashboard', views.dashboard, name="dashboard"),
]
| [
"2017.sujitkumar.singh@ves.ac.in"
] | 2017.sujitkumar.singh@ves.ac.in |
45421fee2f3fe62e4799b14fe5137854032b46cb | 7de678a702845e555e1d24e6791224af9bb2a5c0 | /server/common/db.py | 8687c8780ed75eb5e9a0d467b8cef9d87cff0884 | [] | no_license | kongfy/dolphind | 198d93707f96b839c29ea983737c97cbfbe1f44b | abdf75c273fc6fb9901d8856a2db48a74ca90ec0 | refs/heads/master | 2021-01-17T04:51:38.472626 | 2014-12-11T11:00:19 | 2014-12-11T11:00:19 | 25,820,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # -*- coding: utf-8 -*-
"""
Database model, contain a global database connection pool
DBPOOL : global database connection pool for dolphin
"""
from twisted.enterprise import adbapi
from common import config
DBPOOL = adbapi.ConnectionPool("MySQLdb",
host=config.CFG['database']['host'],
port=int(config.CFG['database']['port']),
user=config.CFG['database']['user'],
passwd=config.CFG['database']['passwd'],
db=config.CFG['database']['db'],
cp_reconnect=True)
| [
"njukongfy@gmail.com"
] | njukongfy@gmail.com |
9dbb15b3b965cae663cb4e6bca1426395e761504 | 0655ce817527b7d6e7a946004dfe83a5454286c4 | /mysite/child/migrations/0012_state.py | 897c02b37cfaace62896612698d87d3b60780051 | [] | no_license | manicdepravity/ChildCarePortal | 3c06006e30639bdce730f5fb774fc43de9550152 | 16216c4a80303af247e94add6782bea8ddf3b988 | refs/heads/master | 2023-03-18T03:19:38.605379 | 2019-10-28T05:43:33 | 2019-10-28T05:43:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-10 10:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('child', '0011_auto_20180110_0855'),
]
operations = [
migrations.CreateModel(
name='state',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
]
| [
"kjatin6599@gmail.com"
] | kjatin6599@gmail.com |
a355e886eb3b0f015d09b83db9569c0bdae3699d | e963e938ce5e049f768ae6cf3bb74c7dd85bace0 | /QML_DQN_FROZEN_LAKE.py | 3803b2838dcf9af7c38ec1113f05cef943bed749 | [
"MIT"
] | permissive | michelangelo21/QHack-open_hackaton-QUBIT | b1625b7ae5ad19641d2ca7ea729545451fc44367 | 49043e78892985877cea78e371bf989b25b833d0 | refs/heads/main | 2023-03-08T09:19:43.206784 | 2021-02-26T22:42:04 | 2021-02-26T22:42:04 | 341,231,946 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,298 | py | # QML as Q Learning function approximator
# Need to specify STATE input format
# Computational Basis Encoding
# action output is still softmax [a_0, a_1, a_2, a_3, a_4, a_5]
# Deep Q-Learning DQN
# Experience Replay (for i.i.d. sampling)
# Target Network (updated every C episodes) ==> Another Circuit Parameter Set
# This version is enhanced with PyTorch
# Adapt some code from
# PyTorch tutorial on deep reinforcement learning
# and
# Xanadu AI github repository
# Environment: OpenAI gym FrozenLake
##
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
import torch
import torch.nn as nn
from torch.autograd import Variable
import matplotlib.pyplot as plt
from datetime import datetime
import pickle
import gym
import time
import random
from collections import namedtuple
from copy import deepcopy
from gym.envs.registration import register
# Register a deterministic (non-slippery) 4x4 FrozenLake variant; the env class
# lives in the project-local ShortestPathFrozenLake module (presumably
# reward-shaped toward shortest paths — confirm in that module).
register(
    id='Deterministic-ShortestPath-4x4-FrozenLake-v0', # name given to this new environment
    entry_point='ShortestPathFrozenLake:ShortestPathFrozenLake', # env entry point
    kwargs={'map_name': '4x4', 'is_slippery': False} # argument passed to the env
)

# Alternative: the stock gym deterministic FrozenLake (kept for reference).
# register(
#     id='Deterministic-4x4-FrozenLake-v0', # name given to this new environment
#     entry_point='gym.envs.toy_text.frozen_lake:FrozenLakeEnv', # env entry point
#     kwargs={'map_name': '4x4', 'is_slippery': False} # argument passed to the env
# )
## Replay-memory element: one environment step.
## A transition whose next_state is None marks a terminal state.
Transition = namedtuple(
    'Transition',
    ['state', 'action', 'reward', 'next_state', 'done'],
)
class ReplayMemory(object):
    """Fixed-capacity ring buffer of Transition tuples for experience replay."""

    def __init__(self, capacity):
        self.capacity = capacity   # maximum number of stored transitions
        self.memory = []           # underlying storage; grows until capacity
        self.position = 0          # index of the next slot to (over)write

    def push(self, *args):
        """Save a transition, overwriting the oldest entry once full."""
        entry = Transition(*args)
        if len(self.memory) < self.capacity:
            self.memory.append(entry)
        else:
            self.memory[self.position] = entry
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Draw `batch_size` stored transitions uniformly at random."""
        return random.sample(self.memory, batch_size)

    def output_all(self):
        """Return the underlying memory list itself (not a copy)."""
        return self.memory

    def __len__(self):
        return len(self.memory)
####
## Plotting Function ##
"""
Note: the plotting code originates from Yang, Chao-Han Huck, et al. "Enhanced Adversarial Strategically-Timed Attacks Against Deep Reinforcement Learning."
## ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP). IEEE, 2020.
If you use this code in your research, please cite the original reference.
"""
def plotTrainingResultCombined(_iter_index, _iter_reward, _iter_total_steps, _fileTitle):
    """Plot reward and total steps per iteration on one figure and save a PNG.

    The output file name is `<_fileTitle>_NO<timestamp>.png`.
    """
    fig, ax = plt.subplots()
    ax.plot(_iter_index, _iter_reward, '-b', label='Reward')
    ax.plot(_iter_index, _iter_total_steps, '-r', label='Total Steps')
    ax.legend()
    ax.set(xlabel='Iteration Index', title=_fileTitle)
    stamp = datetime.now().strftime("NO%Y%m%d%H%M%S")
    fig.savefig("{}_{}.png".format(_fileTitle, stamp))
def plotTrainingResultReward(_iter_index, _iter_reward, _iter_total_steps, _fileTitle):
    """Plot only the per-iteration reward curve and save a PNG.

    `_iter_total_steps` is accepted for interface compatibility but not plotted.
    The output file name is `<_fileTitle>_REWARD_NO<timestamp>.png`.
    """
    fig, ax = plt.subplots()
    ax.plot(_iter_index, _iter_reward, '-b', label='Reward')
    ax.legend()
    ax.set(xlabel='Iteration Index', title=_fileTitle)
    stamp = datetime.now().strftime("NO%Y%m%d%H%M%S")
    fig.savefig("{}_REWARD_{}.png".format(_fileTitle, stamp))
########################################
def decimalToBinaryFixLength(_length, _decimal):
    """Convert `_decimal` to a binary-digit vector, zero-padded on the left.

    Returns a numpy array of length `_length` (float dtype when padding was
    needed, int dtype otherwise — padding concatenates float zeros).  If the
    binary representation is longer than `_length`, it is returned unpadded.
    """
    digits = np.array([int(ch) for ch in bin(int(_decimal))[2:]])
    pad = _length - len(digits)
    if pad > 0:
        return np.concatenate((np.zeros(pad), digits))
    return digits
## PennyLane Part ##
# Specify the datatype of the Torch tensor
dtype = torch.DoubleTensor
## Define a FOUR qubit system (default.qubit statevector simulator)
dev = qml.device('default.qubit', wires=4)
# Alternative backend (requires the pennylane-qiskit plugin):
# dev = qml.device('qiskit.basicaer', wires=4)
def statepreparation(a):
"""Quantum circuit to encode a the input vector into variational params
Args:
a: feature vector of rad and rad_square => np.array([rad_X_0, rad_X_1, rad_square_X_0, rad_square_X_1])
"""
# Rot to computational basis encoding
# a = [a_0, a_1, a_2, a_3, a_4, a_5, a_6, a_7, a_8]
for ind in range(len(a)):
qml.RX(np.pi * a[ind], wires=ind)
qml.RZ(np.pi * a[ind], wires=ind)
def layer(W):
""" Single layer of the variational classifier.
Args:
W (array[float]): 2-d array of variables for one layer
"""
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
qml.CNOT(wires=[2, 3])
qml.Rot(W[0, 0], W[0, 1], W[0, 2], wires=0)
qml.Rot(W[1, 0], W[1, 1], W[1, 2], wires=1)
qml.Rot(W[2, 0], W[2, 1], W[2, 2], wires=2)
qml.Rot(W[3, 0], W[3, 1], W[3, 2], wires=3)
@qml.qnode(dev, interface='torch')
def circuit(weights, angles=None):
"""The circuit of the variational classifier."""
# Can consider different expectation value
# PauliX , PauliY , PauliZ , Identity
statepreparation(angles)
for W in weights:
layer(W)
return [qml.expval(qml.PauliZ(ind)) for ind in range(4)]
def variational_classifier(var_Q_circuit, var_Q_bias , angles=None):
"""The variational classifier."""
# Change to SoftMax???
weights = var_Q_circuit
# bias_1 = var_Q_bias[0]
# bias_2 = var_Q_bias[1]
# bias_3 = var_Q_bias[2]
# bias_4 = var_Q_bias[3]
# bias_5 = var_Q_bias[4]
# bias_6 = var_Q_bias[5]
# raw_output = circuit(weights, angles=angles) + np.array([bias_1,bias_2,bias_3,bias_4,bias_5,bias_6])
raw_output = circuit(weights, angles=angles) + var_Q_bias
# We are approximating Q Value
# Maybe softmax is no need
# softMaxOutPut = np.exp(raw_output) / np.exp(raw_output).sum()
return raw_output
def square_loss(labels, predictions):
""" Square loss function
Args:
labels (array[float]): 1-d array of labels
predictions (array[float]): 1-d array of predictions
Returns:
float: square loss
"""
loss = 0
for l, p in zip(labels, predictions):
loss = loss + (l - p) ** 2
loss = loss / len(labels)
# print("LOSS")
# print(loss)
# output = torch.abs(predictions - labels)**2
# output = torch.sum(output) / len(labels)
# loss = nn.MSELoss()
# output = loss(labels.double(), predictions.double())
return loss
# def square_loss(labels, predictions):
# """ Square loss function
# Args:
# labels (array[float]): 1-d array of labels
# predictions (array[float]): 1-d array of predictions
# Returns:
# float: square loss
# """
# # In Deep Q Learning
# # labels = target_action_value_Q
# # predictions = action_value_Q
# # loss = 0
# # for l, p in zip(labels, predictions):
# # loss = loss + (l - p) ** 2
# # loss = loss / len(labels)
# # loss = nn.MSELoss()
# output = torch.abs(predictions - labels)**2
# output = torch.sum(output) / len(labels)
# # output = loss(torch.tensor(predictions), torch.tensor(labels))
# # print("LOSS OUTPUT")
# # print(output)
# return output
def abs_loss(labels, predictions):
""" Square loss function
Args:
labels (array[float]): 1-d array of labels
predictions (array[float]): 1-d array of predictions
Returns:
float: square loss
"""
# In Deep Q Learning
# labels = target_action_value_Q
# predictions = action_value_Q
# loss = 0
# for l, p in zip(labels, predictions):
# loss = loss + (l - p) ** 2
# loss = loss / len(labels)
# loss = nn.MSELoss()
output = torch.abs(predictions - labels)
output = torch.sum(output) / len(labels)
# output = loss(torch.tensor(predictions), torch.tensor(labels))
# print("LOSS OUTPUT")
# print(output)
return output
def huber_loss(labels, predictions):
""" Square loss function
Args:
labels (array[float]): 1-d array of labels
predictions (array[float]): 1-d array of predictions
Returns:
float: square loss
"""
# In Deep Q Learning
# labels = target_action_value_Q
# predictions = action_value_Q
# loss = 0
# for l, p in zip(labels, predictions):
# loss = loss + (l - p) ** 2
# loss = loss / len(labels)
# loss = nn.MSELoss()
loss = nn.SmoothL1Loss()
# output = loss(torch.tensor(predictions), torch.tensor(labels))
# print("LOSS OUTPUT")
# print(output)
return loss(labels, predictions)
def cost(var_Q_circuit, var_Q_bias, features, labels):
"""Cost (error) function to be minimized."""
# predictions = [variational_classifier(weights, angles=f) for f in features]
# Torch data type??
predictions = [variational_classifier(var_Q_circuit = var_Q_circuit, var_Q_bias = var_Q_bias, angles=decimalToBinaryFixLength(4,item.state))[item.action] for item in features]
# predictions = torch.tensor(predictions,requires_grad=True)
# labels = torch.tensor(labels)
# print("PRIDICTIONS:")
# print(predictions)
# print("LABELS:")
# print(labels)
return square_loss(labels, predictions)
#############################
def epsilon_greedy(var_Q_circuit, var_Q_bias, epsilon, n_actions, s, train=False):
"""
@param Q Q values state x action -> value
@param epsilon for exploration
@param s number of states
@param train if true then no random actions selected
"""
# Modify to incorporate with Variational Quantum Classifier
# epsilon should change along training
# In the beginning => More Exploration
# In the end => More Exploitation
# More Random
#np.random.seed(int(datetime.now().strftime("%S%f")))
if train or np.random.rand() < ((epsilon/n_actions)+(1-epsilon)):
# action = np.argmax(Q[s, :])
# variational classifier output is torch tensor
# action = np.argmax(variational_classifier(var_Q_circuit = var_Q_circuit, var_Q_bias = var_Q_bias, angles = decimalToBinaryFixLength(9,s)))
action = torch.argmax(variational_classifier(var_Q_circuit = var_Q_circuit, var_Q_bias = var_Q_bias, angles = decimalToBinaryFixLength(4,s)))
else:
# need to be torch tensor
action = torch.tensor(np.random.randint(0, n_actions))
return action
def deep_Q_Learning(alpha, gamma, epsilon, episodes, max_steps, n_tests, render = False, test=False):
"""
@param alpha learning rate
@param gamma decay factor
@param epsilon for exploration
@param max_steps for max step in each episode
@param n_tests number of test episodes
"""
env = gym.make('Deterministic-ShortestPath-4x4-FrozenLake-v0')
# env = gym.make('Deterministic-4x4-FrozenLake-v0')
n_states, n_actions = env.observation_space.n, env.action_space.n
print("NUMBER OF STATES:" + str(n_states))
print("NUMBER OF ACTIONS:" + str(n_actions))
# Initialize Q function approximator variational quantum circuit
# initialize weight layers
num_qubits = 4
num_layers = 2
# var_init = (0.01 * np.random.randn(num_layers, num_qubits, 3), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
var_init_circuit = Variable(torch.tensor(0.01 * np.random.randn(num_layers, num_qubits, 3), device='cpu').type(dtype), requires_grad=True)
var_init_bias = Variable(torch.tensor([0.0, 0.0, 0.0, 0.0], device='cpu').type(dtype), requires_grad=True)
# Define the two Q value function initial parameters
# Use np copy() function to DEEP COPY the numpy array
var_Q_circuit = var_init_circuit
var_Q_bias = var_init_bias
# print("INIT PARAMS")
# print(var_Q_circuit)
var_target_Q_circuit = var_Q_circuit.clone().detach()
var_target_Q_bias = var_Q_bias.clone().detach()
##########################
# Optimization method => random select train batch from replay memory
# and opt
# opt = NesterovMomentumOptimizer(0.01)
# opt = torch.optim.Adam([var_Q_circuit, var_Q_bias], lr = 0.1)
# opt = torch.optim.SGD([var_Q_circuit, var_Q_bias], lr=0.1, momentum=0.9)
opt = torch.optim.RMSprop([var_Q_circuit, var_Q_bias], lr=0.01, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)
## NEed to move out of the function
TARGET_UPDATE = 20
batch_size = 5
OPTIMIZE_STEPS = 5
##
target_update_counter = 0
iter_index = []
iter_reward = []
iter_total_steps = []
cost_list = []
timestep_reward = []
# Demo of generating a ACTION
# Output a numpy array of value for each action
# Define the replay memory
# Each transition:
# (s_t_0, a_t_0, r_t, s_t_1, 'DONE')
memory = ReplayMemory(80)
# Input Angle = decimalToBinaryFixLength(9, stateInd)
# Input Angle is a numpy array
# stateVector = decimalToBinaryFixLength(9, stateInd)
# q_val_s_t = variational_classifier(var_Q, angles=stateVector)
# # action_t = q_val_s_t.argmax()
# action_t = epsilon_greedy(var_Q, epsilon, n_actions, s)
# q_val_target_s_t = variational_classifier(var_target_Q, angles=stateVector)
# train the variational classifier
for episode in range(episodes):
print(f"Episode: {episode}")
# Output a s in decimal format
s = env.reset()
# Doing epsilog greedy action selection
# With var_Q
a = epsilon_greedy(var_Q_circuit = var_Q_circuit, var_Q_bias = var_Q_bias, epsilon = epsilon, n_actions = n_actions, s = s).item()
t = 0
total_reward = 0
done = False
while t < max_steps:
if render:
print("###RENDER###")
env.render()
print("###RENDER###")
t += 1
target_update_counter += 1
# Execute the action
s_, reward, done, info = env.step(a)
# print("Reward : " + str(reward))
# print("Done : " + str(done))
total_reward += reward
# a_ = np.argmax(Q[s_, :])
a_ = epsilon_greedy(var_Q_circuit = var_Q_circuit, var_Q_bias = var_Q_bias, epsilon = epsilon, n_actions = n_actions, s = s_).item()
# print("ACTION:")
# print(a_)
memory.push(s, a, reward, s_, done)
if len(memory) > batch_size:
# Sampling Mini_Batch from Replay Memory
batch_sampled = memory.sample(batch_size = batch_size)
# Transition = (s_t, a_t, r_t, s_t+1, done(True / False))
# item.state => state
# item.action => action taken at state s
# item.reward => reward given based on (s,a)
# item.next_state => state arrived based on (s,a)
Q_target = [item.reward + (1 - int(item.done)) * gamma * torch.max(variational_classifier(var_Q_circuit = var_target_Q_circuit, var_Q_bias = var_target_Q_bias, angles=decimalToBinaryFixLength(4,item.next_state))) for item in batch_sampled]
# Q_prediction = [variational_classifier(var_Q, angles=decimalToBinaryFixLength(9,item.state))[item.action] for item in batch_sampled ]
# Gradient Descent
# cost(weights, features, labels)
# square_loss_training = square_loss(labels = Q_target, Q_predictions)
# print("UPDATING PARAMS...")
# CHANGE TO TORCH OPTIMIZER
# var_Q = opt.step(lambda v: cost(v, batch_sampled, Q_target), var_Q)
# opt.zero_grad()
# loss = cost(var_Q_circuit = var_Q_circuit, var_Q_bias = var_Q_bias, features = batch_sampled, labels = Q_target)
# print(loss)
# FIX this gradient error
# loss.backward()
# opt.step(loss)
def closure():
opt.zero_grad()
loss = cost(var_Q_circuit = var_Q_circuit, var_Q_bias = var_Q_bias, features = batch_sampled, labels = Q_target)
# print(loss)
loss.backward()
return loss
opt.step(closure)
# print("UPDATING PARAMS COMPLETED")
current_replay_memory = memory.output_all()
current_target_for_replay_memory = [item.reward + (1 - int(item.done)) * gamma * torch.max(variational_classifier(var_Q_circuit = var_target_Q_circuit, var_Q_bias = var_target_Q_bias, angles=decimalToBinaryFixLength(4,item.next_state))) for item in current_replay_memory]
# current_target_for_replay_memory = [item.reward + (1 - int(item.done)) * gamma * np.max(variational_classifier(var_target_Q, angles=decimalToBinaryFixLength(9,item.next_state))) for item in current_replay_memory]
# if t%5 == 0:
# cost_ = cost(var_Q_circuit = var_Q_circuit, var_Q_bias = var_Q_bias, features = current_replay_memory, labels = current_target_for_replay_memory)
# print("Cost: ")
# print(cost_.item())
# cost_list.append(cost_)
if target_update_counter > TARGET_UPDATE:
print("UPDATEING TARGET CIRCUIT...")
var_target_Q_circuit = var_Q_circuit.clone().detach()
var_target_Q_bias = var_Q_bias.clone().detach()
target_update_counter = 0
s, a = s_, a_
if done:
if render:
print("###FINAL RENDER###")
env.render()
print("###FINAL RENDER###")
print(f"This episode took {t} timesteps and reward: {total_reward}")
epsilon = epsilon / ((episode/100) + 1)
# print("Q Circuit Params:")
# print(var_Q_circuit)
print(f"This episode took {t} timesteps and reward: {total_reward}")
timestep_reward.append(total_reward)
iter_index.append(episode)
iter_reward.append(total_reward)
iter_total_steps.append(t)
break
# if render:
# print(f"Here are the Q values:\n{Q}\nTesting now:")
# if test:
# test_agent(Q, env, n_tests, n_actions)
return timestep_reward, iter_index, iter_reward, iter_total_steps, var_Q_circuit, var_Q_bias
# def test_agent(Q, env, n_tests, n_actions, delay=1):
# for test in range(n_tests):
# print(f"Test #{test}")
# s = env.reset()
# done = False
# epsilon = 0
# while True:
# time.sleep(delay)
# env.render()
# a = epsilon_greedy(Q, epsilon, n_actions, s, train=True)
# print(f"Chose action {a} for state {s}")
# s, reward, done, info = env.step(a)
# if done:
# if reward > 0:
# print("Reached goal!")
# else:
# print("Shit! dead x_x")
# time.sleep(3)
# break
# Should add plotting function and KeyboardInterrupt Handler
if __name__ =="__main__":
alpha = 0.4
gamma = 0.999
epsilon = 1.
episodes = 500
max_steps = 2500
n_tests = 2
timestep_reward, iter_index, iter_reward, iter_total_steps , var_Q_circuit, var_Q_bias = deep_Q_Learning(alpha, gamma, epsilon, episodes, max_steps, n_tests, test = False)
print(timestep_reward)
## Drawing Training Result ##
file_title = 'VQDQN_Frozen_Lake_NonSlip_Dynamic_Epsilon_RMSProp' + datetime.now().strftime("NO%Y%m%d%H%M%S")
plotTrainingResultReward(_iter_index = iter_index, _iter_reward = iter_reward, _iter_total_steps = iter_total_steps, _fileTitle = 'Quantum_DQN_Frozen_Lake_NonSlip_Dynamic_Epsilon_RMSProp')
## Saving the model
with open(file_title + "_var_Q_circuit" + ".txt", "wb") as fp:
pickle.dump(var_Q_circuit, fp)
with open(file_title + "_var_Q_bias" + ".txt", "wb") as fp:
pickle.dump(var_Q_bias, fp)
with open(file_title + "_iter_reward" + ".txt", "wb") as fp:
pickle.dump(iter_reward, fp)
| [
"michal.lukomski21@gmail.com"
] | michal.lukomski21@gmail.com |
3ce88a41f23fa6766fa5dbb26e09c2c515d7c943 | 67fcccbd835b97e374665cfe56f645ae9eed4e92 | /ely/wsgi.py | d63386fc694340786b991b1d0784efff568f6d38 | [] | no_license | muscatinedev/ely | c8273fd90dea1bc5e990e1c9624760d0061532c1 | b36a514ad3fcea138d3456ac1bff51f22b87ff24 | refs/heads/master | 2023-03-21T02:29:06.174185 | 2021-03-03T19:38:48 | 2021-03-03T19:38:48 | 323,563,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for ely project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ely.settings')
application = get_wsgi_application()
| [
"muscatinedev@gmail.com"
] | muscatinedev@gmail.com |
0351cd3111edde020d59f07b80c3ec3bb06fbf0e | 30d642ef779d361644456ee8ae95d8c1d091e4f4 | /ex26.py | 0f27c5c55ebfbbff2576339b18076b22722ff243 | [] | no_license | maggiebzt/learn-python-the-hard-way | 966ff240917931a69512ae8549785016d6ee6849 | ae2263559ecfc2bfac29c5814da363d670384533 | refs/heads/master | 2020-03-22T19:01:04.617936 | 2018-08-08T22:31:43 | 2018-08-08T22:31:43 | 139,933,067 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,882 | py | def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words): # error 1: missing a colon
"""Prints the first word after popping it off."""
word = words.pop(0) # error 2: misspelled pop to poop haha
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1) # error 3: missing a closing parenthesis
print word
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explantion
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
five = 10 - 2 + 3 - 5
print "This should be five: %s" % five
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000 # error 4: used '\' instead of '/'
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point) # error 5: wrong variable name
# error 12: used '==' instead of '='
print "With a starting point of: %d" % start_point
print "We'd have %d jeans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crabapples." % secret_formula(start_point)
# error 6: on line 74, missing a closing parenthesis
# error 7: on line 74, wrong variable name
sentence = "All god\tthings come to those who weight."
words = break_words(sentence) # error 13: remove reference to ex25
sorted_words = sort_words(words) # error 14: remove reference to ex25
print_first_word(words)
print_last_word(words)
print_first_word(sorted_words) # error 8: a period in the beginning of line
print_last_word(sorted_words)
sorted_words = sort_sentence(sentence) # error 15: remove reference to ex25
print sorted_words # error 9: misspelled print
print_first_and_last(sentence) # error 10: wrong function call
print_first_and_last_sorted(sentence) # error 11: indentation error,
# wrong variable name, wrong function call
| [
"22149904+maggiebzt@users.noreply.github.com"
] | 22149904+maggiebzt@users.noreply.github.com |
14d930c07ef672a334e9d2cd6db46037ca679856 | f659075076dd629862d062963ccbefbf717fa656 | /solvers/annealing.py | 181210dcb7f235977cbdec563cc325d0a51ac193 | [] | no_license | miquelfarre/tsp_mayhem | 35d09d5533388219ead8e12d6aa9f2326e6bef50 | b3888bb874484172a606d0ef4dbb0f58a0c460c4 | refs/heads/master | 2022-12-07T14:00:17.125979 | 2020-08-19T05:02:31 | 2020-08-19T05:02:31 | 288,636,272 | 0 | 0 | null | 2020-08-19T04:50:39 | 2020-08-19T04:50:39 | null | UTF-8 | Python | false | false | 1,437 | py | import random
import math
def acceptance_criteria(distance, new_distance, temp):
if new_distance < distance:
return 1.0
return math.exp((distance - new_distance) / temp)
def get_distance(current_list, cost_matrix):
distance = 0
pre_j = 0
for index in current_list:
distance = distance + cost_matrix[index, pre_j]
pre_j = index
return distance
class Annealing:
def calculate(self, G, cost_matrix, starting_node):
n = len(list(G))
temp = 100
cooling_rate = 0.003
current = [[i] for i in range(0, n)]
random.shuffle(current)
best = current
while temp > 1:
# random indexes must be different
(random_index_1, random_index_2) = random.sample(range(1, n), 2)
swapped = current.copy()
swapped[random_index_1], swapped[random_index_2] = swapped[random_index_2], swapped[random_index_1]
distance = get_distance(current, cost_matrix)
new_distance = get_distance(swapped, cost_matrix)
# annealing acceptance criteria
if acceptance_criteria(distance, new_distance, temp) > random.random():
current = swapped
if get_distance(current, cost_matrix) < get_distance(best, cost_matrix):
best = current
# decrease temp
temp -= cooling_rate
return list(best)
| [
"miquelfarre@Miquels-MacBook-Pro.local"
] | miquelfarre@Miquels-MacBook-Pro.local |
cca4eb53ee84c89cc60dd78279f630ce2adc308d | aea37c205cd97404f2a564fcebaf25cd284c5586 | /Toxicbug/Charlotte/division.py | 099427d714a34d942acc26ba9c015d0190920341 | [] | no_license | kanhaichun/ICS4U | b9457d001f9cdde84894f004409621023bea91ab | bf3e29364707f52fcd5f7b20c7ba6ca1d848af31 | refs/heads/master | 2020-07-31T02:15:38.211002 | 2018-04-26T17:18:25 | 2018-04-26T17:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
(a)Name: chenquancheng
(b)Date: Created on Wed Jan 10 12:41:32 2018
(c)Program Title: division
(d)Purpose: loops through 1000 numbers and determines whether they are divisible by 3 and 19
"""
for i in range(0,1000): #This makes the program loop through 1000 numbers.
if i%3==0: #If the number is divisible by 3,
print("divisible by 3") #print "divisible by 3"
if i%19==0: #If the number is divisible by 19,
print("Multiple of 19!") #print "Multiple of 19!"
| [
"31779684+Charlotte418@users.noreply.github.com"
] | 31779684+Charlotte418@users.noreply.github.com |
938d74f683b6899da1a3a4e45a9ca95feeccf13d | 5b777b268b804bc984f87d714ef25677ab10fab1 | /causallib/estimation/marginal_outcome.py | 6c83f15371c3f78daa5f11e480dbc6c2d0148bae | [
"Apache-2.0"
] | permissive | vishalbelsare/causallib | 71c06cafbf9d3f2163c4921d64cab8d36413ca67 | 9f0ddb4696d580cf0a529a6c6ce98b40b34e3796 | refs/heads/master | 2023-07-10T09:57:57.293064 | 2022-12-19T15:19:28 | 2022-12-19T15:19:28 | 230,206,247 | 0 | 0 | Apache-2.0 | 2022-12-22T00:45:47 | 2019-12-26T06:14:10 | Python | UTF-8 | Python | false | false | 3,669 | py | """
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Apr 25, 2018
"""
import pandas as pd
from .base_weight import WeightEstimator
from .base_estimator import PopulationOutcomeEstimator
class MarginalOutcomeEstimator(WeightEstimator, PopulationOutcomeEstimator):
"""
A marginal outcome predictor.
Assumes the sample is marginally exchangeable, and therefore does not correct (adjust, control) for covariates.
Predicts the outcome/effect as if the sample came from a randomized control trial: $\\Pr[Y|A]$.
"""
def compute_weight_matrix(self, X, a, use_stabilized=None, **kwargs):
# Another way to view this is that Uncorrected is basically an IPW-like with all individuals equally weighted.
treatment_values = a.unique()
treatment_values = treatment_values.sort()
weights = pd.DataFrame(data=1, index=a.index, columns=treatment_values)
return weights
def compute_weights(self, X, a, treatment_values=None, use_stabilized=None, **kwargs):
# Another way to view this is that Uncorrected is basically an IPW-like with all individuals equally weighted.
weights = pd.Series(data=1, index=a.index)
return weights
def fit(self, X=None, a=None, y=None):
"""
Dummy implementation to match the API.
MarginalOutcomeEstimator acts as a WeightEstimator that weights each sample as 1
Args:
X (pd.DataFrame): Covariate matrix of size (num_subjects, num_features).
a (pd.Series): Treatment assignment of size (num_subjects,).
y (pd.Series): Observed outcome of size (num_subjects,).
Returns:
MarginalOutcomeEstimator: a fitted model.
"""
return self
def estimate_population_outcome(self, X, a, y, w=None, treatment_values=None):
"""
Calculates potential population outcome for each treatment value.
Args:
X (pd.DataFrame): Covariate matrix of size (num_subjects, num_features).
a (pd.Series): Treatment assignment of size (num_subjects,).
y (pd.Series): Observed outcome of size (num_subjects,).
w (pd.Series | None): Individual (sample) weights calculated. Used to achieved unbiased average outcome.
If not provided, will be calculated on the data.
treatment_values (Any): Desired treatment value/s to stratify upon before aggregating individual into
population outcome.
If not supplied, calculates for all available treatment values.
Returns:
pd.Series[Any, float]: Series which index are treatment values, and the values are numbers - the
aggregated outcome for the strata of people whose assigned treatment is the key.
"""
if w is None:
w = self.compute_weights(X, a)
res = self._compute_stratified_weighted_aggregate(y, sample_weight=w, stratify_by=a,
treatment_values=treatment_values)
return res
| [
"noreply@github.com"
] | vishalbelsare.noreply@github.com |
6ad33e0ac4718ca5543b334da5ad303ac0225538 | 6c1b4bf688d7a27a3d516f32e1b3a331004b5c52 | /invoke.py | e5e362e3fe92f4ef66c25023d1a9463919883e60 | [] | no_license | paoloo/invoke | ea07b4111b7f06ab5eaa64cbf3e7daf7e9558659 | ffadb7f60fcdc0287f3752d4182dc93e0c3460c7 | refs/heads/master | 2021-01-10T05:23:25.854204 | 2016-01-23T03:12:11 | 2016-01-23T03:12:11 | 48,082,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | # -*- coding: utf-8 -*-
import ctypes, sys
def _invoke(_obj , _metodo , _parms):
try:
_cobj=ctypes.CDLL(_obj)
_func=eval('_cobj.%s' % _metodo)
return _func('%s' % ''.join(str(_k) for _k in _parms))
except:
raise
if __name__=='__main__':
if len(sys.argv) < 3:
print 'invoke.py - exec a function from a shared object'
print 'syntax: python invoke.py ./lib.so function [param1, param2, ...]'
exit(-1)
else:
_tmp = _invoke(sys.argv[1], sys.argv[2], sys.argv[3:])
print '\nfunction "%s" from "%s" returned "%s"' % (sys.argv[2],sys.argv[1],str(_tmp))
exit(0)
| [
"paolo@lit.ifce.edu.br"
] | paolo@lit.ifce.edu.br |
8c5c957d383f279a40df2c3cb710efbc071c3fff | f8f1c6d3b02c3a6bb30b303ada6c9fb298c72407 | /Socket/client.py | c30dcdd0ab9c1f2b5b86f977bb0c17478139ccfb | [] | no_license | tianlin95310/python-demo | 974c9a2ed31f7e13255b360376c1ce25dbb5b000 | 1d9935ff3db99172e398e46b08126b24ce9ac772 | refs/heads/master | 2023-06-04T18:12:18.097848 | 2021-06-15T08:41:21 | 2021-06-15T08:41:21 | 377,094,240 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | import socket
client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# host = socket.gethostname()
host = '192.168.1.2'
# host = '27.23.227.74'
# host = 'localhost'
port = 10005
client.connect((host, port))
while True:
try:
data = client.recv(1024)
print ('recv', data.decode())
msg = '{"what":1,"content":{"username":"tianlin","gender":"male","id":"10001","socket":null}}'
client.send(msg.encode('utf-8'))
client.close()
except ConnectionRefusedError as refuse:
print('服务器拒绝连接!', refuse)
break
except ConnectionResetError as reset:
print('关闭了正在占线的链接!', reset)
break
except ConnectionAbortedError as aborted:
print('客户端断开链接!', aborted)
break
except OSError as oserror:
print('OSError', oserror)
break | [
"953108373@qq.com"
] | 953108373@qq.com |
ee41c038533ccef5d22ccaccdf725ac037a50d82 | dd2a1b07eaafa49fb304c14cb7f110ffa581f8b5 | /Valid Binary String.py | 173e952b59b8084830722f5a2252ec929c923ea8 | [] | no_license | miraajkadam/Hack-the-Interview-IV--Asia-Pacific | b5b053ce0fff89296a09e61d44d41d94cf2eeaa4 | f3b54acad0fa8890d39b13bee8742593346e6a2d | refs/heads/master | 2022-09-03T09:13:20.245825 | 2020-05-31T14:59:35 | 2020-05-31T14:59:35 | 268,086,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | import sys
import math
def minimumMoves(s,d):
count=0
flag=False
for i in range(len(s)):
if(i>=(d-1) and i<=(len(s)-d)):
for j in range(i-(d-1),i+(d-1)+1):
if(s[j]==1):
flag=True
print(flag)
if __name__ == '__main__':
sys.stdin=open("pyIn.txt","r")
fout=open("pyOut.txt","w")
#---------------------------------
s = input()
d = int(input().strip())
#---------------------------------
result=minimumMoves(s,d)
fout.write(str(result) + "\n")
fout.close()
| [
"61474583+miraajkadam@users.noreply.github.com"
] | 61474583+miraajkadam@users.noreply.github.com |
ba4ee69d23fd61ba86aa9bc91e62b37f7c42bdcf | 69b1c873d8e8ea64dbab662866466a0b971ce41d | /utils.py | 7292966a069fa52645c7bdd436a7bb1a68075e44 | [] | no_license | Coder2Programmer/WANA-Develop | 9800e415561d81b7ba6d3584b03955fac02b7fcc | e1a8bf933216ba3aba5c828f1205a9b77f3d770e | refs/heads/master | 2023-02-24T01:52:48.336405 | 2021-01-26T03:27:47 | 2021-01-26T03:27:47 | 294,609,019 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,072 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
The utils module includes functions to help symbolic execution, such as transformation function
for transform truly value to symbolic value and versa.
"""
import ctypes
from typing import Union
import six
import z3
import bin_format
def is_int(value: int) -> bool:
return isinstance(value, six.integer_types)
def is_float(value: float) -> bool:
return isinstance(value, float)
def is_symbolic(value: Union[float, int, z3.BitVecNumRef]) -> bool:
return not isinstance(value, six.integer_types) and not isinstance(value, float)
def is_all_real(*args) -> bool:
for elem in args:
if is_symbolic(elem):
return False
return True
def to_symbolic(number: int, length: int) -> z3.BitVecVal:
if is_int(number) or is_float(number):
return z3.BitVecVal(number, length)
return number
def to_signed(number: int, length: int) -> int:
if number > 2**(length - 1):
return (2 ** length - number) * (-1)
else:
return number
def to_unsigned(number: int, length: int) -> int:
if number < 0:
return number + 2 ** length
else:
return number
def sym_abs(x):
return z3.If(x >= 0, x, -x)
def check_sat(solver: z3.Solver, pop_if_exception: bool = True) -> z3.CheckSatResult:
try:
ret = solver.check()
if ret == z3.unknown:
raise z3.Z3Exception(solver.reason_unknown())
except Exception as e:
if pop_if_exception:
solver.pop()
raise e
return ret
def eos_abi_to_int(abi_name: str) -> int:
try:
if len(abi_name) > 13:
raise Exception('string is too long to be a valid name')
if not abi_name:
return 0
value = 0
n = min(len(abi_name), 12)
for i in range(n):
value <<= 5
value |= _char_to_value(abi_name[i])
value <<= (4 + 5*(12-n))
if len(abi_name) == 13:
v = _char_to_value(abi_name[12])
if v > 0x0F:
raise Exception('13th character in name cannot be a letter that comes after j')
value |= v
return ctypes.c_int64(value).value
except Exception as e:
return 0
def _char_to_value(c: str) -> int:
if c == '.':
return 0
elif '1' <= c <= '5':
return (ord(c) - ord('1')) + 1
elif 'a' <= c <= 'z':
return (ord(c) - ord('a')) + 6
else:
raise Exception('character is not in allowed character set for names')
def gen_symbolic_args(func: 'instance.FunctionInstance'):
symbolic_params = list()
for i, e in enumerate(func.functype.args):
if e == bin_format.i32:
symbolic_params.append(z3.BitVec(f'i32_bv_{i}', 32))
elif e == bin_format.i64:
symbolic_params.append(z3.BitVec(f'i64_bv_{i}', 64))
elif e == bin_format.f32:
# The first approach is bit-vector based
# f32_bv = z3.BitVec(f'f32_bv_{i}', 32)
# symbolic_params.append(z3.fpBVToFP(f32_bv, z3.Float32()))
# The second approach is float-point based
symbolic_params.append(z3.FP(f'f32_{i}', z3.Float32()))
else:
# The first approach is bit-vector based
# f64_bv = z3.BitVec(f'f64_bv_{i}', 64)
# symbolic_params.append(z3.fpBVToFP(f64_bv, z3.Float64()))
# The second approach is float-point based
symbolic_params.append(z3.FP(f'f64_{i}', z3.Float64()))
return symbolic_params
def gen_symbolic_value(var_type, name):
if var_type == bin_format.i32:
return z3.BitVec(name, 32)
if var_type == bin_format.i64:
return z3.BitVec(name, 64)
if var_type == bin_format.f32:
return z3.FP(f'f32_{i}', z3.Float32())
if var_type == bin_format.f64:
return z3.FP(name, z3.Float64())
raise TypeError('Unsupported variable type')
| [
"wangdong17@foxmail.com"
] | wangdong17@foxmail.com |
9e2d106caf576c763e11e32eb14eb27cc379899f | 3c0fb20d77a8b4b63691fc8233cce44a50ecf36b | /src/core/geom/data/transform.py | 124b6149b51c0042e0b47a1a0325c7e1461de25c | [] | no_license | jorjuato/panda3dstudio | 8a9b35000b8850c0d2968f529a983b66ad01f2f8 | b6cf2a1d126273ca64ecec29f23eba7bf297f418 | refs/heads/master | 2020-12-06T19:16:42.105673 | 2016-04-26T18:14:44 | 2016-04-26T18:14:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,187 | py | from ...base import *
class GeomTransformBase(BaseObject):
def __init__(self):
self._verts_to_transf = {"vert": {}, "edge": {}, "poly": {}}
self._rows_to_transf = {"vert": None, "edge": None, "poly": None}
self._transf_start_data = {"bbox": None, "pos_array": None}
def _update_verts_to_transform(self, subobj_lvl):
selected_subobj_ids = self._selected_subobj_ids[subobj_lvl]
verts = self._subobjs["vert"]
self._verts_to_transf[subobj_lvl] = verts_to_transf = {}
self._rows_to_transf[
subobj_lvl] = rows_to_transf = SparseArray.allOff()
merged_verts = self._merged_verts
merged_verts_to_transf = set()
if subobj_lvl == "vert":
for vert_id in selected_subobj_ids:
merged_verts_to_transf.add(merged_verts[vert_id])
elif subobj_lvl == "edge":
edges = self._subobjs["edge"]
for edge_id in selected_subobj_ids:
edge = edges[edge_id]
for vert_id in edge:
merged_verts_to_transf.add(merged_verts[vert_id])
elif subobj_lvl == "poly":
polys = self._subobjs["poly"]
for poly_id in selected_subobj_ids:
poly = polys[poly_id]
for vert_ids in poly:
for vert_id in vert_ids:
merged_verts_to_transf.add(merged_verts[vert_id])
for merged_vert in merged_verts_to_transf:
rows = merged_vert.get_row_indices()
verts_to_transf[merged_vert] = rows
for row in rows:
rows_to_transf.set_bit(row)
def init_transform(self):
geom_node_top = self._geoms["top"]["shaded"].node()
start_data = self._transf_start_data
start_data["bbox"] = geom_node_top.get_bounds()
start_data["pos_array"] = geom_node_top.get_geom(
0).get_vertex_data().get_array(0)
def transform_selection(self, subobj_lvl, transf_type, value):
geom_node_top = self._geoms["top"]["shaded"].node()
vertex_data_top = geom_node_top.modify_geom(0).modify_vertex_data()
tmp_vertex_data = GeomVertexData(vertex_data_top)
if transf_type == "translate":
grid_origin = Mgr.get(("grid", "origin"))
vec = self._origin.get_relative_vector(grid_origin, value)
rows = self._rows_to_transf[subobj_lvl]
start_data = self._transf_start_data
tmp_vertex_data.set_array(0, start_data["pos_array"])
mat = Mat4.translate_mat(vec)
tmp_vertex_data.transform_vertices(mat, rows)
elif transf_type == "rotate":
grid_origin = Mgr.get(("grid", "origin"))
tc_pos = self._origin.get_relative_point(
self.world, Mgr.get("transf_center_pos"))
quat = self._origin.get_quat(
grid_origin) * value * grid_origin.get_quat(self._origin)
rows = self._rows_to_transf[subobj_lvl]
start_data = self._transf_start_data
tmp_vertex_data.set_array(0, start_data["pos_array"])
quat_mat = Mat4()
quat.extract_to_matrix(quat_mat)
offset_mat = Mat4.translate_mat(-tc_pos)
mat = offset_mat * quat_mat
offset_mat = Mat4.translate_mat(tc_pos)
mat *= offset_mat
tmp_vertex_data.transform_vertices(mat, rows)
elif transf_type == "scale":
grid_origin = Mgr.get(("grid", "origin"))
tc_pos = self._origin.get_relative_point(
self.world, Mgr.get("transf_center_pos"))
scale_mat = Mat4.scale_mat(value)
mat = self._origin.get_mat(
grid_origin) * scale_mat * grid_origin.get_mat(self._origin)
# remove translation component
mat.set_row(3, VBase3())
rows = self._rows_to_transf[subobj_lvl]
start_data = self._transf_start_data
tmp_vertex_data.set_array(0, start_data["pos_array"])
offset_mat = Mat4.translate_mat(-tc_pos)
mat = offset_mat * mat
offset_mat = Mat4.translate_mat(tc_pos)
mat *= offset_mat
tmp_vertex_data.transform_vertices(mat, rows)
array = tmp_vertex_data.get_array(0)
vertex_data_top.set_array(0, array)
for subobj_type in ("vert", "poly"):
vertex_data = self._vertex_data[subobj_type]
vertex_data.set_array(0, array)
array = GeomVertexArrayData(array)
handle = array.modify_handle()
handle.set_data(handle.get_data() * 2)
self._vertex_data["edge"].set_array(0, array)
def finalize_transform(self, cancelled=False):
start_data = self._transf_start_data
geom_node_top = self._geoms["top"]["shaded"].node()
vertex_data_top = geom_node_top.modify_geom(0).modify_vertex_data()
if cancelled:
bounds = start_data["bbox"]
pos_array = start_data["pos_array"]
vertex_data_top.set_array(0, pos_array)
for subobj_type in ("vert", "poly"):
self._vertex_data[subobj_type].set_array(0, pos_array)
pos_array = GeomVertexArrayData(pos_array)
handle = pos_array.modify_handle()
handle.set_data(handle.get_data() * 2)
self._vertex_data["edge"].set_array(0, pos_array)
else:
bounds = geom_node_top.get_bounds()
pos_reader = GeomVertexReader(vertex_data_top, "vertex")
subobj_lvl = Mgr.get_global("active_obj_level")
polys = self._subobjs["poly"]
poly_ids = set()
for merged_vert, indices in self._verts_to_transf[subobj_lvl].iteritems():
pos_reader.set_row(indices[0])
pos = Point3(pos_reader.get_data3f())
merged_vert.set_pos(pos)
poly_ids.update(merged_vert.get_polygon_ids())
vert_ids = []
for poly_id in poly_ids:
poly = polys[poly_id]
poly.update_center_pos()
poly.update_normal()
vert_ids.extend(poly.get_vertex_ids())
merged_verts = set(self._merged_verts[
vert_id] for vert_id in vert_ids)
self._update_vertex_normals(merged_verts)
self._origin.node().set_bounds(bounds)
self.get_toplevel_object().get_bbox().update(*self._origin.get_tight_bounds())
start_data.clear()
def _restore_subobj_transforms(self, old_time_id, new_time_id):
obj_id = self.get_toplevel_object().get_id()
prop_id = "subobj_transform"
prev_time_ids = Mgr.do("load_last_from_history",
obj_id, prop_id, old_time_id)
new_time_ids = Mgr.do("load_last_from_history",
obj_id, prop_id, new_time_id)
if prev_time_ids is None:
prev_time_ids = ()
if new_time_ids is None:
new_time_ids = ()
if not (prev_time_ids or new_time_ids):
return
if prev_time_ids and new_time_ids:
i = 0
for time_id in new_time_ids:
if time_id not in prev_time_ids:
break
i += 1
common_time_ids = prev_time_ids[:i]
prev_time_ids = prev_time_ids[i:]
new_time_ids = new_time_ids[i:]
verts = self._subobjs["vert"]
polys = self._subobjs["poly"]
data_id = "vert_pos_data"
time_ids_to_restore = {}
prev_prop_times = {}
positions = {}
# to undo transformations, determine the time IDs of the transforms that
# need to be restored by checking the data that was stored when transforms
# occurred, at times leading up to the time that is being replaced (the old
# time)
for time_id in prev_time_ids[::-1]:
# time_id is a Time ID to update time_ids_to_restore with
subobj_data = Mgr.do("load_from_history", obj_id, data_id, time_id)
# subobj_data.get("prev", {}) yields previous transform times
time_ids_to_restore.update(subobj_data.get("prev", {}))
data_for_loading = {}
# time_ids_to_restore.keys() are the IDs of vertices that need a
# transform update
for vert_id, time_id in time_ids_to_restore.iteritems():
if vert_id in verts:
prev_prop_times[vert_id] = time_id
# since multiple vertex positions might have to be loaded from the same
# datafile, make sure each datafile is loaded only once
data_for_loading.setdefault(time_id, []).append(vert_id)
for time_id, vert_ids in data_for_loading.iteritems():
pos_data = Mgr.do("load_from_history", obj_id,
data_id, time_id)["pos"]
for vert_id in vert_ids:
if vert_id in pos_data:
positions[vert_id] = pos_data[vert_id]
# to redo transformations, retrieve the transforms that need to be restored
# from the data that was stored when transforms occurred, at times leading
# up to the time that is being restored (the new time)
for time_id in new_time_ids:
subobj_data = Mgr.do("load_from_history", obj_id, data_id, time_id)
positions.update(subobj_data.get("pos", {}))
for vert_id in subobj_data.get("prev", {}):
if vert_id in verts:
prev_prop_times[vert_id] = time_id
# restore the verts' previous transform time IDs
for vert_id, time_id in prev_prop_times.iteritems():
verts[vert_id].set_previous_property_time("transform", time_id)
polys_to_update = set()
vertex_data_top = self._geoms["top"][
"shaded"].node().modify_geom(0).modify_vertex_data()
pos_writer = GeomVertexWriter(vertex_data_top, "vertex")
for vert_id, pos in positions.iteritems():
if vert_id in verts:
vert = verts[vert_id]
poly = polys[vert.get_polygon_id()]
polys_to_update.add(poly)
vert.set_pos(pos)
row = vert.get_row_index()
pos_writer.set_row(row)
pos_writer.set_data3f(pos)
pos_array = vertex_data_top.get_array(0)
self._vertex_data["vert"].set_array(0, pos_array)
self._vertex_data["poly"].set_array(0, pos_array)
pos_array = GeomVertexArrayData(pos_array)
handle = pos_array.modify_handle()
handle.set_data(handle.get_data() * 2)
self._vertex_data["edge"].set_array(0, pos_array)
vert_ids = []
for poly in polys_to_update:
poly.update_center_pos()
poly.update_normal()
vert_ids.extend(poly.get_vertex_ids())
self._vert_normal_change.update(vert_ids)
self.get_toplevel_object().get_bbox().update(*self._origin.get_tight_bounds())
| [
"Epihaius@users.noreply.github.com"
] | Epihaius@users.noreply.github.com |
d627be799d34ca09b15dbb8a8ba4999497693d40 | babc3e26d66a8084c9f84a0431338bafabae6ffd | /TaeJuneJoung/COD/lv2.OddOccurrencesInArray.py | d4d409749fec81fd81fda721db9af44dc3514b7c | [] | no_license | hoteldelluna/AlgoStudy | 5c23a1bfb07dbfbabc5bedd541d61784d58d3edc | 49ec098cecf2b775727d5648161f773e5488089b | refs/heads/dev | 2022-10-09T14:29:00.580834 | 2020-01-25T14:40:55 | 2020-01-25T14:40:55 | 201,632,052 | 5 | 0 | null | 2020-01-25T14:40:57 | 2019-08-10T13:11:41 | Python | UTF-8 | Python | false | false | 1,356 | py | """
무조건 하나만 홀수가 발생하니
마지막 index는 짝수일 수밖에 없다.(0부터 시작이니)
[조건]
1. A의 크기가 1인 경우
2. 홀수가 중간에 있는 경우
3. 홀수가 맨 마지막에 있는 경우
"""
def solution(A):
A.sort()
for i in range(0, len(A)-1, 2):
if A[i] != A[i+1]:
# 조건2 - 홀수가 1개밖에 없으니 답이 아니라면 짝수개이므로 앞에 것이 틀리다.
return A[i]
# 조건1, 3 - 조건2에서 끝나지 않았다면 맨 마지막 값이 답
return A[-1]
"""
[처음 풀이]
시도를 해본 문제
문제를 이해를 잘못한 부분도 한몫하였고,
효율성을 가장 크게 생각해야했던 문제
처음에는 set으로 감싸서 중복을 없앤 후,
해당 set내용으로 A.count를 하였으나 N^2이 나와 실패
Dict형태도 퍼포먼스에서는 좋지 않았다.
어떻게 짜면 효율적일지 다른 방도로 생각해보면 좋을듯한 문제.
현재 방법은 100점이나, 더 좋은 방도가 없을까?
"""
def solution(A):
A.sort()
if len(A) < 2:
return A[0]
cnt = 1
for i in range(1, len(A)):
if A[i-1] == A[i]:
cnt += 1
else:
if cnt%2:
return A[i-1]
else:
cnt = 1
return A[i] | [
"jtj0525@gmail.com"
] | jtj0525@gmail.com |
f106b629624edb3a15da57a5b1456cc17682108b | 55e3b57df5914896b5a5cb92ea09c11b67e5dad8 | /news/migrations/0002_auto_20201027_0850.py | c041a5521ed602568593f881701ed88ebe687d14 | [] | no_license | MrFlava/newsproject | 04ed04ee1c286c7bfeb6f42173184aa519cb5dec | fe3dc6c0de37345ddd6bb9032dae683dea84445b | refs/heads/master | 2023-06-19T06:18:59.106112 | 2021-07-21T17:46:16 | 2021-07-21T17:46:16 | 307,322,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | # Generated by Django 3.1.2 on 2020-10-27 08:50
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('news', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='upvotes_amount',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='published',
field=models.DateTimeField(default=datetime.datetime(2020, 10, 27, 8, 50, 36, 268065)),
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('published', models.DateTimeField(default=datetime.datetime(2020, 10, 27, 8, 50, 36, 268672))),
('author', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='news.post')),
],
),
]
| [
"thatelitemaili33t@gmail.com"
] | thatelitemaili33t@gmail.com |
f4607a41d07b6d11c28e5dd35ef3709e1afe0892 | faf86cf09d1b7414782b86991d0390a628c88e07 | /strategy.py | da981f8478bfc9315e650ae2119c44a714ab86d7 | [] | no_license | quan8tum/DFCF_TRADER | 42494ba846d1db6dddc62ee66564407304b9116b | fbabeb39a64e3965a90fd6fcc8cf2dd7a6fc920e | refs/heads/master | 2020-04-30T14:54:47.092540 | 2017-12-14T15:37:21 | 2017-12-14T15:37:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,442 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import requests
import sys,time
import json
stdi, stdo, stde = sys.stdin, sys.stdout, sys.stderr # 获取标准输入、标准输出和标准错误输出
reload(sys)
sys.stdin, sys.stdout, sys.stderr = stdi, stdo, stde # 保持标准输入、标准输出和标准错误输出
sys.setdefaultencoding('utf8')
class Strategy(object):
"""
利用同花顺回测引擎获取策略所需股票.
返回数据为JSON类型.
"""
def __init__(self,arg_query,upperIncome="20",fallIncome="5",lowerIncome="8"):
self.s = requests.session()
self.config=json.load(file("./config/strategy.json"))
self.s.headers.update(self.config["headers"])
self.query=self.config[arg_query]
self.stockHoldCount=self.config['transaction_params']['stockHoldCount']
self.hold_days=arg_query.split("_")[1]
self.upperIncome=str(upperIncome)
self.lowerIncome=str(lowerIncome)
self.fallIncome=str(fallIncome)
self.proxie=self.config["proxie"]
print '\n{0:-^70}'.format('')
print u"[策略]: %s [止盈回撤止损]: %s|%s|%s [满仓]: %s 只" % (arg_query,self.upperIncome,self.fallIncome,self.lowerIncome,self.stockHoldCount)
print '{0:-^70}\n'.format('')
self.success= True
#即时选股-----------------------------------------
def pickstock(self):
pickstock_params=self.config["pickstock_params"]
pickstock_params.update({"w":self.query})
while True:
try:
r=self.s.get(self.config["PICKSTOCK_URL"],params=pickstock_params,proxies=self.proxie)
except Exception as e:
print e;time.sleep(1)
else:
try:
return r.json()["data"]["result"]["result"]
except ValueError as e: #ValueError: No JSON object could be decoded
print '<pickstock>',e;time.sleep(2)
#回测选股--------------------------------------------
def traceback(self):
traceback_params=self.config["traceback_params"]
traceback_params.update({"query":self.query,
"daysForSaleStrategy":self.hold_days,
"upperIncome":self.upperIncome,
"lowerIncome":self.lowerIncome,
"fallIncome":self.fallIncome,
"startDate":" ",
"endDate":" "})
while True:
try:
r=self.s.post(self.config["STRATEGY_URL"],data=traceback_params,timeout=10,proxies=self.proxie)
except Exception as e:
print e;time.sleep(2)
else:
try:
r.json()['success']
except ValueError as e:
print '<traceback>',e
time.sleep(2)
continue
if r.json()['success']==False:
print "<traceback>: %s" % r.json()['data']['crmMessage']
#print u"抱歉,服务器繁忙,请稍后再试!"
time.sleep(1)
continue
#print r.json()['data']['stockData']['list']['data'][0]['codeName']
#print r.json()['data']['stockData'] #{u'errorCode': 100002, u'errorMsg': u'\u672a\u67e5\u8be2\u5230\u63a8\u8350\u80a1\u7968\u4ee3\u7801', u'list': []}
try:
num=r.json()['data']['stockData']['list']['stockNum']
except Exception as e: #TypeError as e:
print '<traceback>',e
time.sleep(1)
continue
if num!=0:
return r.json()['data']['stockData']['list']
else:
return False
#策略回测----------------------------------------------
def transaction(self,stime='2015-01-01',etime='2027-01-01'):
'''
return: (JSON)
stock_code, bought_at,sold_at,buying_price,selling_price
hold_for, signal_return_rate,stock_name
'''
transaction_params=self.config["transaction_params"]
transaction_params.update({"query":self.query,
"hold_for":self.hold_days,
"daysForSaleStrategy":self.hold_days})
transaction_params.update({"upperIncome":self.upperIncome,
"lowerIncome":self.lowerIncome,
"fallIncome":self.fallIncome,
"stime":stime,
"startDate":stime,
"etime":etime})
while True:
try:
r=self.s.post(self.config["TRANSACTION_URL"],data=transaction_params,proxies=self.proxie)
except Exception as e:
print '<transaction>',e;time.sleep(2)
else:
try:
if r.json()['success']==False:
print r.json()['data']['crmMessage']
print u"抱歉,服务器繁忙,请稍后再试!"
time.sleep(1)
continue
else:
try:
return r.json()['data']
except TypeError as e:
print '<transaction>',e
time.sleep(1)
continue
else:
return False
except ValueError as e: # NO JSON object could be decoded
print '<transaction>',e;time.sleep(2)
if __name__=="__main__":
if raw_input("Strategy:") == "" :
test=Strategy("QUERY_2_DAYS_HARD",25,5,10) # 2天策略: 25|5|10
else:
test=Strategy("QUERY_4_DAYS",20,5,8) # 2天策略: 25|5|10
from trade_calendar import TradeCalendar
calendar=TradeCalendar()
#-----------------------
result=test.pickstock()
print u"即时选股: @%s %s [%s]" % ((time.strftime('%X',time.localtime()),result[0][1],result[0][0][:6])if len(result)!=0 else (" ","[]"," "))
for i in xrange(len(result)):
print result[i][1]
#------------------------
result= test.traceback()
if result!=False:
print u"策略选股: %s %s [%s] ---> 购买日:%s\n" %((result["stockDate"], result["data"][0]["codeName"], \
result["data"][0]["code"], calendar.trade_calendar(result["stockDate"].replace("-","/"),2)) if result!=False else (" ","[]"," "," "))
else:
print u"回测选股: []"
##--------------------------------
stime='2017-01-01'
etime='2018-01-01'
r=test.transaction(stime=stime,etime=etime)
print '{0:-^70}'.format('Portfolie Value ')
if r is not False:
portfolio=1
for i in xrange(len(r)-1,-1,-1):
show=r[i]
if len(show["stock_name"])==3:
show["stock_name"]=show["stock_name"]+' '
print "%s %s %8s %6s %6s %6s %d %1.3f" % \
(show["stock_name"],
show["bought_at"],
show["sold_at"],
show["buying_price"],show["selling_price"],
show["signal_return_rate"],
time.strptime(show["bought_at"],'%Y-%m-%d').tm_wday+1,
(1+float(show["signal_return_rate"])/100)*portfolio)
portfolio *= 1+float(show["signal_return_rate"])/100
print '%s ---> %s' % (show["stock_name"], calendar.trade_calendar(show["bought_at"].replace("-","/"),int(test.hold_days)))
print '{0:-^70}\n'.format('End')
#import os
#os.system('pause')
#import random
#test.query = "DDE大单净量大于0.25;涨跌幅大于2%小于10.5%;市盈率小于45;非st股;非创业板;总市值从小到大排列"
'''
while True:
result=test.pickstock()
sys.stdout.write( "\r即时选股: @%s %s [%s]" % ((time.strftime('%X',time.localtime()),result[0][1],result[0][0][:6])if len(result)!=0 else (" ","[]"," ")))
time.sleep(random.randint(20,100))
''' | [
"wangych_qd@163.com"
] | wangych_qd@163.com |
13694a3733a922c55f5a0c8e4f7846f56af8cd4d | b55c72bc94c6464a1b4461a3d11051f7dce98cd4 | /source/205.py | 4c7d38f39594d3843d917d34579f356922c8d57c | [] | no_license | ilkerkesen/euler | d886a53d3df3922e4ddaff6ab9b767e547c0eca2 | b9e54412492cfcee9dbf5a017cf94e5da65ad0d3 | refs/heads/master | 2020-05-21T12:49:31.939194 | 2016-08-14T17:25:17 | 2016-08-14T17:25:17 | 6,717,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import product
def list_to_dict(totals):
result = dict()
for t in totals:
if result.has_key(t):
result[t] += 1
else:
result[t] = 1
return result
def get_won_count(n, d):
return sum(map(lambda x: x[1], filter(lambda x: x[0] < n, d.items())))
def get_total_won_count(p1, p2):
return sum(map(lambda x: get_won_count(x[0], p2) * x[1], p1.items()))
def main():
peter_totals = map(sum, product(range(1, 5), repeat=9))
colin_totals = map(sum, product(range(1, 7), repeat=6))
peter = list_to_dict(peter_totals)
colin = list_to_dict(colin_totals)
print get_total_won_count(peter, colin) / \
float(len(peter_totals) * len(colin_totals))
if __name__ == "__main__":
main()
| [
"ilkerksn@gmail.com"
] | ilkerksn@gmail.com |
92ea82d00e3baa47f0708f8943155310bef045d0 | eda9187adfd53c03f55207ad05d09d2d118baa4f | /python3_base/exception.py | 78bffed238c1ab8437126e7d6c33d8e406d2aae6 | [] | no_license | HuiZhaozh/python_tutorials | 168761c9d21ad127a604512d7c6c6b38b4faa3c7 | bde4245741081656875bcba2e4e4fcb6b711a3d9 | refs/heads/master | 2023-07-07T20:36:20.137647 | 2020-04-24T07:18:25 | 2020-04-24T07:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # -*- coding:utf-8 -*-
# /usr/bin/python
'''
Author:Yan Errol
Email:2681506@gmail.com
Wechat:qq260187357
Date:2019-04-29--21:59
Describe:异常诊断
'''
import time
def func():
try:
for i in range(5):
if i >3:
raise Exception("数字大于3了==")
except Exception as ret:
print (ret)
func()
import re
a = "张明 99分"
ret = re.sub(r"\d+","100",a)
print (ret)
a = [1,2,3]
b = [4,5,6]
print(a+b) | [
"2681506@gmail.com"
] | 2681506@gmail.com |
ddf360009afd0063737c2c6a97016a8406234f17 | 89fcb62b3f3a0a75854388c1a840bd7abb30b058 | /boot.py | e3656b7ab0ce61bb02b5d0516e65b90e88563f6f | [
"MIT"
] | permissive | KevinMidboe/esp-stereo-api | 17b66d7f8093bc155cafcb9121da7e34f28b10bd | 55972dbf2377ac5962bd24e4120b83c2e107f5e1 | refs/heads/master | 2020-07-06T02:16:04.466656 | 2019-08-17T09:06:03 | 2019-08-17T09:06:03 | 202,856,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,394 | py | import socket
from time import sleep_ms
html = b"""
<!DOCTYPE html><html><body>
<h1>hello world</h1>
<button onclick="navigate('on')">on</button>
<button onclick="navigate('off')">off</button>
</body>
<script type="text/javascript">
function toggle(value) {
console.log('posting', value)
window.fetch('/', {
method: 'POST',
body: value
})
.then(console.log, console.error)
}
function navigate(value) {
console.log('navigating', value)
window.location.replace('/' + value)
}
</script></html>
"""
# This file is executed on every boot (including wake-boot from deepsleep)
#import esp
#esp.osdebug(None)
import uos, machine
#uos.dupterm(None, 1) # disable REPL on UART(0)
import gc
#import webrepl
#webrepl.start()
gc.collect()
# - - - NETWORKING - - -
import network
sta_if = network.WLAN(network.STA_IF)
def connectWifi():
sta_if.active(True)
# PSID and password for wifi
sta_if.connect('', '')
return sta_if
def disconnectWifi():
sta_if.active(False)
if not sta_if.isconnected():
print('connecting to network...')
connectWifi()
while not sta_if.isconnected():
pass
print('network config:', sta_if.ifconfig())
from machine import Pin
from time import sleep
pin = Pin(0, Pin.OPEN_DRAIN)
pin(1)
s = socket.socket()
ai = socket.getaddrinfo("0.0.0.0", 8080)
print("Bind address info:", ai)
addr = ai[0][-1]
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(addr)
s.listen(5)
print("Listening, connect your browser to http://{}:8080/".format(addr))
class Headers(object):
def __init__(self, headers):
self.__dict__.update(headers)
def __getitem__(self, name):
return getattr(self, name)
def get(self, name, default=None):
return getattr(self, name, default)
class Request(object):
def __init__(self, sock):
header_off = -1
data = ''
while header_off == -1:
data += sock.recv(2048).decode('utf-8')
header_off = data.find('\r\n\r\n')
header_string = data[:header_off]
self.content = data[header_off+4:]
print('data', data)
# lines = []
# while len(header_string) > 0:
# match = self.header_re.search(header_string)
# group = match.group(0)
# print('mathc', group)
# lines.append(group)
# header_string = header_string[len(group) + 2:]
lines = header_string.split('\r\n')
first = lines.pop(0)
self.method, path, protocol = first.split(' ')
self.headers = Headers(
(header.split(': ')[0].lower().replace('-', '_'), header.split(': ')[1]) for header in lines
)
self.path = path
if self.method in ['POST', 'PUT']:
content_length = int(self.headers.get('content_length', 0))
while len(self.content) < content_length:
self.content += sock.recv(4096).decode('utf-8')
if self.content == 'on':
turnOn()
elif self.content == 'off':
turnOff()
def turnOn():
print('turning on')
pin(0)
sleep_ms(1450)
pin(1)
def turnOff():
print('turning off')
pin(0)
sleep_ms(3500)
pin(1)
while True:
socket, addr = s.accept()
print('client connected from', addr)
req = Request(socket)
if req.path == '/on':
turnOn()
elif req.path == '/off':
turnOff()
if req.method == 'POST':
print('this was a post')
print('req', req.path)
print('content', req.content)
socket.send(b'HTTP/1.1 200 OK\n\n' + html)
socket.close()
| [
"kevin.midboe@gmail.com"
] | kevin.midboe@gmail.com |
a57afd6f9e44073212a1310e56d731461175265f | fcb18bd1e0461e041739b472aef82d8f015a9e80 | /manage.py | 483d210979b01ad8f15a8c3f1d66b81ba278e00f | [] | no_license | gengue/ayremin | 02039c961766662b19e8c81b13a7ebaabf7d83f4 | 4d70b6de025d7f4a3c5f22d48d4b543ea64282f7 | refs/heads/master | 2021-01-20T11:00:18.308601 | 2013-09-26T22:57:46 | 2013-09-26T22:57:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ayremin.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"labso@labso.(none)"
] | labso@labso.(none) |
3e0296ac48ed41c8dffac6ce628f3f1ecc939d27 | 62bb7e30d5fc0f393357f71f83ce8b450877e854 | /client/ctf_remote_engine.py | 0cd11475f450d176133e2ce12ad0bde3f42455e1 | [] | no_license | leoche666/CTFClient | 8f95fbdf8ef6cc0b620ec1bb5bb62eea09c2f93c | be1450c403fcea8fdb6bbcd47892f0774a3f2da9 | refs/heads/master | 2020-03-28T20:58:46.480380 | 2018-09-18T06:04:11 | 2018-09-18T06:04:11 | 149,119,594 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,243 | py | # -*- coding: utf-8 -*-
import re
import time
import socket
import struct
import logging
import json
import threading
import xml.etree.ElementTree as ET
from collections import Iterable
from client.libs import socks
from functools import wraps
from abc import ABCMeta, abstractmethod
from wpyscripts.wetest.engine import GameEngine
from wpyscripts.common.wetest_exceptions import *
from ctf_uitils import Singleton, convert_str, convert_uni
def get_logger(name):
    """Return a logger named *name* that writes INFO+ records to stderr.

    Fix: the original attached a fresh StreamHandler on every call, so
    calling get_logger() twice with the same name produced duplicated log
    lines (logging.getLogger returns the same object each time). Handlers
    are now attached only once per logger.

    :param name: logger name passed to logging.getLogger.
    :return: the configured logging.Logger instance.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # Only configure the logger the first time it is requested.
    if not logger.handlers:
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(filename)s:%(lineno)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    return logger
# Module-level logger shared by everything in this file.
logger = get_logger('ctf_unity_engine')
def StrToBool(rStr):
    """Return True iff *rStr* is exactly the string "True".

    Replaces the original lambda assignment (PEP 8 E731) with a ``def`` of
    the same name and behaviour, so existing callers are unaffected; the
    redundant ``True if ... else False`` is collapsed to the comparison.

    :param rStr: string value reported by the SDK, e.g. "True"/"False".
    :return: boolean interpretation of the string.
    """
    return rStr == "True"
class Socket5Client(object):
    """Length-prefixed JSON client for the WeTest/GAutomator SDK.

    Wire format (both directions): a 4-byte native-endian int carrying the
    payload length, followed by the JSON payload itself. The socket is
    created via socks.socksocket, so a default SOCKS5 proxy installed with
    socks.set_default_proxy is used transparently — this is how commands
    reach a remote device.
    """

    def __init__(self, _host='localhost', _port=27018):
        self.host = _host
        self.port = _port
        # Connection is opened lazily via _connect(); send_command()
        # reconnects on failure.
        self.socket = None

    def _connect(self):
        """(Re)open the TCP connection to the SDK endpoint."""
        self.socket = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
        # Disable Nagle's algorithm: the protocol is many small
        # request/response pairs, so latency matters more than throughput.
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.socket.connect((self.host, self.port))

    def _send_data(self, data):
        """JSON-serialize *data* and send it with its 4-byte length prefix.

        :raises WeTestInvaildArg: if *data* cannot be JSON-serialized.
        """
        try:
            serialized = json.dumps(data)
        except (TypeError, ValueError) as e:
            raise WeTestInvaildArg('You can only send JSON-serializable data')
        length = len(serialized)
        buff = struct.pack("i", length)
        # Fix: use sendall() for the prefix as well. Plain send() may write
        # only part of the buffer, which would desynchronize the framed
        # stream for every subsequent message.
        self.socket.sendall(buff)
        self.socket.sendall(serialized)

    def _recv_data(self):
        """Read one framed JSON response and return its 'data' field.

        :raises WeTestSDKError: on an empty length prefix or a non-zero
            status code in the response.
        :raises WeTestInvaildArg: if the payload is not valid JSON.
        """
        length_buffer = self.socket.recv(4)
        if length_buffer:
            total = struct.unpack_from("i", length_buffer)[0]
        else:
            raise WeTestSDKError('recv length is None?')
        # Read the payload into a pre-sized buffer; recv_into on a
        # memoryview avoids per-chunk copies and reallocations.
        view = memoryview(bytearray(total))
        next_offset = 0
        while total - next_offset > 0:
            recv_size = self.socket.recv_into(view[next_offset:], total - next_offset)
            next_offset += recv_size
        try:
            deserialized = json.loads(view.tobytes())
        except (TypeError, ValueError) as e:
            raise WeTestInvaildArg('Data received was not in JSON format')
        if deserialized['status'] != 0:
            message = "Error code: " + str(deserialized['status']) + " msg: " + deserialized['data']
            raise WeTestSDKError(message)
        return deserialized['data']

    def send_command(self, cmd, params=None, timeout=20):
        """Send *cmd* with *params* and return the SDK's response data.

        Retries up to twice on socket-level failures, reconnecting between
        attempts. A receive timeout reconnects and raises immediately;
        SDK-level errors (WeTestRuntimeError) are never retried.

        :param cmd: command name understood by the SDK.
        :param params: JSON-serializable value (defaults to "").
        :param timeout: per-attempt socket timeout in seconds.
        :raises WeTestSDKError: when receiving times out.
        :raises Exception: when all retries are exhausted.
        """
        if not params:
            params = ""
        command = {
            "cmd": cmd,
            "value": params
        }
        for retry in range(0, 2):
            try:
                self.socket.settimeout(timeout)
                self._send_data(command)
                ret = self._recv_data()
                return ret
            except WeTestRuntimeError as e:
                # SDK-level failures are surfaced as-is, no retry.
                raise e
            except socket.timeout:
                self.socket.close()
                self._connect()
                raise WeTestSDKError("Recv Data From SDK timeout")
            except socket.error as e:
                time.sleep(1)
                print("Retry...{0}".format(e.errno))
                self.socket.close()
                self._connect()
                continue
            except Exception:
                # Fix: narrowed from a bare "except:" so KeyboardInterrupt /
                # SystemExit still propagate instead of being swallowed.
                time.sleep(1)
                print("Retry...")
                if self.socket:
                    self.socket.close()
                self._connect()
                continue
        raise Exception('Socket Error')
class UnityComponent(object):
'''
业务需求中不同控件有不同的行为属性。所以对于控件的操作区分对待。该类封装Unity控件的一些通用行为属性。可以继承该类根据业务封装一些控件
'''
# 等待控件消失或者隐藏的时间
DISAPPEAR_OR_HIDE_INTREVAL = 10
__metaclass__ = ABCMeta
TAG = "component"
@property
def GameObject(self):
return self.gameobject
@property
def Component(self):
return type(self)
@property
def Index(self):
return self.index
@property
def Element(self):
return self.element if self.element \
else self.get_element()
@property
def Elements(self):
if self.total_elements:
return self.total_elements
else:
self.get_element()
return self.total_elements
def __init__(self, engine, gameobject, index=None):
self.engine = engine
self.gameobject = gameobject
self.index = index
self.element = None
self.total_elements = None
self.total_wait_time = 0
def wait_for_times(self, count, interval, error):
'''
每隔规定时间等待目前方法执行一次
:param count: 重试的次数
:param interval: 每一次重试的时间间隔
:param error: 超时之后的错误提示
:return: 一个目标函数的装饰器
'''
def decorator(func):
@wraps(func)
def wrap_function(*args, **kwargs):
retry = count
try:
start_time = time.time()
while retry > 0:
# print "try to invoke {}".format(func)
result = func(*args, **kwargs)
if result:
return result
else:
retry -= 1
time.sleep(interval)
else:
raise EnvironmentError(error)
finally:
self.total_wait_time = time.time() - start_time
return wrap_function
return decorator
def wait_interactive(self, properties=["enabled"], count=15, interval=2):
'''
在count*interval时间内等待控件可交互
:param properties: 用于判断状态的属性值list
:param count
:param interval
:return:
'''
obj = self
@obj.wait_for_times(count=count, interval=interval, error="在{}秒内,没有检测到{}可交互".format(count*interval, obj))
def wait_interactive_wrapper():
return obj.is_interactive(properties)
return wait_interactive_wrapper()
def is_interactive(self, properties=["enabled"]):
'''
根据控件之上的一些属性值来判断该控件是否可以进行交互
:param properties: 用于判断状态的属性值list
:return:
'''
active_self = self.engine.get_gameobject_active(self.GameObject)
rets = self.get_component_statuses(properties)
return False if (not active_self) or len(filter(lambda status: status is False, rets)) > 0 else True
def get_element(self, wait=True, count=30, interval=1):
'''
使用Gautomator的提供的查找函数,来查找一个符合的元素实例。找不到抛出异常
1. self.index: 为None时,使用find_elment_wait来查找一个元素。
2. self.index: 为其他的整数时,使用find_elements_path返回一个元素列表,取其中的对应索引的元素
:param wait: 是否在count*interval时间内等待元素实例化
:param count: 等待次数
:param interval: 等待时间间隔
:return: 找到的控件元素
'''
def get_element_once():
if self.index is None:
self.element = self.engine.find_element(self.gameobject)
self.total_elements = [self.element]
else:
self.total_elements = self.engine.find_elements_path(self.gameobject)
self.element = self.total_elements[self.index]
return self.element
return self.wait_for_times(count=count, interval=interval,
error="在{}秒内,{}没有被实例化".format(count*interval, self))\
(get_element_once)() if wait else get_element_once()
def get_component_statuses(self, variables):
'''
获取自身一组属性的属性值
:param element: element实例
:param component: 组件名
:param variables: 一组属性
:return: 属性状态值
'''
assert hasattr(variables, '__iter__')
return [StrToBool(convert_str(self.engine.get_component_field(self.Element, self.TAG, var))) for var in variables]
def get_component_field(self, atr):
'''
获取控件上的属性值
:param atr: 需要获取的属性值
:return:
'''
return convert_str(self.engine.get_component_field(self.Element, self.TAG, atr))
@abstractmethod
def click(self):
'''
所有组件都需要有click行为
:return:
'''
pass
def wait_for_disappear_or_hide(self, properties=["enabled",], interval=0.1):
'''
等待控件消失或者隐藏。找不到该元素或者找到该元素但是隐藏了,满足其中一个条件则视为成功
:param interval: 检测的时间间隔
:return:
'''
obj = self
count = int(self.engine.DISAPPEAR_OR_HIDE_INTREVAL / interval)
@self.wait_for_times(count=count, interval=interval,
error="在{}秒内没有隐藏或者消失".format(count*interval))
def _get_disappear_or_hide_once():
try:
# 找不到元素
# 找到元素但是元素的active属性是False
# 找到元素,元素的active属性是True,但是要求的属性列表中其中有个值是False
# 出现获取属性值发现异常
if obj.get_element(wait=False) \
and obj.engine.get_gameobject_active(obj.GameObject) \
and not (False in obj.get_component_statuses(properties)):
return False
else:
return True
except Exception:
return True
_get_disappear_or_hide_once()
def __getattr__(self, item):
    '''
    Fallback lookup rule for Unity component field values.
    Normal instance/class attributes are served first by __getattribute__;
    anything missing is fetched from the component on the device, and an
    AttributeError is raised when that also fails.
    :param item: attribute name being looked up
    :return:
    '''
    # try:
    #     return super(UnityComponent, self).__getattribute__(item)
    # except:
    try:
        return self.get_component_field(item)
    except Exception, ex:  # Python 2 except syntax; kept as-is
        raise AttributeError("{0}没有该{1}属性。Error:{2}".format(self, item, ex))
def __str__(self):
    # Human-readable description, including the list index when one is set.
    return "<Unity控件 Component={0} GameObject={1}>".format(self.TAG, self.GameObject) if self.Index is None\
        else "<Unity控件 Component={0} GameObject={1} Index={2}>".format(self.TAG, self.GameObject, self.Index)

def __repr__(self):
    return self.__str__()
class RemoteGameEngine(GameEngine):
    '''Game engine driven over a remote socket connection.'''
    __metaclass__ = Singleton  # Python 2 style singleton hook

    def __init__(self, address, port):
        self.address = address
        self.port = port
        self.sdk_version = None
        self.socket = Socket5Client(self.address, self.port)
class CTFRemoteUnityEngine(RemoteGameEngine):
    '''Remote Unity engine reached through a SOCKS5 proxy tunnel.'''
    __metaclass__ = Singleton  # Python 2 style singleton hook
    # Maps a component tag to the wrapper class used for it.
    # NOTE(review): '__filed' looks like a typo for '__field'.
    __filed = {
        UnityComponent.TAG: UnityComponent,
    }

    def __init__(self, host, sock5_port=8719, sdk_port=27019):
        self._host = host
        self._sock5_port = sock5_port
        self._sdk_port = sdk_port
        # Use SOCKS5 as the remote proxy.
        socks.set_default_proxy(socks.SOCKS5, self._host, self._sock5_port)
        # Locator string formats used to address a specific Unity widget.
        self.format1 = "^GameObject=(.+),Component=(\w+)$"
        self.format2 = "^GameObject=(.+),Component=(\w+),Index=(-?\d+)$"
        '''
        在3D分屏界面后台开启了视线晃动线程,主线程来做主要的控件操作。虽然python的线程同一时刻只会存在一个线程在真正的运行,但是两个线程都是使用共享的socket进行对手机的操作,线程的切换会混乱socket的发送和接收数据。
        所以要求主线程的控件操作和单次视线晃动操作都是原子操作,由python的threading库提供的Lock来实现加锁。 并且优化了Gautomator的SocketClinet收发函数,让socket的发送和接收数据的时候不可以被线程切换
        '''
        # (Translation of the note above) In the 3D split-screen UI a
        # background thread shakes the view while the main thread drives the
        # widgets; both share one socket, so a thread switch can interleave
        # socket traffic.  Every widget operation and each single view-shake
        # must therefore be atomic, enforced by the threading.Lock below;
        # Gautomator's SocketClient send/recv was also patched so it cannot
        # be interrupted by a thread switch.
        self.lock = threading.Lock()
        # axt-agent redirects port 8719 to the Gautomator SDK port 27019.
        super(CTFRemoteUnityEngine, self).__init__("127.0.0.1", self._sdk_port)

    def __str__(self):
        return "CTFRemoteUnityEngine<host={},port={} -> host={},port={}> "\
            .format(self._host, self._sock5_port, "127.0.0.1", self._sdk_port)

    def __repr__(self):
        return self.__str__()

    def _lock_self(self, method, *args, **kwargs):
        # Run *method* while holding the shared socket lock (see __init__).
        try:
            # start = time.time()
            self.lock.acquire()
            return method(*args, **kwargs)
        finally:
            # print time.time() - start
            self.lock.release()

    def _parse_unity_format_str(self, frm_str):
        '''
        Split a locator string in one of the two supported formats and build
        the matching widget wrapper instance.
        :param frm_str: locator string to split
        :return: a wrapper created from (gameobject, component) or from
                 (gameobject, component, index)
        '''
        m1 = re.match(self.format1, convert_str(frm_str))
        m2 = re.match(self.format2, convert_str(frm_str))
        if m1:
            gameobject, component = m1.groups()
            instance = self.__filed.get(component, UnityComponent)
            return instance(engine=self, gameobject=gameobject, index=None)
        elif m2:
            gameobject, component, index = m2.groups()
            instance = self.__filed.get(component, UnityComponent)
            return instance(engine=self, gameobject=gameobject, index=int(index))
        else:
            raise EnvironmentError("请按照format={0} or {1}的格式传入定位字符串".format(self.format1, self.format2))

    def get_gameobjet(self, frm_str):
        '''
        Extract the gameobject path from a locator string.
        NOTE(review): the method name looks like a typo for get_gameobject;
        renaming would break existing callers, so it is kept.
        :param frm_str: locator string
        :return: gameobject path
        '''
        return self._parse_unity_format_str(frm_str).GameObject

    def get_component(self, frm_str):
        '''
        Extract the component name from a locator string.
        :param frm_str: locator string
        :return: component name
        '''
        return self._parse_unity_format_str(frm_str).Component

    def get_index(self, frm_str):
        '''
        Extract the index from a locator string.
        :param frm_str: locator string
        :return: index
        '''
        return self._parse_unity_format_str(frm_str).Index

    def get_element(self, frm_str):
        '''
        Resolve the element addressed by a locator string.
        :param frm_str: locator string
        :return: element instance
        '''
        return self._parse_unity_format_str(frm_str).Element

    def get_elements(self, frm_str):
        '''
        Resolve every element matching a locator string.
        :param frm_str: locator string
        :return: list of all matching element instances
        '''
        return self._parse_unity_format_str(frm_str).Elements

    def get_instance(self, frm_str):
        '''
        Build a widget wrapper instance from a locator string.
        :param frm_str:
        :return:
        '''
        return self._parse_unity_format_str(frm_str)

    def join_gameobject(self, gameobject, *keywords):
        '''
        Join path keywords onto a gameobject prefix with '/' separators.
        :param gameobject: gameobject path prefix (must be non-empty)
        :param keywords: path segments to append
        :return: the joined gameobject path
        '''
        assert len(gameobject) > 0
        assert isinstance(keywords, Iterable)
        for keyword in keywords:
            gameobject += keyword if gameobject[-1] == '/' else '/' + keyword
        return gameobject

    def get_dump_tree(self, filename):
        '''
        Dump the Unity UI tree to an XML file.
        :param filename: file the UI tree is written to
        :return:
        '''
        source = self._get_dump_tree()
        tree = ET.ElementTree(ET.fromstring(source['xml']))
        # ui_file = os.path.join(os.path.dirname(__file__), filename)
        tree.write(filename, encoding='utf-8')

    def swipe(self, xyz, offset, direction='x', step=2, delay=2000):
        '''
        Move *offset* away from *xyz* over *delay* milliseconds.
        :param xyz: world coordinates as a comma-separated string
        :param offset: total distance to move
        :param direction: one of ['x', 'y', 'z']
        :param step: number of intermediate moves
        :param delay: total duration in milliseconds
        :return:
        '''
        rotation = [float(i) for i in convert_str(xyz).split(',')]
        distance = float(offset) / step
        interval = float(delay) / step
        for i in range(step):
            # NOTE(review): 'x' updates component 1 and 'y' component 0 —
            # looks swapped; confirm against the coordinate convention used.
            if direction == 'x':
                rotation[1] += distance
            elif direction == 'y':
                rotation[0] += distance
            elif direction == 'z':
                rotation[2] += distance
            self.move('{0},{1},{2}'.format(*rotation))
            time.sleep(interval/1000)

    def wait_for_scene(self, name, max_count=20, sleeptime=2):
        '''
        Poll until the current scene equals *name*.
        :param name: expected scene name
        :param max_count: maximum number of polls
        :param sleeptime: seconds slept after each poll
        :return: True once the scene is reached, False on timeout
        '''
        scene = None
        for i in range(max_count):
            try:
                scene = self.get_scene()
            except:
                # NOTE(review): a failed get_scene sleeps here and again
                # below, doubling the delay for that iteration.
                time.sleep(sleeptime)
            if scene == name:
                return True
            time.sleep(sleeptime)
        return False
| [
"673965587@qq.com"
] | 673965587@qq.com |
023952c2d3a6d5959b48481d39fda48a0ff3ea33 | 815668204d46e6b9d90525ae3ab0338519bee2b5 | /pyaltt2/converters.py | 03dbbb8e20841884f4b6fbdaa0515694fb732999 | [
"MIT"
] | permissive | alttch/pyaltt2 | b1e74da7a38e981e2d0462c48841b5497ea03fb2 | da51459b01c6729a866ca2bb4731d94c031854d1 | refs/heads/master | 2022-05-19T06:37:00.238630 | 2022-05-06T20:11:42 | 2022-05-06T20:11:42 | 225,089,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,059 | py | def merge_dict(*args, add_keys=True):
"""
Safely merge two dictionaries
Args:
dct0...n: dicts to merge
add_keys: merge dict keys (default: True)
Returns:
merged dict
"""
if len(args) < 1:
return None
dct = args[0].copy()
from collections.abc import Mapping
for merged in args[1:]:
if not add_keys:
merged = {k: merged[k] for k in set(dct).intersection(set(merged))}
for k, v in merged.items():
if isinstance(dct.get(k), dict) and isinstance(v, Mapping):
dct[k] = merge_dict(dct[k], v, add_keys=add_keys)
else:
if v is None:
if not k in dct:
dct[k] = None
else:
dct[k] = v
return dct
def val_to_boolean(val):
    """
    Convert any value to boolean

    Boolean: return as-is

    - Integer: 1 = True, 0 = False
    - Strings (case-insensitive): '1', 'true', 't', 'yes', 'on', 'y' = True
    - '0', 'false', 'f', 'no', 'off', 'n' = False

    Args:
        val: value to convert

    Returns:
        boolean converted value, None if val is None

    Raises:
        ValueError: if value can not be converted
    """
    if val is None:
        return None
    elif isinstance(val, bool):
        return val
    else:
        sval = str(val).lower()
        if sval in ('1', 't', 'true', 'yes', 'on', 'y'):
            return True
        elif sval in ('0', 'f', 'false', 'no', 'off', 'n'):
            return False
        else:
            # BUG FIX: the original raised a bare ValueError with no message,
            # which makes the failure impossible to diagnose from a traceback.
            raise ValueError('can not convert to boolean: {!r}'.format(val))
def safe_int(val):
    """
    Convert string/float to integer

    If input value is integer - return as-is.
    Strings with a hexadecimal (0x..), binary (0b..) or octal (0o..)
    prefix are converted from that base; the check is case-insensitive.
    Floats are truncated towards zero.

    Args:
        val: value to convert

    Raises:
        ValueError: if input value can not be converted
    """
    if isinstance(val, int):
        return val
    elif isinstance(val, str):
        # BUG FIX: the prefix detection was case-sensitive, so '0X10' or
        # '0B1' fell through to a plain base-10 int() call and raised.
        v = val.lower()
        if 'x' in v:
            return int(v, 16)
        elif 'b' in v:
            return int(v, 2)
        elif 'o' in v:
            return int(v, 8)
        return int(v)
    return int(val)
def parse_date(val=None, return_timestamp=True, ms=False):
    """
    Parse date from string or float/integer

    Input date can be either a timestamp or a date-time string.
    A numeric value greater than 3000 is treated as a UNIX timestamp,
    otherwise as a year (and handed to the date parser).

    Args:
        val: value to parse
        return_timestamp: return UNIX timestamp (default) or datetime object
        ms: parse date from milliseconds

    Returns:
        UNIX timestamp (float) or datetime object. If input value is None,
        returns current date/time
    """
    import datetime
    import time
    if val is None:
        return time.time() if return_timestamp else datetime.datetime.now()
    if isinstance(val, datetime.datetime):
        parsed = val
    else:
        try:
            number = float(val)
            if ms:
                number /= 1000
            if number > 3000:
                # Large numbers are timestamps already; short-circuit.
                if return_timestamp:
                    return number
                return datetime.datetime.fromtimestamp(number)
            # Small numbers are years: fall through to the string parser.
            val = int(number)
        except (TypeError, ValueError):
            pass
        import dateutil.parser
        parsed = dateutil.parser.parse(str(val))
    return parsed.timestamp() if return_timestamp else parsed
def parse_number(val):
    """
    Tries to parse number from any value

    Valid values are:

    - any float / integer
    - 123.45
    - 123 456.899
    - 123,456.899
    - 123 456,899
    - 123.456,82

    Args:
        val: value to parse

    Returns:
        val as-is if val is integer, float or None, otherwise parsed value

    Raises:
        ValueError: if input val can not be parsed
    """
    if val is None or isinstance(val, (int, float)):
        return val
    if not isinstance(val, str):
        raise ValueError(val)
    text = val.strip()
    try:
        # Fast path: already a plain float literal.
        return float(text)
    except ValueError:
        pass
    if ' ' in text:
        # Space-grouped thousands; any comma left over is a decimal mark.
        return float(text.replace(' ', '').replace(',', '.'))
    commas = text.count(',')
    dots = text.count('.')
    if commas > 1:
        # Multiple commas can only be thousands separators.
        return float(text.replace(',', ''))
    if commas == 1 and dots >= commas:
        if text.index(',') < text.index('.'):
            # "123,456.899": comma groups thousands.
            return float(text.replace(',', ''))
        # "123.456,82": dots group thousands, comma is the decimal mark.
        return float(text.replace('.', '').replace(',', '.'))
    # Single comma and no dot: comma is the decimal mark ("12,5").
    return float(text.replace(',', '.'))
def mq_topic_match(topic, mask):
    """
    Checks if topic matches mqtt-style mask

    '+' matches exactly one level, '#' matches one or more trailing levels.
    NOTE: unlike the MQTT spec, 'a/#' does not match the parent topic 'a'
    itself — the '#' here requires at least one extra level.

    Args:
        topic: topic (string)
        mask: mask to check

    Returns:
        True if matches, False if don't
    """
    if topic == mask:
        return True
    mask_parts = mask.split('/')
    topic_parts = topic.split('/')
    n_topic = len(topic_parts)
    for pos, part in enumerate(mask_parts):
        if part == '#':
            # Multi-level wildcard: valid only if a level exists here.
            return pos < n_topic
        if pos >= n_topic or (part != '+' and part != topic_parts[pos]):
            return False
    # No wildcard tail: the masks match only at equal depth.
    return len(mask_parts) == n_topic
| [
"div@altertech.com"
] | div@altertech.com |
da0d5d11aab1727c85b3e10826ba2d3bf070478d | 819c3415009cc2119d8962e415a47ef0d477bd39 | /api/migrations/0001_initial.py | 02dd29775b07ccdc42fcb67fa66a5624662dee31 | [] | no_license | harshitksrivastava/MovieRaterProject | ae739aa686063edc43215054eb017a1e4ddbfc67 | c3a31357defd3d27fd344697438273211908849c | refs/heads/master | 2021-09-29T12:26:31.012420 | 2020-08-03T20:40:14 | 2020-08-03T20:40:14 | 252,273,767 | 0 | 0 | null | 2021-09-22T18:48:46 | 2020-04-01T19:57:20 | Python | UTF-8 | Python | false | false | 1,462 | py | # Generated by Django 3.0.4 on 2020-03-31 19:16
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Movie plus a per-user Rating (one rating per user/movie)."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=32)),
                ('description', models.TextField(max_length=360)),
            ],
        ),
        migrations.CreateModel(
            name='Rating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # BUG FIX: the upper bound used MaxLengthValidator (a string
                # length validator) on an IntegerField; MaxValueValidator is
                # the correct bound for a 1-5 star rating.  Validators are not
                # part of the database schema, so no schema change results.
                ('stars', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
                ('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Movie')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'unique_together': {('user', 'movie')},
                'index_together': {('user', 'movie')},
            },
        ),
    ]
| [
"harsh0311@gmail.com"
] | harsh0311@gmail.com |
d8ee58373b62a9ed7b7bfb582e09a2d53b076001 | 4eb76b7327aa9383dbfd6d76aa3bc1f890ceb269 | /bot.py | ff9565aa6ba1ea76cc5c1e87a7723cde7a61f670 | [] | no_license | catatonicTrepidation/IrudiaEditor | c99af1cee7b33b9456a79143e030d7be96f15221 | 926d52a902ec2ad5cfcec30af970d5e5191ec1c6 | refs/heads/master | 2020-03-28T19:20:40.276957 | 2018-09-16T05:38:41 | 2018-09-16T05:38:41 | 148,966,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,499 | py | import discord
from discord.ext import commands
import json
# my code
from opencv import filters, imgtools, kernelparse
import filter_switchboard
import contextparse
# Bot configuration: the Discord token is read from the JSON config file.
config_data = json.load(open('data/config.json','r',encoding="utf-8_sig"))
TOKEN = config_data['token']

description = '''Apply filters, transform, and combine images with Irudia.
lotta bull'''
# Commands may be triggered with any of the three prefixes.
bot = commands.Bot(command_prefix=('...','…','---'), description=description)
@bot.command(aliases=['edgelord'], pass_context=True)
async def edge(ctx: commands.Context, url : str = None, kern_dim : int = None, *, args = 'nyan'):
    """Apply an edge-detection filter to an image (from URL or attachment) and post the result."""
    # success = None
    # if url:
    #     success = download.download_image(url, 'data\databases\{}\downloaded\images\input_{}.png'.format(ctx.message.server.id, 'edge'))
    # elif ctx.message.attachments:
    #     success = download.download_image(ctx.message.attachments[0]['url'], 'data\databases\{}\downloaded\images\input_{}.png'.format(
    #         ctx.message.server.id, 'edge'))
    # if success:
    #     # grab image
    #     img = filter_switchboard.read_image('data\databases\{}\downloaded\images\input_{}.png'.format(ctx.message.server.id, 'edge'))
    #
    img = contextparse.get_image(ctx, url, 'edge')
    if img is not None:
        result_img = filters.edge(img, kern_dim)
        # Output is written per-server, then uploaded to the channel.
        imgtools.write_image(result_img, ctx.message.server.id, 'edge')
        await bot.send_file(ctx.message.channel, 'data\databases\{}\output\images\output_edge.png'.format(ctx.message.server.id))
        return True
    await bot.say('Need to supply img! (might make bot check a few images above, dunno)')
@bot.command(aliases=['custom','k'], pass_context=True)
async def kernel(ctx: commands.Context, url : str = None, *, args = 'nyan'):
    """Convolve an image with a user-supplied kernel expression and post the result."""
    img = contextparse.get_image(ctx, url, 'customkernel')
    if img is None:
        await bot.say('Had trouble reading! (might make bot check a few images above, dunno)')
        # BUG FIX: the original fell through here and called
        # filters.convolve(None, ...), crashing after reporting the error.
        return
    # '+'/'-' in the expression means several matrices are being combined.
    operands = None
    if '-' in args or '+' in args:
        operands = True
    if operands:
        kern = kernelparse.parse_multiple_matrices(args)
    else:
        kern = kernelparse.parse_one_matrix(args)
    result_img = filters.convolve(img, kern)
    # Output is written per-server, then uploaded to the channel.
    imgtools.write_image(result_img, ctx.message.server.id, 'customkernel')
    await bot.send_file(ctx.message.channel, 'data\databases\{}\output\images\output_customkernel.png'.format(ctx.message.server.id))
    return True
bot.run(TOKEN) | [
"catatonictrepidation@gmail.com"
] | catatonictrepidation@gmail.com |
23470986e0d975fc77b67f88b1d926e6e829517a | b2a5af52edbef6ebcd81d541dd891e0eeaab90f6 | /2019/5b.py | 82cf24faf1d993bcc57fd0da3174eab2033a5475 | [] | no_license | balshetzer/AdventOfCode | 1fbed0798fe34e336d660f8e307f479b32f56670 | 5348a65ae5ae94fa48bf1f1119c7ba28bf937536 | refs/heads/master | 2022-12-11T08:37:39.982059 | 2022-12-04T15:33:32 | 2022-12-04T15:33:32 | 227,736,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | #!/usr/bin/env python3
import fileinput
import intcode
# Advent of Code 2019 day 5 part 2: run the intcode program from the puzzle
# input with input value 5 and print its diagnostic output.
print(intcode.Interpreter(next(fileinput.input())).run(input=5, output=True))
| [
"hesky@hesky-macbookpro.roam.corp.google.com"
] | hesky@hesky-macbookpro.roam.corp.google.com |
def obj_to_post(obj):
    """
    Serialize a model-like object into a plain dict for API output.

    Reads the object's __dict__ and normalizes three fields:
    ``modify_dt`` to a 'YYYY-MM-DD HH:MM' string (or ''), ``tags`` to a
    list of tag names (or []), and ``owner`` to its username (or
    'Anonymous').  The Django-internal '_state' entry is removed.
    """
    post = dict(vars(obj))
    post['modify_dt'] = obj.modify_dt.strftime('%Y-%m-%d %H:%M') if obj.modify_dt else ''
    post['tags'] = [tag.name for tag in obj.tags.all()] if obj.tags else []
    post['owner'] = obj.owner.username if obj.owner else 'Anonymous'
    del post['_state']
    return post
def prev_next_post(obj):
    """
    Return ``(prev, next)`` neighbour descriptors for a post.

    Each descriptor is ``{'id': ..., 'title': ...}``, or an empty dict when
    the neighbour does not exist (obj.DoesNotExist is raised).
    """
    def _neighbour_dict(getter):
        # One-shot lookup; absence is signalled by the model's exception.
        try:
            neighbour = getter()
            return {'id': neighbour.id, 'title': neighbour.title}
        except obj.DoesNotExist:
            return {}
    return _neighbour_dict(obj.get_prev), _neighbour_dict(obj.get_next)
def make_tag_cloud(qsTag):
    """
    Build a weighted tag cloud from tags annotated with a ``count`` attribute.

    Each tag maps to a dict with its name, count and a display weight in the
    range 1..3, scaled linearly between the smallest and largest count.

    Args:
        qsTag: iterable of objects exposing ``name`` and ``count``
               (e.g. an annotated Django queryset).

    Returns:
        list of ``{'name', 'count', 'weight'}`` dicts; empty for empty input.
    """
    # Materialize once: the input may be a queryset/iterator and is needed
    # for min, max and the final pass.
    tags = list(qsTag)
    if not tags:
        return []
    minCount = min(tag.count for tag in tags)
    maxCount = max(tag.count for tag in tags)

    def get_weight_func(minweight, maxweight):
        # Linear scale between the count range and the weight range; when
        # all counts are equal there is no range, so use a constant factor.
        if minCount == maxCount:
            factor = 1.0
        else:
            factor = (maxweight - minweight) / (maxCount - minCount)
        # BUG FIX: the original recomputed ``factor`` unconditionally after
        # this if/else, raising ZeroDivisionError whenever every tag had the
        # same count.
        def func(count):
            return round(minweight + (factor * (count - minCount)))
        return func

    weight_func = get_weight_func(1, 3)
    return [
        {'name': tag.name, 'count': tag.count, 'weight': weight_func(tag.count)}
        for tag in tags
    ]
| [
"majh00@naver.com"
] | majh00@naver.com |
3dcf0a2a5e0e815380124ab0ef3fdcff617882b5 | 6f701256f47be8a8779669e81a9467a02523f747 | /views/app.py | fd187ef584fcf853abbf698c3090c74ccdb1876e | [] | no_license | yvesjordan06/automata_python | 7b52ec440359f2c73c3a51086afc40056513a6a0 | 07f80486137d188238651f9f8c52a197cbab0468 | refs/heads/master | 2020-09-19T17:53:58.661117 | 2019-12-21T02:49:36 | 2019-12-21T02:49:36 | 224,257,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,947 | py | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
"""
Automata 304
This is an assignment on Automata
Author: Yves Jordan Nguejip Mukete
email: yvesjordan06@gmail.com
Last edited: December 2019
"""
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import *
from views import Pages
from views.Components import HErrorDialog, HAction
def show_automata():
    # Open the "create automata" dialog modally.
    a = Pages.HCreateAutomata()
    a.exec()
class MainWindow(QMainWindow):
    """Main application window: a menu bar plus a stack of switchable pages."""

    def __init__(self, flags=None, *args, **kwargs):
        super().__init__(flags, *args, **kwargs)
        # All pages live in a QStackedWidget and are switched by name.
        self.AppPages = {
            'main': Pages.HMainWindow(),
            'help': Pages.HHelpWindow(),
            'new': Pages.HCreateAutomata()
        }
        self.AppActions = {
            'exit': HAction(
                name='Exit',
                shortcut='Ctrl+Q',
                status_tip='Quit Application',
                slot=self.close,
                icon=QIcon('icons/exit.png')
            ),
            'help': HAction(
                name='About',
                shortcut='Ctrl+F1',
                slot=[self.change_page, 'help']
            ),
            'new': HAction(
                name='New',
                shortcut='Ctrl+N',
                slot=[self.change_page, 'new'],
                status_tip='Create a new Automata'
            )
        }
        self.windows = list()
        self.stack = QStackedWidget()
        # Window title defaults when none is supplied by the caller.
        try:
            self.title = kwargs['title']
        except KeyError:
            self.title = 'Hiro Automata'
        self.create_menu()
        # Initialise and start the view
        self.initUI()

    def initUI(self):
        # Register the pages, then install the stack as the central widget.
        self.register_pages()
        self.setWindowTitle(self.title)
        self.statusBar().showMessage('Prêt')
        self.setCentralWidget(self.stack)

    def register_pages(self):
        # Add every page to the stack; start on the "new automata" page.
        for name, page in self.AppPages.items():
            self.stack.addWidget(page)
        self.stack.setCurrentWidget(self.AppPages['new'])

    def change_page(self, page):
        # Switch the stacked widget to the named page.
        try:
            self.stack.setCurrentWidget(self.AppPages[page])
        except KeyError:
            HErrorDialog('Page Not Found', f'The page {page} is not found', 'Did you register the page ?').exec()

    def pop_page(self, page):
        # Show the named page as a modal dialog.
        try:
            self.AppPages[page].exec()
        except KeyError:
            HErrorDialog('Page Not Found', f'The page {page} is not found', 'Did you register the page ?').exec()

    def create_menu(self):
        mainMenu = self.menuBar()
        # Sub menu
        fileMenu = mainMenu.addMenu('&File')
        helpMenu = mainMenu.addMenu('&Help')
        # Actions to sub Menu
        fileMenu.addAction(self.AppActions['new'])
        fileMenu.addAction(self.AppActions['exit'])
        helpMenu.addAction(self.AppActions['help'])
def start_app():
    # Build the Qt application, show the main window and enter the event loop.
    app = QApplication([])
    window = MainWindow()
    window.show()
    app.exec()


if __name__ == '__main__':
    start_app()
| [
"yvesjordan06@gmail.com"
] | yvesjordan06@gmail.com |
268ee4217c663eceae027c1a73c3fd4d5134936f | 784a6f72e7f75fff24d6e3f5dc686a2a0b97849a | /webot工程/完整训练和测试代码/hjk_real_facing_people_webots_pioneer3_4layers_restore.py | f6fa509450666f88738f8d00d67ca07cbb07f2e7 | [] | no_license | ruclion/follow_geek_tactron | 3546521c28f0d0229fabc9b99083776498b82530 | 5ab7a9fdaf3a3f0f9d33888a467497953c71ed56 | refs/heads/master | 2021-09-04T11:52:05.118377 | 2017-12-26T09:21:25 | 2017-12-26T09:21:25 | 109,073,119 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,809 | py | from __future__ import print_function
from collections import deque
from hjk_real_facing_people_webots_env_obstacle import WebotsLidarNnEnv
from hjk_saved_neural_qlearning import NeuralQLearner
from hjk_real_facing_people_webots_env_obstacle import actionoutPath
import tensorflow as tf
import numpy as np
import sys
import rospy
import random
import copy
import xml.dom.minidom
import gc
from time import gmtime, strftime
# Output paths for win statistics, timestamped per run.
out_wintimes_path = "my_net22/wintimes_" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + ".txt"
out_test_wintimes_path = "my_net22/test_wintimes_" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + ".txt"
# State layout (see complex_init_q_net): 16 laser rays, 2 goal values,
# a facing flag and the previous action.
laser_dim = 16
dst_dim = 2
facing_dim = 1
history_dim = 1
state_dim = laser_dim + dst_dim + facing_dim + history_dim
num_actions = 5
MAX_STEPS = 150
COLLISION_THRESHOLD = 0.3
episode_history = deque(maxlen=100)
# 0 means to train; 1 means to test for my case ;
py_function = 0
MAX_EPISODES = 100000
JUSTTEST = 0
# now when justtest, it will make use. And should use right limit_sta_dis_value and limit_sta_dis_pos
USELIMITSTADIS = 0
LIMITSTADISVALUE = 3 * 10
LIMITSTADISPOS = laser_dim # 0~15is laser, 16 is dist
## four layers
num_layers = 4
num_neural = [state_dim, 18, 18, num_actions]
# Scenario lists (start poses per episode) filled by loadxml().
human_x = []
human_y = []
human_rotation_z = []
robot_x = []
robot_y = []
robot_rotation_z = []
def loadxml():
    # Load the test episodes (human/robot start poses) from test.xml into
    # the module-level scenario lists above.
    dom = xml.dom.minidom.parse('src/env/test.xml')
    root = dom.documentElement
    bb = root.getElementsByTagName('episode')
    for i, var in enumerate(bb):
        human_x.append(float(var.getElementsByTagName('human_x')[0].firstChild.data))
        human_y.append(float(var.getElementsByTagName('human_y')[0].firstChild.data))
        human_rotation_z.append(int(var.getElementsByTagName('human_rotation_z')[0].firstChild.data))
        robot_x.append(float(var.getElementsByTagName('robot_x')[0].firstChild.data))
        robot_y.append(float(var.getElementsByTagName('robot_y')[0].firstChild.data))
        robot_rotation_z.append(float(var.getElementsByTagName('robot_rotation_z')[0].firstChild.data))
def add_layer(inputs, in_size, out_size, w_name, b_name, activation_function=None):
    # One fully connected layer (TF1 get_variable API): y = act(x W + b).
    # Variable names must be unique within the enclosing variable scope.
    Weights = tf.get_variable(w_name, [in_size, out_size],
                              initializer=tf.random_normal_initializer(mean=0.0, stddev=0.2))
    biases = tf.get_variable(b_name, out_size,
                             initializer=tf.constant_initializer(0))
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
def init_q_net(states):
    # Plain MLP Q-network: state -> 18 -> 18 -> num_actions (see num_neural).
    h1 = add_layer(states, num_neural[0], num_neural[1], 'W1', 'b1', activation_function=tf.nn.relu)
    h2 = add_layer(h1, num_neural[1], num_neural[2], 'W2', 'b2', activation_function=tf.nn.relu)
    q = add_layer(h2, num_neural[2], num_neural[3], 'W3', 'b3', activation_function=None)
    return q
def complex_init_q_net(states):
    # Structured Q-network: the state vector is split into laser readings,
    # goal (distance + facing) and the previous action, which are merged
    # back into the network at different depths.
    laser = tf.slice(states, [0, 0], [-1, laser_dim])
    goal = tf.slice(states, [0, laser_dim], [-1, dst_dim + facing_dim])
    his_a = tf.slice(states, [0, laser_dim + dst_dim + facing_dim], [-1, history_dim])
    # print(laser)
    # print(goal)
    # print(his_a)
    laser_8 = add_layer(laser, laser_dim, 8, 'W1', 'b1', activation_function=tf.nn.relu)
    cat_laser_8_his_a = tf.concat([laser_8, his_a], 1)
    # print(cat_laser_8_his_a)
    laser_5 = add_layer(cat_laser_8_his_a, 9, 5, 'W2', 'b2', activation_function=tf.nn.relu)
    cat_laser_5_goal = tf.concat([laser_5, goal], 1)
    cat_r8 = add_layer(cat_laser_5_goal, 8, 8, 'W3', 'b3', activation_function=tf.nn.relu)
    # Final layer outputs one Q-value per action (5), no activation.
    cat_5 = add_layer(cat_r8, 8, 5, 'W4', 'b4', activation_function=None)
    # h_8_and_1 = tf.concat(1, h_)
    # h2 = add_layer(h1, num_neural[1], num_neural[2], 'W2', 'b2', activation_function=tf.nn.relu)
    # q = add_layer(h2, num_neural[2], num_neural[3], 'W3', 'b3', activation_function=None)
    return cat_5
def testtest(path):
    # Evaluate the current policy on every scenario loaded from test.xml and
    # append win statistics to the test log file.
    # NOTE(review): nesting reconstructed; the source's indentation was lost.
    env.collision_threshold = 0.25
    xml_test_cnt = 0
    sum_steps = 0
    l = len(human_x)
    #print('---- ', l)
    for i_episode in xrange(l):
        env.my_case = -1
        state = env.reset(1, human_x[i_episode], human_y[i_episode], human_rotation_z[i_episode], robot_x[i_episode],
                          robot_y[i_episode], robot_rotation_z[i_episode])
        last_action = -1
        wrongActionTimes = 0
        record_t = -1
        for t in xrange(MAX_STEPS):
            record_t = t
            #print("In episode ", i_episode, ":")
            #print('step ' + str(t))
            # print("new change : ", state, "sure: ", state[-1])
            # print("333333 --- state: ", state[LIMITSTADISPOS], state)
            # Clamp the distance entry of the state before feeding the net.
            limit_state = copy.deepcopy(state)
            if limit_state[LIMITSTADISPOS] > LIMITSTADISVALUE:
                limit_state[LIMITSTADISPOS] = LIMITSTADISVALUE - random.random() * 10
            # print("www i don't konw: ", state)
            print('tttttttttttttttttttttttttttttttttttttttttttt: ', i_episode, 'wintimes: ', xml_test_cnt)
            action = q_learner.eGreedyAction(limit_state[np.newaxis, :], False)
            # Suppress left/right oscillation (actions 3 and 4 alternating).
            if (last_action == 3 and action == 4) or (last_action == 4 and action == 3):
                wrongActionTimes = wrongActionTimes + 1
                if wrongActionTimes == 2:
                    wrongActionTimes = 0
                    print('hjk--- ffffff :', action)
                    action = 0
            #print('hjk--action: ', action, 'lastaction', last_action)
            actionout = open(actionoutPath, 'a+')
            # print("??????????????????????????????????????????????????????????????????????????????????????")
            print('action: ', action, 'lastaction: ', last_action, file = actionout)
            sys.stdout.flush()
            actionout.close()
            next_state, reward, done, _ = env.step(action)
            last_action = action
            state = next_state
            if done:
                # A reward of ~500 marks a successful episode.
                if reward >= 500 - 1:
                    xml_test_cnt += 1
                    sum_steps += t
                    out_test_wintimes = open(out_test_wintimes_path, 'a+')
                    print('testround: ', i_episode, "win!! use steps: ", t, file=out_test_wintimes)
                    out_test_wintimes.close()
                break
    if xml_test_cnt == 0:
        mean_steps = 0
    else:
        mean_steps = sum_steps * 1.0 / xml_test_cnt
    out_test_wintimes = open(out_test_wintimes_path, 'a+')
    print('test ', path, "wintimes: ", xml_test_cnt, "mean_steps: ", mean_steps, file=out_test_wintimes)
    sys.stdout.flush()
    out_test_wintimes.close()
    env.collision_threshold = COLLISION_THRESHOLD
if __name__ == '__main__':
    # NOTE(review): nesting reconstructed; the source's indentation was lost.
    # Optional first CLI argument: path of a saved network to restore.
    len_args = len(sys.argv)
    path = None
    if (len_args > 1):
        path = str(sys.argv[1])
    loadxml()
    env_name = 'facing_people_webots_env_obstacle'
    sess = tf.Session()
    #### hjk change the learning_rate to 0.001. nnn.....
    optimizer = tf.train.RMSPropOptimizer(learning_rate=0.004, decay=0.9)
    # writer = tf.train.SummaryWriter("/tmp/{}-experiment-1".format(env_name), graph=sess.graph)
    writer = tf.summary.FileWriter("my_net20/{}-experiment-1".format(env_name), graph=sess.graph)
    if path is not None:
        print('resotre net path: ' + path)
    else:
        print("init")
    # restore_net(sess, path)
    q_learner = NeuralQLearner(sess,
                               optimizer,
                               complex_init_q_net,
                               path,
                               state_dim,
                               num_actions,
                               512,  # batch_size=32,
                               0.5,  # init_exp=0.3,  # 0.5,  # initial exploration prob
                               0.1,  # final_exp=0.001,  # final exploration prob
                               # anneal_steps=10000,  # N steps for annealing exploration
                               200000,  # anneal_steps=2000,  # N steps for annealing exploration
                               10000,  # replay_buffer_size=10000,
                               3,  # store_replay_every=3,  # how frequent to store experience
                               0.9,  # discount_factor=0.9,  # discount future rewards
                               0.01,  # target_update_rate=0.01,
                               0.01,  # reg_param=0.01,  # regularization constants
                               5,  # max_gradient=5,  # max gradient norms
                               False,  # double_q_learning=False,
                               None,  # summary=None,
                               100  # summary_every=100
                               )
    # print(sess.run(tf.get_default_graph().get_tensor_by_name("q_network/b3:0")))
    # print(sess.run(tf.get_default_graph().get_tensor_by_name("target_network/b3:0")))
    env = WebotsLidarNnEnv(laser_dim, COLLISION_THRESHOLD)
    wintimes = 0
    for i_episode in xrange(MAX_EPISODES):
        # initialize
        if py_function == 1:
            env.my_case = i_episode
        else:
            env.my_case = -1
        state = env.reset()
        # print("2222222 --- state: ", state[LIMITSTADISPOS])
        total_rewards = 0
        last_action = -1
        wrongActionTimes = 0
        record_t = -1
        for t in xrange(MAX_STEPS):
            gc.collect()
            record_t = t
            print("In episode ", i_episode, ":")
            print('step ' + str(t))
            # print("new change : ", state, "sure: ", state[-1])
            # Training uses an epsilon-greedy action; testing optionally
            # clamps the distance entry of the state first.
            if JUSTTEST == 0:
                action = q_learner.eGreedyAction(state[np.newaxis, :])
            else:
                if USELIMITSTADIS == 0:
                    action = q_learner.eGreedyAction(state[np.newaxis, :], False)
                else:
                    # print("333333 --- state: ", state[LIMITSTADISPOS], state)
                    limit_state = copy.deepcopy(state)
                    if limit_state[LIMITSTADISPOS] > LIMITSTADISVALUE:
                        limit_state[LIMITSTADISPOS] = LIMITSTADISVALUE - random.random() * 10
                    # print("www i don't konw: ", state)
                    print('hjk--- limit_state: ', state, limit_state)
                    action = q_learner.eGreedyAction(limit_state[np.newaxis, :], False)
            # Suppress left/right oscillation (actions 3 and 4 alternating).
            if (last_action == 3 and action == 4) or (last_action == 4 and action == 3):
                wrongActionTimes = wrongActionTimes + 1
                if wrongActionTimes == 2:
                    wrongActionTimes = 0
                    print('hjk--- ffffff :', action)
                    action = 0
            print('hjk--action: ', action, 'lastaction', last_action)
            actionout = open(actionoutPath, 'a+')
            # print("??????????????????????????????????????????????????????????????????????????????????????")
            print('action: ', action, 'lastaction: ', last_action, file=actionout)
            sys.stdout.flush()
            actionout.close()
            next_state, reward, done, _ = env.step(action)
            last_action = action
            total_rewards += reward
            if JUSTTEST == 0:
                # Only store transitions whose history entries are valid.
                if state[-1] != -1 and next_state[-1] != -1:
                    q_learner.storeExperience(state, action, reward, next_state, done)
                q_learner.updateModel(i_episode)
            state = next_state
            if done:
                # A reward of ~500 marks a successful episode.
                if reward >= 500 - 1:
                    wintimes += 1
                else:
                    record_t = -1
                break
        episode_history.append(wintimes)
        # mean_rewards = np.mean(episode_history)
        print("Episode {}".format(i_episode))
        #print("Finished after {} timesteps".format(t + 1))
        print("Reward for this episode: {}".format(total_rewards))
        # print("last 99 episodes wintimes: ", episode_history[-1][0] - episode_history[0][0])
        out_wintimes = open(out_wintimes_path, "a+")
        print("Episode: ", i_episode, " ", q_learner.exploration, " ", "last 99 episodes wintimes: ",
              episode_history[-1] - episode_history[0], 'step: ', record_t,
              file=out_wintimes)
        sys.stdout.flush()
        out_wintimes.close()
        # print("Average reward for last 100 episodes: {}".format(mean_rewards))
        # Checkpoint and evaluate every 200 training episodes.
        if JUSTTEST == 0:
            if i_episode >= 200 and i_episode % 200 == 0:
                path = 'my_net22/' + env_name + '_' + str(num_layers) \
                    + 'layers_' + str(i_episode + 1) + 'epsiode_' + \
                    strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + 'restore_network_rerandom'
                q_learner.save_net(path)
                testtest(str(i_episode + 1))
| [
"ruclion@163.com"
] | ruclion@163.com |
46a9ea2d394fede56dd4689d643f5f6492dbb5d8 | 9e05aa78126e76040e4afdd83c1eba95a9c787f5 | /generator/list2.py | 9ddb23a03b2684eb7ade8a8f5033cda8d41be041 | [
"MIT"
] | permissive | lreis2415/geovalidator | 8df4cb4671288b1242d0035cf1cde1944676e1df | dd64b0577aa458b39022afa503e890e966eb56d8 | refs/heads/master | 2022-12-10T18:32:41.293337 | 2021-03-10T01:04:20 | 2021-03-10T01:04:20 | 233,007,264 | 0 | 0 | MIT | 2022-12-08T08:04:28 | 2020-01-10T09:00:31 | Python | UTF-8 | Python | false | false | 1,131 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: houzhiwei
# time: 2020/1/4 16:10
from rdflib import BNode, Graph, RDF, Namespace, Literal
from rdflib.namespace import DCTERMS
# Build a SHACL shape graph constraining the SAGA 'Flow Accumulation
# (Top-Down)' tool's 'Method' option to at most one input value.
g = Graph()
# namespaces
data = Namespace("http://www.egc.org/ont/data#")
saga = Namespace("http://www.egc.org/ont/process/saga#")
sh = Namespace("http://www.w3.org/ns/shacl#")
process = Namespace('http://www.egc.org/ont/gis/process#')
# prefixes
g.bind('data', data)
g.bind('sh', sh)
g.bind('saga', saga)
g.bind('process', process)
g.bind('dcterms', DCTERMS)
# SHACL shape graph
ds = saga.FlowAccumulationTopDownShape
g.add((ds, RDF.type, sh.NodeShape))
# [tool]_[parameter]
g.add((ds, sh.targetNode, saga.method_of_flow_accumulation_top_down))
# Property shape: 0..1 values for process:hasData on the target node.
p1 = BNode()
g.add((p1, sh.path, process.hasData))
g.add((p1, sh.minCount, Literal(0)))
g.add((p1, sh.maxCount, Literal(1)))
g.add((p1, sh.message, Literal('Must has at most one input value for option ‘Method’ of tool ‘Flow Accumulation (Top-Down)’', lang='en')))
g.add((ds, sh.property, p1))
# save as turtle file
g.serialize('../shapes/L2_FunctionalityLevelShape.ttl', format='turtle')
| [
"yesqincheng@sina.com"
] | yesqincheng@sina.com |
f5f1c76db9c43e020437e9648bba1815b6f4a874 | 441c16a4636eb4e2bdd92095173635139910dbb3 | /jwt_py/resources/item.py | 4c7b1f60d05e2d4317c8793fb578093f7c92af31 | [] | no_license | Srishtii-Srivastava/Python | 8fb5b17ba79b13bb9f169b33d4608c11a3ffa739 | a1783f4801ad053f94f8ab1bfd319c6b05cdd67c | refs/heads/master | 2021-09-27T16:49:08.458087 | 2021-03-15T10:51:23 | 2021-03-15T10:51:23 | 230,907,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | import sqlite3
from flask_restful import Resource,reqparse
from flask_jwt import jwt_required
from flask import request
from models.item import ItemModel
class Item(Resource):
    """Flask-RESTful resource for a single item addressed by name.

    GET/PUT/DELETE require a valid JWT; POST is currently unauthenticated.
    NOTE(review): the missing @jwt_required() on post() may be an oversight —
    confirm against the intended API contract.
    """
    # Shared request parser: only 'price' and 'store_id' are read from the body;
    # any other fields in the request JSON are ignored.
    parser = reqparse.RequestParser()
    parser.add_argument('price',type=float,required=True,help='This feild cannot be left blank')
    parser.add_argument('store_id',type=int,required=True,help='Every item needs a store id.')
    @jwt_required()
    def get(self,name):
        """Return the item as JSON with 200, or a message with 404 when absent."""
        item = ItemModel.get_item_by_name(name)
        if item :
            return item.json(),200
        return {'message' : 'Item not found'},404
    def post(self,name):
        """Create a new item; 400 if the name already exists, 500 on a storage error."""
        if ItemModel.get_item_by_name(name):
            return {'message' : f"Item '{name}' already exists."},400
        data = Item.parser.parse_args()
        item = ItemModel(name,**data)
        try :
            item.save_to_db()
        except:
            # broad except: any persistence failure is reported as a 500
            return {'message' : 'Error occurred while inserting item'},500
        return {'message' : 'Item added successfully.'},200
    @jwt_required()
    def put(self,name):
        """Upsert: create the item when absent, otherwise update price and store_id."""
        data = Item.parser.parse_args()
        item = ItemModel.get_item_by_name(name)
        if item is None :
            item = ItemModel(name,**data)
        else :
            item.price = data['price']
            item.store_id = data['store_id']
        item.save_to_db()
        return item.json(),200
    @jwt_required()
    def delete(self,name):
        """Delete the item by name; 404 when it does not exist."""
        item = ItemModel.get_item_by_name(name)
        if item:
            item.delete_from_db()
            return {'message' : f"{name} deleted!"},200
        else:
            return{'message' : f"{name} not found!"},404
class ItemList(Resource):
    """Flask-RESTful resource that lists every stored item."""
    def get(self):
        """Return all items serialized as {'items': [...]}."""
        return {'items' : list(map(lambda entry: entry.json(), ItemModel.query.all()))}
"srishti.srivastava25@icloud.com"
] | srishti.srivastava25@icloud.com |
80b8867d668de3ba11170cad58921cd389bef6d3 | 2e5b5738853a3ebf186421c5f870d2595ef77d06 | /server/dessa_model/utils.py | b395937b61be483fd253216d11fa8ba5efc843b7 | [] | no_license | David-Happel/realtime_deepfake_audio_detection | 44b62380696c06b0ed290ea3a398b33ddd0a6a3e | 1d1b4a73e7e235c830288a3b9b24f2066dd724ef | refs/heads/master | 2023-02-19T22:24:49.779306 | 2021-01-14T15:43:53 | 2021-01-14T15:43:53 | 297,591,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,654 | py | import os
import numpy as np
import matplotlib
import nlpaug.augmenter.audio as naa
import tensorflow as tf
from keras.layers import Add, Lambda, Concatenate, SpatialDropout1D
import keras
from keras.layers import Input, Activation, Dense, Conv1D, Dropout, BatchNormalization
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, TensorBoard
from keras.models import load_model, Model
from keras import optimizers
from keras.layers.advanced_activations import LeakyReLU
from keras import backend as K
from sklearn.metrics import f1_score, accuracy_score
from tqdm import tqdm
import matplotlib.pyplot as plt
import librosa.display
import librosa.filters
from joblib import Parallel, delayed
import multiprocessing
from constants import model_params, base_data_path
from scipy import signal
from scipy.io import wavfile
from skopt import gp_minimize
from skopt.space import Real
from functools import partial
from pydub import AudioSegment
from keras.utils import multi_gpu_model
from constants import *
# Set a random seed for numpy for reproducibility
np.random.seed(42)
# Headless environments (no X display) cannot use interactive matplotlib
# backends, so fall back to the file-only Agg backend.
if os.environ.get('DISPLAY', '') == '':
    print('no display found. Using non-interactive Agg backend')
    matplotlib.use('Agg')
# `foundations` (Atlas/Dessa experiment tracking) is optional; the module
# works without it and only logs a message when the import fails.
try:
    import foundations
except Exception as e:
    print(e)
def load_wav(path, sr):
    """Load an audio file at sample rate *sr* and return only the waveform array."""
    waveform, _rate = librosa.core.load(path, sr=sr)
    return waveform
def save_wav(wav, path, sr):
    """Write a float waveform to *path* as 16-bit PCM WAV, peak-normalized.

    The signal is scaled so its peak maps to 32767 (denominator floored at
    0.01 to avoid dividing by near-zero silence).

    BUG FIX: the original `wav *= ...` scaled the caller's array in place,
    silently corrupting the buffer for any later use; we now scale a copy.
    """
    scaled = wav * (32767 / max(0.01, np.max(np.abs(wav))))
    # proposed by @dsmiller
    wavfile.write(path, sr, scaled.astype(np.int16))
def save_wavenet_wav(wav, path, sr, inv_preemphasize, k):
    """Write a WaveNet output waveform to *path* as 16-bit PCM WAV.

    `inv_preemphasize` and `k` are kept for interface compatibility; the
    inverse pre-emphasis call is disabled (see commented line below).

    BUG FIX: like save_wav, the original mutated the caller's array in
    place via `wav *= ...`; we now scale a copy instead.
    """
    # wav = inv_preemphasis(wav, k, inv_preemphasize)
    scaled = wav * (32767 / max(0.01, np.max(np.abs(wav))))
    wavfile.write(path, sr, scaled.astype(np.int16))
def preemphasis(wav, k, preemphasize=True):
    """Apply the pre-emphasis FIR filter y[n] = x[n] - k*x[n-1].

    Returns the input untouched when *preemphasize* is False.
    """
    if not preemphasize:
        return wav
    return signal.lfilter([1, -k], [1], wav)
def inv_preemphasis(wav, k, inv_preemphasize=True):
    """Undo pre-emphasis via the IIR filter y[n] = x[n] + k*y[n-1].

    Returns the input untouched when *inv_preemphasize* is False.
    """
    return signal.lfilter([1], [1, -k], wav) if inv_preemphasize else wav
# Adapted from https://github.com/r9y9/wavenet_vocoder/blob/master/audio.py
def start_and_end_indices(quantized, silence_threshold=2):
    """Find the first and last sample deviating from the mu-law midpoint (127).

    A sample counts as non-silent when |q - 127| > silence_threshold. The
    asserts guarantee both returned indices point at non-silent samples.
    """
    n = quantized.size
    # first non-silent index from the left (mirrors upstream exhaustion value n-1)
    start = next((i for i in range(n) if abs(quantized[i] - 127) > silence_threshold), n - 1)
    # first non-silent index from the right; upstream never scans indices 1 and 0
    end = next((j for j in range(n - 1, 1, -1) if abs(quantized[j] - 127) > silence_threshold), 2)
    assert abs(quantized[start] - 127) > silence_threshold
    assert abs(quantized[end] - 127) > silence_threshold
    return start, end
def trim_silence(wav, hparams):
    '''Trim leading and trailing silence

    Useful for M-AILABS dataset if we choose to trim the extra 0.5 silence at beginning and end.
    '''
    # Thanks @begeekmyfriend and @lautjy for pointing out the params contradiction. These params are separate and tunable per dataset.
    # librosa.effects.trim returns (trimmed_audio, interval); [0] keeps only the audio.
    return librosa.effects.trim(wav, top_db=hparams.trim_top_db, frame_length=hparams.trim_fft_size, hop_length=hparams.trim_hop_size)[0]
def get_hop_size(hparams):
    """Return the STFT hop size in samples.

    Uses hparams.hop_size when set; otherwise derives it from
    frame_shift_ms and sample_rate.
    """
    if hparams.hop_size is not None:
        return hparams.hop_size
    assert hparams.frame_shift_ms is not None
    return int(hparams.frame_shift_ms / 1000 * hparams.sample_rate)
def linearspectrogram(wav, hparams):
    """Compute a linear-frequency log-magnitude spectrogram.

    Applies the reference-level shift and, when signal_normalization is
    enabled, the hparams-driven range normalization.
    """
    magnitudes = np.abs(_stft(wav, hparams)) ** hparams.magnitude_power
    spec = _amp_to_db(magnitudes, hparams) - hparams.ref_level_db
    return _normalize(spec, hparams) if hparams.signal_normalization else spec
def melspectrogram(wav, hparams):
    """Compute a mel-scale log-magnitude spectrogram.

    Same pipeline as linearspectrogram(), with a mel filterbank projection
    inserted before the dB conversion.
    """
    magnitudes = np.abs(_stft(wav, hparams)) ** hparams.magnitude_power
    spec = _amp_to_db(_linear_to_mel(magnitudes, hparams), hparams) - hparams.ref_level_db
    return _normalize(spec, hparams) if hparams.signal_normalization else spec
def inv_linear_spectrogram(linear_spectrogram, hparams):
    '''Converts linear spectrogram to waveform using librosa'''
    if hparams.signal_normalization:
        D = _denormalize(linear_spectrogram, hparams)
    else:
        D = linear_spectrogram
    # Undo the reference-level shift and dB transform applied by linearspectrogram()
    S = _db_to_amp(D + hparams.ref_level_db)**(1/hparams.magnitude_power)  # Convert back to linear
    if hparams.use_lws:
        # lws reconstructs phase from magnitudes directly (non-iterative)
        processor = _lws_processor(hparams)
        D = processor.run_lws(S.astype(np.float64).T ** hparams.power)
        y = processor.istft(D).astype(np.float32)
        return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)
    else:
        # fall back to iterative Griffin-Lim phase estimation
        return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)
def inv_mel_spectrogram(mel_spectrogram, hparams):
    '''Converts mel spectrogram to waveform using librosa'''
    if hparams.signal_normalization:
        D = _denormalize(mel_spectrogram, hparams)
    else:
        D = mel_spectrogram
    # dB -> linear amplitude, then invert the mel filterbank (pseudo-inverse)
    S = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db) **
                       (1/hparams.magnitude_power), hparams)  # Convert back to linear
    if hparams.use_lws:
        # lws reconstructs phase from magnitudes directly (non-iterative)
        processor = _lws_processor(hparams)
        D = processor.run_lws(S.astype(np.float64).T ** hparams.power)
        y = processor.istft(D).astype(np.float32)
        return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)
    else:
        # fall back to iterative Griffin-Lim phase estimation
        return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)
###########################################################################################
# tensorflow Griffin-Lim
# Thanks to @begeekmyfriend: https://github.com/begeekmyfriend/Tacotron-2/blob/mandarin-new/datasets/audio.py
def inv_linear_spectrogram_tensorflow(spectrogram, hparams):
    '''Builds computational graph to convert spectrogram to waveform using TensorFlow.
    Unlike inv_spectrogram, this does NOT invert the preemphasis. The caller should call
    inv_preemphasis on the output after running the graph.
    '''
    if hparams.signal_normalization:
        D = _denormalize_tensorflow(spectrogram, hparams)
    else:
        # BUG FIX: was `D = linear_spectrogram`, an undefined name — raised
        # NameError whenever signal_normalization was disabled.
        D = spectrogram
    # dB -> linear amplitude, undoing the magnitude_power exponent
    S = tf.pow(_db_to_amp_tensorflow(D + hparams.ref_level_db), (1/hparams.magnitude_power))
    return _griffin_lim_tensorflow(tf.pow(S, hparams.power), hparams)
def inv_mel_spectrogram_tensorflow(mel_spectrogram, hparams):
    '''Builds computational graph to convert mel spectrogram to waveform using TensorFlow.
    Unlike inv_mel_spectrogram, this does NOT invert the preemphasis. The caller should call
    inv_preemphasis on the output after running the graph.
    '''
    if hparams.signal_normalization:
        D = _denormalize_tensorflow(mel_spectrogram, hparams)
    else:
        D = mel_spectrogram
    # dB -> linear amplitude, undoing the magnitude_power exponent
    S = tf.pow(_db_to_amp_tensorflow(D + hparams.ref_level_db), (1/hparams.magnitude_power))
    S = _mel_to_linear_tensorflow(S, hparams)  # Convert back to linear
    return _griffin_lim_tensorflow(tf.pow(S, hparams.power), hparams)
###########################################################################################
def _lws_processor(hparams):
    """Create an lws STFT processor configured from hparams (imported lazily
    so the module loads even when lws is not installed)."""
    import lws
    return lws.lws(hparams.n_fft, get_hop_size(hparams), fftsize=hparams.win_size, mode="speech")
def _griffin_lim(S, hparams):
    '''librosa implementation of Griffin-Lim
    Based on https://github.com/librosa/librosa/issues/434
    '''
    # start from random phase; each iteration re-estimates phase from the
    # current waveform while keeping the known magnitudes fixed
    angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
    # NOTE(review): np.complex is deprecated/removed in newer numpy — use complex/np.complex128
    S_complex = np.abs(S).astype(np.complex)
    y = _istft(S_complex * angles, hparams)
    for i in range(hparams.griffin_lim_iters):
        angles = np.exp(1j * np.angle(_stft(y, hparams)))
        y = _istft(S_complex * angles, hparams)
    return y
def _griffin_lim_tensorflow(S, hparams):
    '''TensorFlow implementation of Griffin-Lim
    Based on https://github.com/Kyubyong/tensorflow-exercises/blob/master/Audio_Processing.ipynb
    '''
    with tf.variable_scope('griffinlim'):
        # TensorFlow's stft and istft operate on a batch of spectrograms; create batch of size 1
        S = tf.expand_dims(S, 0)
        S_complex = tf.identity(tf.cast(S, dtype=tf.complex64))
        y = tf.contrib.signal.inverse_stft(S_complex, hparams.win_size, get_hop_size(hparams), hparams.n_fft)
        # iteratively re-estimate phase from the waveform, keeping magnitudes fixed
        for i in range(hparams.griffin_lim_iters):
            est = tf.contrib.signal.stft(y, hparams.win_size, get_hop_size(hparams), hparams.n_fft)
            # normalize to unit magnitude (1e-8 floor avoids division by zero)
            angles = est / tf.cast(tf.maximum(1e-8, tf.abs(est)), tf.complex64)
            y = tf.contrib.signal.inverse_stft(S_complex * angles, hparams.win_size,
                                               get_hop_size(hparams), hparams.n_fft)
        return tf.squeeze(y, 0)
def _stft(y, hparams):
    """Short-time Fourier transform via lws when enabled, otherwise librosa."""
    if hparams.use_lws:
        return _lws_processor(hparams).stft(y).T
    return librosa.stft(y=y, n_fft=hparams.n_fft, hop_length=get_hop_size(hparams),
                        win_length=hparams.win_size, pad_mode='constant')
def _istft(y, hparams):
    """Inverse STFT via librosa using the configured hop and window sizes."""
    hop = get_hop_size(hparams)
    return librosa.istft(y, hop_length=hop, win_length=hparams.win_size)
##########################################################
# Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
def num_frames(length, fsize, fshift):
    """Compute number of time frames of spectrogram

    Matches lws-style padding: (fsize - fshift) samples on each side, with
    one extra frame when the length is not a multiple of the shift.
    """
    pad = fsize - fshift
    padded = length + 2 * pad
    extra = 1 if length % fshift == 0 else 2
    return (padded - fsize) // fshift + extra
def pad_lr(x, fsize, fshift):
    """Compute left and right padding

    Left pad is (fsize - fshift); the right pad additionally absorbs the
    remainder needed so the padded signal covers a whole number of frames.
    """
    pad = fsize - fshift
    frames = num_frames(len(x), fsize, fshift)
    total = len(x) + 2 * pad
    remainder = (frames - 1) * fshift + fsize - total
    return pad, pad + remainder
##########################################################
# Librosa correct padding
def librosa_pad_lr(x, fsize, fshift, pad_sides=1):
    '''compute right padding (final frame) or both sides padding (first and final frames)

    pad_sides=1 puts everything on the right; pad_sides=2 splits evenly,
    giving the right side the odd remainder.
    '''
    assert pad_sides in (1, 2)
    # pad up to the next multiple of the hop size
    pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0]
    if pad_sides == 1:
        return 0, pad
    return pad // 2, pad // 2 + pad % 2
# Conversions
# Lazily-built module-level caches for the mel filterbank and its
# pseudo-inverse; populated on first use by _linear_to_mel / _mel_to_linear.
_mel_basis = None
_inv_mel_basis = None
def _linear_to_mel(spectogram, hparams):
    """Project a linear-frequency spectrogram onto the mel basis (cached in _mel_basis)."""
    global _mel_basis
    if _mel_basis is None:
        _mel_basis = _build_mel_basis(hparams)
    return np.dot(_mel_basis, spectogram)
def _mel_to_linear(mel_spectrogram, hparams):
    """Approximate the inverse mel projection via the cached pseudo-inverse.

    The 1e-10 floor keeps the result strictly positive for the later log/dB math.
    """
    global _inv_mel_basis
    if _inv_mel_basis is None:
        _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))
    return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))
def _mel_to_linear_tensorflow(mel_spectrogram, hparams):
    """TensorFlow version of _mel_to_linear (shares the cached numpy pseudo-inverse).

    Transposes so matmul contracts over the mel axis, then transposes back.
    """
    global _inv_mel_basis
    if _inv_mel_basis is None:
        _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))
    return tf.transpose(tf.maximum(1e-10, tf.matmul(tf.cast(_inv_mel_basis, tf.float32), tf.transpose(mel_spectrogram, [1, 0]))), [1, 0])
def _build_mel_basis(hparams):
    """Build the mel filterbank matrix via librosa; fmax must respect Nyquist."""
    assert hparams.fmax <= hparams.sample_rate // 2
    return librosa.filters.mel(hparams.sample_rate, hparams.n_fft, n_mels=hparams.num_mels,
                               fmin=hparams.fmin, fmax=hparams.fmax)
def _amp_to_db(x, hparams):
min_level = np.exp(hparams.min_level_db / 20 * np.log(10))
return 20 * np.log10(np.maximum(min_level, x))
def _db_to_amp(x):
return np.power(10.0, (x) * 0.05)
def _db_to_amp_tensorflow(x):
    """TensorFlow version of _db_to_amp: 10 ** (x * 0.05), elementwise."""
    return tf.pow(tf.ones(tf.shape(x)) * 10.0, x * 0.05)
def _normalize(S, hparams):
    """Rescale a dB spectrogram into the configured value range.

    symmetric_mels maps to [-max_abs_value, max_abs_value]; otherwise to
    [0, max_abs_value]. With allow_clipping_in_normalization the result is
    clipped; otherwise the asserts require S to already lie in [min_level_db, 0].
    """
    if hparams.allow_clipping_in_normalization:
        if hparams.symmetric_mels:
            return np.clip((2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value,
                           -hparams.max_abs_value, hparams.max_abs_value)
        else:
            return np.clip(hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)), 0, hparams.max_abs_value)
    assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0
    if hparams.symmetric_mels:
        return (2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value
    else:
        return hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db))
def _denormalize(D, hparams):
    """Invert _normalize: map a normalized spectrogram back to dB values.

    Mirrors the same symmetric/asymmetric and clipping branches as _normalize.
    """
    if hparams.allow_clipping_in_normalization:
        if hparams.symmetric_mels:
            return (((np.clip(D, -hparams.max_abs_value,
                              hparams.max_abs_value) + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value))
                    + hparams.min_level_db)
        else:
            return ((np.clip(D, 0, hparams.max_abs_value) * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)
    if hparams.symmetric_mels:
        return (((D + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value)) + hparams.min_level_db)
    else:
        return ((D * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)
def _denormalize_tensorflow(D, hparams):
    """TensorFlow version of _denormalize (same branch structure, tf ops)."""
    if hparams.allow_clipping_in_normalization:
        if hparams.symmetric_mels:
            return (((tf.clip_by_value(D, -hparams.max_abs_value,
                                       hparams.max_abs_value) + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value))
                    + hparams.min_level_db)
        else:
            return ((tf.clip_by_value(D, 0, hparams.max_abs_value) * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)
    if hparams.symmetric_mels:
        return (((D + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value)) + hparams.min_level_db)
    else:
        return ((D * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)
# List a directory's entries as absolute paths (despite the name, the files
# are not filtered by extension).
def get_list_of_wav_files(file_path):
    """Return absolute paths for every entry directly inside *file_path*."""
    base = os.path.abspath(file_path)
    return [os.path.join(base, name) for name in os.listdir(file_path)]
def convert_to_flac(dir_path):
    """Convert every non-FLAC file in *dir_path* to FLAC, deleting the original.

    BUG FIX: the old code derived the output stem via `split('.')[:-1]` but
    then used only element [0], truncating filenames that contain extra dots
    (e.g. "a.b.wav" was exported as "a.flac", losing ".b"). os.path.splitext
    keeps the full stem.
    """
    for file_name in os.listdir(dir_path):
        stem, ext = os.path.splitext(file_name)
        if ext == '.flac':
            continue
        src = os.path.join(dir_path, file_name)
        # pydub infers the decoder from the extension (without the leading dot)
        audio = AudioSegment.from_file(src, ext.lstrip('.'))
        os.remove(src)
        audio.export(os.path.join(dir_path, f"{stem}.flac"), format="flac")
def get_target(file_path):
    """Infer the class label from a path component.

    Returns 'real' when the path contains '/real/', 'fake' when it contains
    '/fake/', and None otherwise ('real' wins if both appear).
    """
    for label in ('real', 'fake'):
        if f'/{label}/' in file_path:
            return label
    return None
def save_wav_to_npy(output_file, spectrogram):
    """Persist a spectrogram array to *output_file* in NumPy .npy format
    (np.save appends '.npy' when the path lacks the extension)."""
    np.save(output_file, spectrogram)
def wav_to_mel(input_file, output_path):
    """Convert one audio file to a mel spectrogram saved as '<stem>-<label>.npy'.

    NOTE(review): the output name is '{output_path}{stem}-{label}' with no
    path separator inserted, so output_path must end with '/' — confirm at
    the call sites (convert_and_save).
    """
    y, sr = librosa.load(input_file)
    filename = os.path.basename(input_file)
    # label ('real'/'fake'/None) is inferred from the directory structure
    target = get_target(input_file)
    output_file = '{}{}-{}'.format(output_path, filename.split('.')[0], target)
    # transposed so time is the first axis: (frames, n_mels)
    mel_spectrogram_of_audio = librosa.feature.melspectrogram(y=y, sr=sr).T
    save_wav_to_npy(output_file, mel_spectrogram_of_audio)
def convert_and_save(real_audio_files, output_real, fake_audio_files, output_fake):
    """Convert every real and fake audio file to a mel-spectrogram .npy file,
    printing a per-class summary after each group."""
    for files, destination, kind in ((real_audio_files, output_real, 'real'),
                                     (fake_audio_files, output_fake, 'fake')):
        for audio_file in files:
            wav_to_mel(audio_file, destination)
        print(str(len(files)) + ' ' + kind + ' files converted to spectrogram')
def split_title_line(title_text, max_words=5):
    """Wrap *title_text* onto multiple lines, at most *max_words* words per line."""
    words = title_text.split()
    chunks = (words[i:i + max_words] for i in range(0, len(words), max_words))
    return '\n'.join(' '.join(chunk) for chunk in chunks)
def plot_spectrogram(pred_spectrogram, path, title=None, split_title=False, target_spectrogram=None, max_len=None,
                     auto_aspect=False):
    """Save a PNG of the predicted spectrogram, optionally alongside the target.

    max_len truncates both spectrograms; split_title wraps a long title;
    auto_aspect stretches images to the axes instead of keeping square pixels.
    """
    if max_len is not None:
        target_spectrogram = target_spectrogram[:max_len]
        pred_spectrogram = pred_spectrogram[:max_len]
    if split_title:
        title = split_title_line(title)
    fig = plt.figure(figsize=(10, 8))
    # Set common labels
    fig.text(0.5, 0.18, title, horizontalalignment='center', fontsize=16)
    # target spectrogram subplot (two stacked axes when a target is provided)
    if target_spectrogram is not None:
        ax1 = fig.add_subplot(311)
        ax2 = fig.add_subplot(312)
        # np.rot90 puts time on the x-axis and frequency on the y-axis
        if auto_aspect:
            im = ax1.imshow(np.rot90(target_spectrogram), aspect='auto', interpolation='none')
        else:
            im = ax1.imshow(np.rot90(target_spectrogram), interpolation='none')
        ax1.set_title('Target Mel-Spectrogram')
        fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
        ax2.set_title('Predicted Mel-Spectrogram')
    else:
        ax2 = fig.add_subplot(211)
    if auto_aspect:
        im = ax2.imshow(np.rot90(pred_spectrogram), aspect='auto', interpolation='none')
    else:
        im = ax2.imshow(np.rot90(pred_spectrogram), interpolation='none')
    fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax2)
    plt.tight_layout()
    plt.savefig(path, format='png')
    plt.close()
def process_audio_files(filename, dirpath):
    """Load one flac file, trim silence, and return (mel_spectrogram, label).

    The label is parsed from the filename's last '_'-separated token:
    'bonafide' or '*target*' -> 1 (genuine), 'spoof' -> 0, anything else ->
    None (and the file is deleted). Callers filter out None labels.
    """
    audio_array, sample_rate = librosa.load(os.path.join(dirpath, 'flac', filename), sr=16000)
    trim_audio_array, index = librosa.effects.trim(audio_array)
    # time-major: (frames, num_mels); uses the module-level hparams from constants
    mel_spec_array = melspectrogram(trim_audio_array, hparams=hparams).T
    # mel_spec_array = librosa.feature.melspectrogram(y=trim_audio_array, sr=sample_rate, n_mels=model_params['num_freq_bin']).T
    label_name = filename.split('_')[-1].split('.')[0]
    if (label_name == 'bonafide') or ('target' in label_name):
        label = 1
    elif label_name == 'spoof':
        label = 0
    else:
        label = None
    if label is None:
        print(f"Removing (unknown) since it does not have label")
        os.remove(os.path.join(dirpath, 'flac', filename))
    return (mel_spec_array, label)
def convert_audio_to_processed_list(input_audio_array_list, filename, dirpath):
    """Turn a list of augmented waveforms into [mel_spectrogram, label] pairs.

    Class balancing: 'spoof' files keep only the original plus ONE randomly
    chosen augmentation (label 0); 'bonafide'/'target' files keep all
    augmentations (label 1); unknown labels keep only the original, get
    label None, and the source file is deleted.
    """
    label_name = filename.split('_')[-1].split('.')[0]
    out_list = []
    if (label_name == 'spoof'):
        audio_array_list = [input_audio_array_list[0]]
        # pick one augmentation at random (indices 1..end are augmented copies)
        choose_random_one_ind = np.random.choice(np.arange(1, len(input_audio_array_list)))
        audio_array_list.append(input_audio_array_list[choose_random_one_ind])
        label = 0
    elif (label_name == 'bonafide') or ('target' in label_name):
        audio_array_list = input_audio_array_list
        label = 1
    else:
        audio_array_list = [input_audio_array_list[0]]
        label = None
    for audio_array in audio_array_list:
        trim_audio_array, index = librosa.effects.trim(audio_array)
        mel_spec_array = melspectrogram(trim_audio_array, hparams=hparams).T
        # mel_spec_array = librosa.feature.melspectrogram(y=trim_audio_array, sr=sample_rate, n_mels=model_params['num_freq_bin']).T
        if label is None:
            print(f"Removing (unknown) since it does not have label")
            os.remove(os.path.join(dirpath, 'flac', filename))
        out_list.append([mel_spec_array, label])
    return out_list
def process_audio_files_with_aug(filename, dirpath):
    """Load one flac file and produce [mel, label] pairs for the original plus
    three nlpaug augmentations (random crop, loudness scaling, additive noise).

    The actual selection/labeling of the augmented variants happens in
    convert_audio_to_processed_list.
    """
    sr = 16000
    audio_array, sample_rate = librosa.load(os.path.join(dirpath, 'flac', filename), sr=sr)
    aug_crop = naa.CropAug(sampling_rate=sr)
    audio_array_crop = aug_crop.augment(audio_array)
    aug_loud = naa.LoudnessAug(loudness_factor=(2, 5))
    audio_array_loud = aug_loud.augment(audio_array)
    aug_noise = naa.NoiseAug(noise_factor=0.03)
    audio_array_noise = aug_noise.augment(audio_array)
    # index 0 must stay the unaugmented original (relied upon downstream)
    audio_array_list = [audio_array, audio_array_crop, audio_array_loud,
                        audio_array_noise]
    out_list = convert_audio_to_processed_list(audio_array_list, filename, dirpath)
    return out_list
def preprocess_and_save_audio_from_ray_parallel(dirpath, mode, recompute=False, dir_num=None, isaug=False):
    """Preprocess every flac file under dirpath/flac in parallel and cache the
    (mel, label) list to <base_path>/preprocessed_data/<mode>_preproc[_aug].npy.

    Skips the work when the cache file already exists unless recompute=True.
    dir_num selects which entry of base_data_path (a list) receives the output.
    """
    if isaug:
        preproc_filename = f'{mode}_preproc_aug.npy'
    else:
        preproc_filename = f'{mode}_preproc.npy'
    # if mode != 'train':
    #     preproc_filename = f'{mode}_preproc.npy'
    if dir_num is not None:
        base_path = base_data_path[dir_num]
    else:
        base_path = base_data_path[0]
    if not os.path.isfile(os.path.join(f'{base_path}/preprocessed_data', preproc_filename)) or recompute:
        filenames = os.listdir(os.path.join(dirpath, 'flac'))
        # leave one core free for the parent process
        num_cores = multiprocessing.cpu_count()-1
        if isaug:
            precproc_list_saved = Parallel(n_jobs=num_cores)(
                delayed(process_audio_files_with_aug)(filename, dirpath) for filename in tqdm(filenames))
            # Flatten the list (each worker returns a list of [mel, label] pairs)
            print(f"******original len of preproc_list: {len(precproc_list_saved)}")
            precproc_list = []
            for i in range(len(precproc_list_saved)):
                precproc_list.extend(precproc_list_saved[i])
            # precproc_list = [item for sublist in precproc_list for item in sublist]
            print(f"******flattened len of preproc_list: {len(precproc_list)}")
        else:
            precproc_list = Parallel(n_jobs=num_cores)(
                delayed(process_audio_files)(filename, dirpath) for filename in tqdm(filenames))
            # drop entries whose label could not be determined
            precproc_list = [x for x in precproc_list if x[1] is not None]
        if not os.path.isdir(f'{base_path}/preprocessed_data'):
            os.mkdir(f'{base_path}/preprocessed_data')
        np.save(os.path.join(f'{base_path}/preprocessed_data', preproc_filename), precproc_list)
    else:
        print("Preprocessing already done!")
def process_audio_files_inference(filename, dirpath, mode):
    """Preprocess one inference file from dirpath/<mode>.

    mode 'unlabeled' returns just the mel spectrogram; 'real'/'fake' return
    (mel, label) with label 1/0 respectively.
    """
    audio_array, sample_rate = librosa.load(os.path.join(dirpath, mode, filename), sr=16000)
    trim_audio_array, index = librosa.effects.trim(audio_array)
    mel_spec_array = melspectrogram(trim_audio_array, hparams=hparams).T
    if mode == 'unlabeled':
        return mel_spec_array
    elif mode == 'real':
        label = 1
    elif mode == 'fake':
        label = 0
    return mel_spec_array, label
def preprocess_from_ray_parallel_inference(dirpath, mode, use_parallel=True):
    """Preprocess every file under dirpath/<mode> for inference.

    Runs joblib-parallel across all cores when use_parallel=True, otherwise
    a sequential loop; returns the list of per-file preprocessing results.
    """
    filenames = os.listdir(os.path.join(dirpath, mode))
    if use_parallel:
        num_cores = multiprocessing.cpu_count()
        preproc_list = Parallel(n_jobs=num_cores)(
            delayed(process_audio_files_inference)(filename, dirpath, mode) for filename in tqdm(filenames))
    else:
        preproc_list = []
        for filename in tqdm(filenames):
            preproc_list.append(process_audio_files_inference(filename, dirpath, mode))
    return preproc_list
def preprocess_and_save_audio_from_ray(dirpath, mode, recompute=False):
    """Sequential variant of the parallel preprocessor: caches (mel, label)
    pairs to <base_data_path>/preprocessed_data/<mode>_preproc.npy.

    NOTE(review): base_data_path is indexed as a list elsewhere in this module
    (base_data_path[dir_num]); using it directly inside an f-string here would
    stringify the whole list — confirm against constants.py.
    """
    filenames = os.listdir(os.path.join(dirpath, 'flac'))
    if not os.path.isfile(os.path.join(f'{base_data_path}/preprocessed_data', f'{mode}_preproc.npy')) or recompute:
        precproc_list = []
        for filename in tqdm(filenames):
            audio_array, sample_rate = librosa.load(os.path.join(dirpath, 'flac', filename), sr=16000)
            trim_audio_array, index = librosa.effects.trim(audio_array)
            mel_spec_array = melspectrogram(trim_audio_array, hparams=hparams).T
            # mel_spec_array = librosa.feature.melspectrogram(y=trim_audio_array, sr=sample_rate, n_mels=model_params['num_freq_bin']).T
            label_name = filename.split('_')[-1].split('.')[0]
            if label_name == 'bonafide':
                label = 1
            elif label_name == 'spoof':
                label = 0
            else:
                label = None
            if label is not None:
                precproc_list.append((mel_spec_array, label))
            if label is None:
                print("Removing (unknown) since it does not have label")
                os.remove(os.path.join(dirpath, 'flac', filename))
        if not os.path.isdir(f'{base_data_path}/preprocessed_data'):
            os.mkdir(f'{base_data_path}/preprocessed_data')
        np.save(os.path.join(f'{base_data_path}/preprocessed_data', f'{mode}_preproc.npy'), precproc_list)
        # np.save(os.path.join(dirpath, 'preproc', 'preproc.npy'), precproc_list)
    else:
        print("Preprocessing already done!")
def preprocess_and_save_audio(dirpath, recompute=False):
    """Oldest preprocessing variant: caches (mel, label) pairs under
    dirpath/preproc/preproc.npy using librosa's own melspectrogram.

    Unlike the newer variants, this one uses model_params['num_freq_bin']
    mel bands rather than the hparams-driven melspectrogram() helper.
    """
    filenames = os.listdir(os.path.join(dirpath, 'flac'))
    if not os.path.isfile(os.path.join(dirpath, 'preproc', 'preproc.npy')) or recompute:
        precproc_list = []
        for filename in tqdm(filenames):
            audio_array, sample_rate = librosa.load(os.path.join(dirpath, 'flac', filename), sr=16000)
            trim_audio_array, index = librosa.effects.trim(audio_array)
            mel_spec_array = librosa.feature.melspectrogram(y=trim_audio_array, sr=sample_rate,
                                                            n_mels=model_params['num_freq_bin']).T
            label_name = filename.split('_')[-1].split('.')[0]
            if label_name == 'bonafide':
                label = 1
            elif label_name == 'spoof':
                label = 0
            else:
                label = None
            if label is not None:
                precproc_list.append((mel_spec_array, label))
            if label is None:
                print("Removing (unknown) since it does not have label")
                os.remove(os.path.join(dirpath, 'flac', filename))
        if not os.path.isdir(os.path.join(dirpath, 'preproc')):
            os.mkdir(os.path.join(dirpath, 'preproc'))
        np.save(os.path.join(dirpath, 'preproc', 'preproc.npy'), precproc_list)
    else:
        print("Preprocessing already done!")
def describe_array(arr):
    """Print mean/std/count of a numeric array and show a 40-bin histogram
    (plt.show() is a no-op image dump under the Agg backend)."""
    print(f"Mean duration: {arr.mean()}\n Standard Deviation: {arr.std()}\nNumber of Clips: {len(arr)}")
    plt.hist(arr, bins=40)
    plt.show()
def get_durations_from_dir(audio_dir, file_extension='.wav'):
    """Recursively collect the duration (seconds) of every matching audio file.

    Matching is a substring test on the filename, not a strict suffix check.
    Returns a numpy array of durations.
    """
    durations = list()
    for root, dirs, filenames in os.walk(audio_dir):
        for file_name in filenames:
            if file_extension in file_name:
                file_path = os.path.join(root, file_name)
                audio = AudioSegment.from_wav(file_path)
                duration = audio.duration_seconds
                durations.append(duration)
    return np.array(durations)
def get_zero_pad(batch_input):
    """Zero-pad every array in *batch_input* to the longest length in the batch.

    Replaces each element of the list in place with a padded copy and also
    returns the list (2-D arrays are padded along the first axis).
    """
    longest = max(len(arr) for arr in batch_input)
    for idx, arr in enumerate(batch_input):
        deficit = longest - len(arr)
        if arr.ndim > 1:
            padded = np.concatenate([arr, np.zeros((deficit, arr.shape[-1]))])
        else:
            padded = np.concatenate([arr, np.zeros(deficit)])
        batch_input[idx] = padded
    return batch_input
def truncate_array(batch_input):
    """Truncate every array in *batch_input* (in place) to the batch's
    shortest length, keeping each array's leading samples."""
    shortest = min(len(arr) for arr in batch_input)
    for idx, arr in enumerate(batch_input):
        batch_input[idx] = arr[:shortest]
    return batch_input
def random_truncate_array(batch_input):
    """Truncate every array (in place) to the batch's shortest length,
    taking a randomly positioned window from each longer array."""
    shortest = min(len(arr) for arr in batch_input)
    for idx, arr in enumerate(batch_input):
        slack = len(arr) - shortest
        offset = np.random.randint(0, slack) if slack > 0 else 0
        batch_input[idx] = arr[offset:offset + shortest]
    return batch_input
class f1_score_callback(keras.callbacks.Callback):
    """Keras callback computing validation F1 after each epoch; optionally
    saves the model whenever the F1 score reaches a new best, and logs the
    metric to foundations when that package is available."""
    def __init__(self, x_val_inp, y_val_inp, model_save_filename=None, save_model=True):
        # x_val: validation features; y_val: binary labels
        self.x_val = x_val_inp
        self.y_val = y_val_inp
        self.model_save_filename = model_save_filename
        self.save_model = save_model
        self._val_f1 = 0
    def on_train_begin(self, logs={}):
        # history of per-epoch F1 scores (used to detect a new best)
        self.f1_score_value = []
    def on_epoch_end(self, epoch, logs={}):
        """Predict on the validation set, threshold at 0.5, compute F1."""
        y_val = self.y_val
        datagen_val = DataGenerator(self.x_val, mode='test')
        y_pred = self.model.predict_generator(datagen_val, use_multiprocessing=False, max_queue_size=50)
        y_pred_labels = np.zeros((len(y_pred)))
        y_pred_labels[y_pred.flatten() > 0.5] = 1
        self._val_f1 = f1_score(y_val, y_pred_labels.astype(int))
        print(f"val_f1: {self._val_f1:.4f}")
        self.f1_score_value.append(self._val_f1)
        if self.save_model:
            # >= against the running max means the freshly appended score
            # always triggers a save on ties with the previous best
            if self._val_f1 >= max(self.f1_score_value):
                print("F1 score has improved. Saving model.")
                self.model.save(self.model_save_filename)
        try:
            foundations.log_metric('epoch_val_f1_score', self._val_f1)
            foundations.log_metric('best_f1_score', max(self.f1_score_value))
        except Exception as e:
            # foundations is optional; metric logging failures are non-fatal
            print(e)
        return
class DataGenerator(keras.utils.Sequence):
    """Keras Sequence yielding zero-padded mel-spectrogram batches.

    mode 'train'/'val' yields (x, y) pairs; mode 'test' yields x only.
    Also implements the iterator protocol (__next__) with wrap-around so it
    can be consumed as an endless stream.
    """
    def __init__(self, x_set, y_set=None, sample_weights=None, batch_size=model_params['batch_size'], shuffle=False, mode='train'):
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.mode = mode
        self.sample_weights = sample_weights
        # shuffling is only meaningful during training
        if self.mode != 'train':
            self.shuffle = False
        # iterator state for __next__
        self.n = 0
        self.max = self.__len__()
    def __len__(self):
        # number of batches, counting the final partial batch
        return int(np.ceil(len(self.x) / float(self.batch_size)))
    def __getitem__(self, idx):
        batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
        # pad all spectrograms in the batch to a common length
        batch_x = get_zero_pad(batch_x)
        # batch_x = random_truncate_array(batch_x)
        batch_x = np.array(batch_x)
        batch_x = batch_x.reshape((len(batch_x), -1, hparams.num_mels))
        if self.mode != 'test':
            batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
        # read your data here using the batch lists, batch_x and batch_y
        if self.mode == 'train':
            return np.array(batch_x), np.array(batch_y)
        if self.mode == 'val':
            return np.array(batch_x), np.array(batch_y)
        if self.mode == 'test':
            return np.array(batch_x)
    def __next__(self):
        # wrap around to the first batch after the last one
        if self.n >= self.max:
            self.n = 0
        result = self.__getitem__(self.n)
        self.n += 1
        return result
def customPooling(x):
    """Masked global average pooling over time.

    x is a pair [model_inputs, target_features]; timesteps whose input row is
    all zeros (zero padding) are excluded from the mean, so variable-length
    sequences average only over their valid frames.
    """
    target = x[1]
    inputs = x[0]
    maskVal = 0
    # getting the mask by observing the model's inputs
    mask = K.equal(inputs, maskVal)
    mask = K.all(mask, axis=-1, keepdims=True)
    # inverting the mask for getting the valid steps for each sample
    mask = 1 - K.cast(mask, K.floatx())
    # summing the valid steps for each sample
    stepsPerSample = K.sum(mask, axis=1, keepdims=False)
    # applying the mask to the target (to make sure you are summing zeros below)
    target = target * mask
    # calculating the mean of the steps (using our sum of valid steps as averager)
    means = K.sum(target, axis=1, keepdims=False) / stepsPerSample
    return means
def build_custom_convnet():
    """Build the binary spoof-detection model: three parallel causal Conv1D
    stacks (kernel sizes 3/5/7) with optional residual connections, masked
    average pooling over time, a dense head, and a sigmoid output.

    All architecture knobs are read from the module-level model_params dict.
    """
    K.clear_session()
    # variable-length input: (timesteps, num_freq_bin) mel frames
    image_input = Input(shape=(None, model_params['num_freq_bin']), name='image_input')
    num_conv_blocks = model_params['num_conv_blocks']
    init_neurons = model_params['num_conv_filters']
    spatial_dropout_fraction = model_params['spatial_dropout_fraction']
    num_dense_layers = model_params['num_dense_layers']
    num_dense_neurons = model_params['num_dense_neurons']
    learning_rate = model_params['learning_rate']
    # three parallel towers with different receptive fields
    convnet = []
    convnet_5 = []
    convnet_7 = []
    for ly in range(0, num_conv_blocks):
        if ly == 0:
            convnet.append(Conv1D(init_neurons, 3, strides=1, activation='linear', padding='causal')(image_input))
            convnet_5.append(Conv1D(init_neurons, 5, strides=1, activation='linear', padding='causal')(image_input))
            convnet_7.append(Conv1D(init_neurons, 7, strides=1, activation='linear', padding='causal')(image_input))
        else:
            # filter count grows as init_neurons * (ly * 2) with depth
            convnet.append(
                Conv1D(init_neurons * (ly * 2), 3, strides=1, activation='linear', padding='causal')(convnet[ly - 1]))
            convnet_5.append(
                Conv1D(init_neurons * (ly * 2), 5, strides=1, activation='linear', padding='causal')(convnet_5[ly - 1]))
            convnet_7.append(
                Conv1D(init_neurons * (ly * 2), 7, strides=1, activation='linear', padding='causal')(convnet_7[ly - 1]))
        convnet[ly] = LeakyReLU()(convnet[ly])
        convnet_5[ly] = LeakyReLU()(convnet_5[ly])
        convnet_7[ly] = LeakyReLU()(convnet_7[ly])
        # residual skip from `residual_con` blocks back, projected with a 1x1 conv
        if model_params['residual_con'] > 0 and (ly - model_params['residual_con']) >= 0:
            res_conv = Conv1D(init_neurons * (ly * 2), 1, strides=1, activation='linear', padding='same')(
                convnet[ly - model_params['residual_con']])
            convnet[ly] = Add(name=f'residual_con_3_{ly}')([convnet[ly], res_conv])
            res_conv_5 = Conv1D(init_neurons * (ly * 2), 1, strides=1, activation='linear', padding='same')(
                convnet_5[ly - model_params['residual_con']])
            convnet_5[ly] = Add(name=f'residual_con_5_{ly}')([convnet_5[ly], res_conv_5])
            res_conv_7 = Conv1D(init_neurons * (ly * 2), 1, strides=1, activation='linear', padding='same')(
                convnet_7[ly - model_params['residual_con']])
            convnet_7[ly] = Add(name=f'residual_con_7_{ly}')([convnet_7[ly], res_conv_7])
        # no dropout after the final conv block
        if ly < (num_conv_blocks-1):
            convnet[ly] = SpatialDropout1D(spatial_dropout_fraction)(convnet[ly])
            convnet_5[ly] = SpatialDropout1D(spatial_dropout_fraction)(convnet_5[ly])
            convnet_7[ly] = SpatialDropout1D(spatial_dropout_fraction)(convnet_7[ly])
    # masked average pooling over time (ignores zero-padded frames), per tower
    dense = Lambda(lambda x: customPooling(x))([image_input, convnet[ly]])
    dense_5 = Lambda(lambda x: customPooling(x))([image_input, convnet_5[ly]])
    dense_7 = Lambda(lambda x: customPooling(x))([image_input, convnet_7[ly]])
    dense = Concatenate()([dense, dense_5, dense_7])
    for layers in range(num_dense_layers):
        dense = Dense(num_dense_neurons, activation='linear')(dense)
        dense = BatchNormalization()(dense)
        dense = LeakyReLU()(dense)
        dense = Dropout(model_params['dense_dropout'])(dense)
    output_layer = Dense(1)(dense)
    output_layer = Activation('sigmoid')(output_layer)
    model = Model(inputs=image_input, outputs=output_layer)
    opt = optimizers.Adam(lr=learning_rate)
    # best-effort multi-GPU: silently falls back to single-GPU/CPU
    try:
        model = multi_gpu_model(model, gpus=4)
    except:
        pass
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    return model
class Discriminator_Model():
    """Convnet discriminator wrapper.

    Builds (or loads) the Keras model produced by ``build_custom_convnet`` and
    exposes training, decision-threshold optimization, inference on the
    real-world hold-out set, and evaluation with example spectrograms.
    Relies on module-level ``model_params``, ``DataGenerator``,
    ``f1_score_callback`` and (optionally) the ``foundations`` tracking client.
    """

    def __init__(self, load_pretrained=False, saved_model_name=None, real_test_mode=False):
        """Create or load the model.

        load_pretrained -- load ``saved_model_name`` from the save dir instead of building.
        real_test_mode  -- additionally preprocess the real-world inference data up front.
        """
        if not os.path.exists(model_params['model_save_dir']):
            os.makedirs(model_params['model_save_dir'])
        if not load_pretrained:
            self.model = build_custom_convnet()
            self.model.summary()
        else:
            self.model = load_model(os.path.join(
                f"./{model_params['model_save_dir']}", saved_model_name), custom_objects={'customPooling': customPooling})
        # File names encode every hyper-parameter value so runs don't collide.
        self.model_name = f"saved_model_{'_'.join(str(v) for k,v in model_params.items())}.h5"
        self.real_test_model_name = f"real_test_saved_model_{'_'.join(str(v) for k,v in model_params.items())}.h5"
        self.model_save_filename = os.path.join(f"./{model_params['model_save_dir']}", self.model_name)
        self.real_test_model_save_filename = os.path.join(
            f"./{model_params['model_save_dir']}", self.real_test_model_name)
        if real_test_mode:
            if run_on_foundations:
                self.real_test_data_dir = "/data/inference_data/"
            else:
                self.real_test_data_dir = "../data/inference_data/"
            # preprocess the files
            self.real_test_processed_data_real = preprocess_from_ray_parallel_inference(
                self.real_test_data_dir, "real", use_parallel=True)
            self.real_test_processed_data_fake = preprocess_from_ray_parallel_inference(self.real_test_data_dir,
                                                                                        "fake",
                                                                                        use_parallel=True)
            self.real_test_processed_data = self.real_test_processed_data_real + self.real_test_processed_data_fake
            # sort by sequence length so similarly-sized clips batch together
            self.real_test_processed_data = sorted(self.real_test_processed_data, key=lambda x: len(x[0]))
            self.real_test_features = [x[0] for x in self.real_test_processed_data]
            self.real_test_labels = [x[1] for x in self.real_test_processed_data]
            print(f"Length of real_test_processed_data: {len(self.real_test_processed_data)}")

    def train(self, xtrain, ytrain, xval, yval):
        """Fit the model with TensorBoard logging, early stopping, LR
        reduction and F1-based checkpointing, then reload the best weights."""
        callbacks = []
        tb = TensorBoard(log_dir='tflogs', write_graph=True, write_grads=False)
        callbacks.append(tb)
        try:
            foundations.set_tensorboard_logdir('tflogs')
        except Exception:
            print("foundations command not found")
        es = EarlyStopping(monitor='val_loss', mode='min', patience=5, min_delta=0.0001,
                           verbose=1)
        # NOTE: the original appended `tb` a second time here, registering the
        # TensorBoard callback twice; only `es` is added now.
        callbacks.append(es)
        rp = ReduceLROnPlateau(monitor='val_loss', factor=0.6, patience=2,
                               verbose=1)
        callbacks.append(rp)
        f1_callback = f1_score_callback(xval, yval, model_save_filename=self.model_save_filename)
        callbacks.append(f1_callback)
        # up-weight the positive class 5:1
        class_weights = {1: 5, 0: 1}
        train_generator = DataGenerator(xtrain, ytrain)
        validation_generator = DataGenerator(xval, yval)
        self.model.fit_generator(train_generator,
                                 steps_per_epoch=len(train_generator),
                                 epochs=model_params['epochs'],
                                 validation_data=validation_generator,
                                 callbacks=callbacks,
                                 shuffle=False,
                                 use_multiprocessing=True,
                                 verbose=1,
                                 class_weight=class_weights)
        # reload the best checkpoint written by the F1 callback
        self.model = load_model(self.model_save_filename, custom_objects={'customPooling': customPooling})
        try:
            foundations.save_artifact(self.model_save_filename, key='trained_model.h5')
        except Exception:
            print("foundations command not found")

    def inference_on_real_data(self, threshold=0.5):
        """Run the model on the preprocessed real-world hold-out data and
        return ``(accuracy, f1)`` at the given decision threshold."""
        datagen_val = DataGenerator(self.real_test_features, mode='test', batch_size=1)
        y_pred = self.model.predict_generator(datagen_val, use_multiprocessing=False, max_queue_size=50)
        y_pred_labels = np.zeros((len(y_pred)))
        y_pred_labels[y_pred.flatten() > threshold] = 1
        acc_score = accuracy_score(self.real_test_labels, y_pred_labels)
        f1_score_val = f1_score(self.real_test_labels, y_pred_labels)
        return acc_score, f1_score_val

    def get_labels_from_prob(self, y, threshold=0.5):
        """Binarize probabilities ``y`` at ``threshold`` (scalar, or a
        1-element list as returned by the threshold optimizer)."""
        y_pred_labels = np.zeros((len(y)))
        y = np.array(y)
        if isinstance(threshold, list):
            y_pred_labels[y.flatten() > threshold[0]] = 1
        else:
            y_pred_labels[y.flatten() > threshold] = 1
        return y_pred_labels

    def get_f1score_for_optimization(self, threshold, y_true, y_pred, ismin=False):
        """F1 score at ``threshold``; negated when ``ismin`` so a minimizer
        can maximize F1."""
        y_pred_labels = self.get_labels_from_prob(y_pred, threshold=threshold)
        if ismin:
            return - f1_score(y_true, y_pred_labels)
        else:
            return f1_score(y_true, y_pred_labels)

    def predict_labels(self, x, threshold=0.5, raw_prob=False, batch_size=model_params['batch_size']):
        """Predict binary labels for ``x`` (or raw probabilities when ``raw_prob``)."""
        test_generator = DataGenerator(x, mode='test', batch_size=batch_size)
        y_pred = self.model.predict_generator(test_generator, steps=len(test_generator), max_queue_size=10)
        print(y_pred)
        if raw_prob:
            return y_pred
        else:
            y_pred_labels = self.get_labels_from_prob(y_pred, threshold=threshold)
            return y_pred_labels

    def optimize_threshold(self, xtrain, ytrain, xval, yval):
        """Bayesian-search the decision threshold that maximizes training F1,
        persist it to disk, and report train/val F1 before and after."""
        ytrain_pred = self.predict_labels(xtrain, raw_prob=True)
        yval_pred = self.predict_labels(xval, raw_prob=True)
        self.opt_threshold = 0.5
        ytrain_pred_labels = self.get_labels_from_prob(ytrain_pred, threshold=self.opt_threshold)
        yval_pred_labels = self.get_labels_from_prob(yval_pred, threshold=self.opt_threshold)
        train_f1_score = f1_score(ytrain_pred_labels, ytrain)
        val_f1_score = f1_score(yval_pred_labels, yval)
        print(f"train f1 score: {train_f1_score}, val f1 score: {val_f1_score}")
        # objective: negative F1 on the (fixed) training predictions
        f1_train_partial = partial(self.get_f1score_for_optimization, y_true=ytrain.copy(),
                                   y_pred=ytrain_pred.copy(), ismin=True)
        n_searches = 50
        dim_0 = Real(low=0.2, high=0.8, name='dim_0')
        dimensions = [dim_0]
        search_result = gp_minimize(func=f1_train_partial,
                                    dimensions=dimensions,
                                    acq_func='gp_hedge',  # probabilistic mix of EI / PI / LCB
                                    n_calls=n_searches,
                                    verbose=False)
        self.opt_threshold = search_result.x
        if isinstance(self.opt_threshold, list):
            self.opt_threshold = self.opt_threshold[0]
        self.optimum_threshold_filename = f"model_threshold_{'_'.join(str(v) for k, v in model_params.items())}.npy"
        np.save(os.path.join(f"{model_params['model_save_dir']}", self.optimum_threshold_filename), self.opt_threshold)
        train_f1_score = self.get_f1score_for_optimization(self.opt_threshold, y_true=ytrain, y_pred=ytrain_pred)
        val_f1_score = self.get_f1score_for_optimization(self.opt_threshold, y_true=yval, y_pred=yval_pred)
        print(f"optimized train f1 score: {train_f1_score}, optimized val f1 score: {val_f1_score}")

    def evaluate(self, xtrain, ytrain, xval, yval, num_examples=1):
        """Report accuracy/F1 on train and val (using the stored optimum
        threshold when available), log metrics to foundations, and save
        example TP/TN/FP/FN validation spectrograms as artifacts."""
        ytrain_pred = self.predict_labels(xtrain, raw_prob=True)
        yval_pred = self.predict_labels(xval, raw_prob=True)
        try:
            self.optimum_threshold_filename = f"model_threshold_{'_'.join(str(v) for k, v in model_params.items())}.npy"
            self.opt_threshold = np.load(os.path.join(
                f"{model_params['model_save_dir']}", self.optimum_threshold_filename)).item()
            print(f"loaded optimum threshold: {self.opt_threshold}")
        except Exception:
            # fall back to the default threshold when no optimized one was saved
            self.opt_threshold = 0.5
        ytrain_pred_labels = self.get_labels_from_prob(ytrain_pred, threshold=self.opt_threshold)
        yval_pred_labels = self.get_labels_from_prob(yval_pred, threshold=self.opt_threshold)
        train_accuracy = accuracy_score(ytrain, ytrain_pred_labels)
        val_accuracy = accuracy_score(yval, yval_pred_labels)
        train_f1_score = f1_score(ytrain, ytrain_pred_labels)
        val_f1_score = f1_score(yval, yval_pred_labels)
        print(f"train accuracy: {train_accuracy}, train_f1_score: {train_f1_score},"
              f"val accuracy: {val_accuracy}, val_f1_score: {val_f1_score} ")
        try:
            foundations.log_metric('train_accuracy', np.round(train_accuracy, 2))
            foundations.log_metric('val_accuracy', np.round(val_accuracy, 2))
            foundations.log_metric('train_f1_score', np.round(train_f1_score, 2))
            foundations.log_metric('val_f1_score', np.round(val_f1_score, 2))
            foundations.log_metric('optimum_threshold', np.round(self.opt_threshold, 2))
        except Exception as e:
            print(e)
        # True Positive Example
        ind_tp = np.argwhere(np.equal((yval_pred_labels + yval).astype(int), 2)).reshape(-1, )
        # True Negative Example
        ind_tn = np.argwhere(np.equal((yval_pred_labels + yval).astype(int), 0)).reshape(-1, )
        # False Positive Example
        ind_fp = np.argwhere(np.greater(yval_pred_labels, yval)).reshape(-1, )
        # False Negative Example
        ind_fn = np.argwhere(np.greater(yval, yval_pred_labels)).reshape(-1, )
        path_to_save_spectrograms = './spectrograms'
        if not os.path.isdir(path_to_save_spectrograms):
            os.makedirs(path_to_save_spectrograms)
        # clear spectrograms left over from a previous run
        specs_saved = os.listdir(path_to_save_spectrograms)
        if len(specs_saved) > 0:
            for file_ in specs_saved:
                os.remove(os.path.join(path_to_save_spectrograms, file_))
        # BUG FIX: the indices were computed on the validation set, so the
        # examples must come from xval (the original indexed xtrain).
        ind_random_tp = np.random.choice(ind_tp, num_examples).reshape(-1,)
        tp_x = [xval[i] for i in ind_random_tp]
        ind_random_tn = np.random.choice(ind_tn, num_examples).reshape(-1,)
        tn_x = [xval[i] for i in ind_random_tn]
        ind_random_fp = np.random.choice(ind_fp, num_examples).reshape(-1,)
        fp_x = [xval[i] for i in ind_random_fp]
        ind_random_fn = np.random.choice(ind_fn, num_examples).reshape(-1,)
        fn_x = [xval[i] for i in ind_random_fn]
        print("Plotting spectrograms to show what the hell the model has learned")
        for i in range(num_examples):
            plot_spectrogram(tp_x[i], path=os.path.join(path_to_save_spectrograms, f'true_positive_{i}.png'))
            plot_spectrogram(tn_x[i], path=os.path.join(path_to_save_spectrograms, f'true_negative_{i}.png'))
            plot_spectrogram(fp_x[i], path=os.path.join(path_to_save_spectrograms, f'false_positive_{i}.png'))
            # (filename typo 'fale_negative' fixed consistently below)
            plot_spectrogram(fn_x[i], path=os.path.join(path_to_save_spectrograms, f'false_negative_{i}.png'))
            try:
                foundations.save_artifact(os.path.join(path_to_save_spectrograms,
                                                       f'true_positive_{i}.png'), key='true_positive_example')
                foundations.save_artifact(os.path.join(path_to_save_spectrograms,
                                                       f'true_negative_{i}.png'), key='true_negative_example')
                foundations.save_artifact(os.path.join(path_to_save_spectrograms,
                                                       f'false_positive_{i}.png'), key='false_positive_example')
                foundations.save_artifact(os.path.join(path_to_save_spectrograms,
                                                       f'false_negative_{i}.png'), key='false_negative_example')
            except Exception as e:
                print(e)
| [
"davidvhappel1@gmail.com"
] | davidvhappel1@gmail.com |
05dd1d8270322ebb4719a83756fb0b5d1e7ce34b | 9580f5194f3251bebe44c97df2c86e6ea73cfd83 | /app/__init__.py | 696477f66f4e4f7fd6d4a4be3e27a946a49f68f0 | [] | no_license | Sakuoz/Myblog | ada2659d2a2ea717879faa0b3383dbb59890c5d3 | 7ed454b2b7b2e3eefd7788630d95b426c0c3a5da | refs/heads/master | 2021-01-12T06:22:31.062389 | 2017-03-06T09:50:37 | 2017-03-06T09:50:37 | 77,349,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | import os
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID
from config import basedir, ADMINS, MAIL_SERVER, MAIL_PORT, MAIL_USERNAME, MAIL_PASSWORD

app = Flask(__name__)
app.config.from_object('config')

db = SQLAlchemy(app)  # database handle (SQLAlchemy wrapper bound to the app)

lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
lm.login_message = '请登录!!'

oid = OpenID(app, os.path.join(basedir, 'tmp'))

# In production (non-debug), e-mail ERROR-level records to the admins.
if not app.debug:
    import logging
    from logging.handlers import SMTPHandler
    credentials = None
    if MAIL_USERNAME or MAIL_PASSWORD:
        credentials = (MAIL_USERNAME, MAIL_PASSWORD)
    mail_handler = SMTPHandler((MAIL_SERVER, MAIL_PORT), 'no-reply@' + MAIL_SERVER, ADMINS, 'myblog failure', credentials)
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)

# In production (non-debug), also write INFO-level records to a rotating file.
if not app.debug:
    import logging
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler('tmp/myblog.log', 'a', 1 * 1024 * 1024, 10)
    # BUG FIX: Handler.setFormatter() requires a logging.Formatter instance;
    # the original passed a bare format string, which raises AttributeError
    # as soon as a record is emitted.
    file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
    app.logger.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('myblog startup')

from app import views, models  # imported last to avoid a circular import
| [
"sakuoz@163.com"
] | sakuoz@163.com |
4d84009041db6676138b1de259e5c129c4ec8dbe | abbab4a61a530bdce6959264cb98a6f61eb7d284 | /src/social/migrations/0006_auto_20180621_1858.py | 91f54c4d7a4a3299dfe1bd57a0f25a1ab3996a61 | [] | no_license | KozhonazarRysbaev/kvn | f41ff87ea73c561a15de197e7e3ccb513e1fd85b | 24711afbd48baf585d7748863ae20096197209e6 | refs/heads/master | 2020-12-15T09:47:28.191233 | 2018-09-27T20:07:47 | 2018-09-27T20:07:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # Generated by Django 2.0.6 on 2018-06-21 12:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.0.6): relax ``Post.title`` to allow
    NULL/blank values and add a database index (CharField, max_length 250)."""

    dependencies = [
        ('social', '0005_auto_20180620_2211'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='title',
            field=models.CharField(blank=True, db_index=True, max_length=250, null=True, verbose_name='Заголовок'),
        ),
    ]
| [
"kairatomurbek2@gmail.com"
] | kairatomurbek2@gmail.com |
9a0bef543cc1d04d8dfb77e6c44b5ab77b83c037 | b30996c01747e6259501e1fd6647724addfce7dc | /src/features/feature_extractor.py | d9eb8bcbd8e9b823ee1ac9126c808534f7ec45e5 | [] | no_license | lichenyu/Two-stage_Popularity_Prediction | 23a3a9161a34540e6a6df906d0f0009bf86d18cf | e4263747514a6184bb3d8cdaf6380c05a68ca291 | refs/heads/master | 2021-01-01T05:43:41.063932 | 2016-05-14T03:30:08 | 2016-05-14T03:30:08 | 57,261,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,830 | py | # -*- coding: utf-8 -*-
import json
import re
import jieba
import jieba.posseg as pseg
from snownlp import SnowNLP
from datetime import date
from datetime import timedelta
import data.data_extractor
# extract features for videos published on certain date
# video features
# user features
# topic features
# text features
# history features
# for each level of vc30, extract the tags
def get_tags_bylevel(level_file, json_files, out_file_prefix):
    """Split video tags into four files, one per 30-day view-count level.

    level_file      -- TSV of ``vid, level_vc7, level_vc30, ...``; level_vc30 is used.
    json_files      -- daily metadata dumps (one JSON object per line); later
                       files overwrite earlier ones for the same video id.
    out_file_prefix -- writes ``<prefix>_level1`` .. ``<prefix>_level4``,
                       each line ``vid<TAB>tag`` encoded as UTF-8.
    """
    # map each video id to its vc30 popularity level
    vid_level_map = {}
    level_fd = open(level_file, 'r')
    for line in level_fd.readlines():
        fields = line.strip().split('\t', -1)
        # vid, level_vc7, level_vc30, (level_vc7, level_vc30)
        vid_level_map[fields[0]] = fields[2]
    level_fd.close()
    # io.open with an explicit encoding writes identical UTF-8 bytes on both
    # Python 2 and 3 (the original wrote tag.encode('utf-8') byte strings,
    # which breaks on Python 3 text files)
    out_fds = {}
    for level in ('1', '2', '3', '4'):
        out_fds[level] = io.open(out_file_prefix + '_level' + level, 'w', encoding='utf-8')
    # read json each day, overwrite meta-data in the map, check and output
    for json_file in json_files:
        vid_tags_map = {}
        json_fd = open(json_file, 'r')
        for line in json_fd.readlines():
            video_metadata = json.loads(line.strip())
            vid_tags_map[video_metadata['id']] = video_metadata['tags']
        json_fd.close()
        for vid in vid_tags_map.keys():
            # skip videos without tags or without a known level
            if 0 < len(vid_tags_map[vid]) and vid in vid_level_map:
                level = vid_level_map[vid]
                if level not in out_fds:
                    # BUG FIX: the original fell through and reused the
                    # previous iteration's out_fd after printing
                    print('Impossible')
                    continue
                out_fd = out_fds[level]
                for tag in vid_tags_map[vid].strip().split(',', -1):
                    out_fd.write(vid + u'\t' + tag + u'\n')
    for out_fd in out_fds.values():
        out_fd.close()
# count tags for each level
def count_tags(in_file, out_file):
    """Count tag frequencies in a ``vid<TAB>tag`` file.

    Writes ``tag<TAB>count<TAB>percentage`` (share of all tag occurrences,
    4 decimal places), most frequent first.
    """
    tag_count_map = {}
    total_count = 0
    in_fd = open(in_file, 'r')
    for line in in_fd.readlines():
        fields = line.strip().split('\t', -1)
        # vid, tag
        tag_count_map[fields[1]] = tag_count_map.get(fields[1], 0) + 1
        total_count = total_count + 1
    in_fd.close()
    # key-based sort works on both Python 2 and 3 (the original passed a
    # cmp-style comparator, which Python 3's sorted() no longer accepts)
    sorted_items = sorted(tag_count_map.items(), key=lambda item: item[1], reverse=True)
    out_fd = open(out_file, 'w')
    for tag, count in sorted_items:
        out_fd.write(tag + '\t' + str(count) + '\t' + '%.04f' % (count * 100. / total_count) + '\n')
    out_fd.close()
# get tag list for each level
def get_taglist(in_files, out_files):
    """Select level-characteristic tags from the four per-level frequency files.

    From the top 200 tags of each level, a tag is kept for level L when it
    appears in no other level's top list; additionally, level 1 keeps tags
    shared only with level 2, level 4 keeps tags shared only with level 3, and
    the middle levels keep tags shared only with one adjacent level.
    in_files/out_files -- four paths each, ordered level 1..4; output lines
    are copied verbatim from the input (``tag<TAB>count<TAB>pct``).
    """
    top_lists = []   # per level: list of (tag, original line)
    top_sets = []    # per level: set of tags for fast membership tests
    for in_file in in_files:
        tag_list = []
        tag_set = set()
        in_fd = open(in_file, 'r')
        lines = in_fd.readlines()
        # only the most frequent 200 tags are considered; slicing tolerates
        # shorter files (the original indexed lines[j] blindly -> IndexError)
        for line in lines[:200]:
            tag = line.strip().split('\t', -1)[0]
            # tag, count, pct
            tag_list.append((tag, line))
            tag_set.add(tag)
        in_fd.close()  # the original's `in_fd.close` (no parens) leaked the handle
        top_lists.append(tag_list)
        top_sets.append(tag_set)
    # iterate each tag in each level
    for i in range(0, 4):
        out_fd = open(out_files[i], 'w')
        tag_list = top_lists[i % 4]
        tag_set1 = top_sets[(i + 1) % 4]
        tag_set2 = top_sets[(i + 2) % 4]
        tag_set3 = top_sets[(i + 3) % 4]
        for tag, line in tag_list:
            in1 = tag in tag_set1
            in2 = tag in tag_set2
            in3 = tag in tag_set3
            if not in1 and not in2 and not in3:
                # unique to this level
                out_fd.write(line)
            elif 0 == i:
                if in1 and not in2 and not in3:
                    out_fd.write(line)
            elif 3 == i:
                if not in1 and not in2 and in3:
                    out_fd.write(line)
            else:
                if in1 and not in2 and not in3:
                    out_fd.write(line)
                elif not in1 and not in2 and in3:
                    out_fd.write(line)
        out_fd.close()
def get_titlewords_bylevel(level_file, json_files, out_file_prefix):
    """Segment video titles with jieba and split the words into four files,
    one per 30-day view-count level.

    Output lines are ``vid<TAB>word`` where the word is unicode-escape
    encoded, so the files stay pure ASCII.
    """
    # map each video id to its vc30 popularity level
    vid_level_map = {}
    level_fd = open(level_file, 'r')
    for line in level_fd.readlines():
        fields = line.strip().split('\t', -1)
        # vid, level_vc7, level_vc30, (level_vc7, level_vc30)
        vid_level_map[fields[0]] = fields[2]
    level_fd.close()
    out_fds = {}
    for level in ('1', '2', '3', '4'):
        out_fds[level] = open(out_file_prefix + '_level' + level, 'w')
    # read json each day, overwrite meta-data in the map, check and output
    for json_file in json_files:
        vid_title_map = {}
        json_fd = open(json_file, 'r')
        for line in json_fd.readlines():
            video_metadata = json.loads(line.strip())
            vid_title_map[video_metadata['id']] = video_metadata['title']
        json_fd.close()
        for vid in vid_title_map.keys():
            # skip videos without a title or without a known level
            if 0 < len(vid_title_map[vid]) and vid in vid_level_map:
                level = vid_level_map[vid]
                if level not in out_fds:
                    # BUG FIX: the original fell through and reused the
                    # previous iteration's out_fd after printing
                    print('Impossible')
                    continue
                out_fd = out_fds[level]
                for word in jieba.lcut(vid_title_map[vid].strip()):
                    out_fd.write(vid + '\t')
                    # unicode-escape output is pure ASCII; decoding it back to
                    # text makes the write valid on both Python 2 and 3
                    out_fd.write(word.replace('\n', ' ').encode('unicode-escape').decode('ascii'))
                    out_fd.write('\n')
    for out_fd in out_fds.values():
        out_fd.close()
def count_titlewords(in_file, out_file):
    """Count title-word frequencies in a ``vid<TAB>word`` file, skipping
    malformed lines with fewer than two fields.

    Writes ``word<TAB>count<TAB>percentage`` (4 decimal places), most
    frequent first.
    """
    titleword_count_map = {}
    total_count = 0
    in_fd = open(in_file, 'r')
    for line in in_fd.readlines():
        fields = line.strip().split('\t', -1)
        # vid, titleword
        if 2 > len(fields):
            continue
        titleword_count_map[fields[1]] = titleword_count_map.get(fields[1], 0) + 1
        total_count = total_count + 1
    in_fd.close()
    # key-based sort works on both Python 2 and 3 (the original passed a
    # cmp-style comparator, removed in Python 3)
    sorted_items = sorted(titleword_count_map.items(), key=lambda item: item[1], reverse=True)
    out_fd = open(out_file, 'w')
    for word, count in sorted_items:
        out_fd.write(word + '\t' + str(count) + '\t' + '%.04f' % (count * 100. / total_count) + '\n')
    out_fd.close()
def get_titlewordlist(in_files, out_files):
    """Select level-characteristic title words from the four per-level
    frequency files (same selection rule as ``get_taglist``, but over the top
    500 words per level)."""
    top_lists = []   # per level: list of (word, original line)
    top_sets = []    # per level: set of words for fast membership tests
    for in_file in in_files:
        word_list = []
        word_set = set()
        in_fd = open(in_file, 'r')
        lines = in_fd.readlines()
        # cap at the 500 most frequent words; slicing tolerates shorter files
        # (the original indexed lines[j] blindly -> IndexError)
        for line in lines[:500]:
            word = line.strip().split('\t', -1)[0]
            # titleword, count, pct
            word_list.append((word, line))
            word_set.add(word)
        in_fd.close()  # the original's `in_fd.close` (no parens) leaked the handle
        top_lists.append(word_list)
        top_sets.append(word_set)
    # iterate each titleword in each level
    for i in range(0, 4):
        out_fd = open(out_files[i], 'w')
        word_list = top_lists[i % 4]
        word_set1 = top_sets[(i + 1) % 4]
        word_set2 = top_sets[(i + 2) % 4]
        word_set3 = top_sets[(i + 3) % 4]
        for word, line in word_list:
            in1 = word in word_set1
            in2 = word in word_set2
            in3 = word in word_set3
            if not in1 and not in2 and not in3:
                # unique to this level
                out_fd.write(line)
            elif 0 == i:
                if in1 and not in2 and not in3:
                    out_fd.write(line)
            elif 3 == i:
                if not in1 and not in2 and in3:
                    out_fd.write(line)
            else:
                if in1 and not in2 and not in3:
                    out_fd.write(line)
                elif not in1 and not in2 and in3:
                    out_fd.write(line)
        out_fd.close()
# ------------------------- get topic resources above -------------------------
# uid, regist_time, is_verified, is_vip, videos_count, vv_count, favorites_count, playlists_count, statuses_count, followers_count (subscribe_count), following_count
# extract all users in the json file each day
def get_user_info(json_path, date_str, out_file):
    """Dump one TSV row per user from the daily user-metadata JSON file.

    Reads ``json_path + date_str`` (one JSON object per line, empty objects
    skipped) and writes columns: uid, account age in days relative to
    date_str, is_verified (True/False), is_vip, videos_count, vv_count,
    favorites_count, playlists_count, statuses_count, followers (the larger
    of followers_count and subscribe_count), following_count.
    """
    snapshot_date = date(int(date_str[0:4]), int(date_str[5:7]), int(date_str[8:10]))
    json_fd = open(json_path + date_str, 'r')
    out_fd = open(out_file, 'w')
    for raw_line in json_fd.readlines():
        user = json.loads(raw_line.strip())
        if not user:
            continue
        # account age in days; users without a registration date get 0
        regist_time = user['regist_time']
        if regist_time:
            reg_date = date(int(regist_time[0:4]), int(regist_time[5:7]), int(regist_time[8:10]))
            account_age = (snapshot_date - reg_date).days
        else:
            account_age = 0
        verified_flag = 'True' if 0 < user['is_verified'] else 'False'
        # the two follower-like counters disagree in the dumps; keep the larger
        followers = user['followers_count']
        if followers < user['subscribe_count']:
            followers = user['subscribe_count']
        columns = [user['id'],
                   str(account_age),
                   verified_flag,
                   str(user['is_vip']),
                   str(user['videos_count']),
                   str(user['vv_count']),
                   str(user['favorites_count']),
                   str(user['playlists_count']),
                   str(user['statuses_count']),
                   str(followers),
                   str(user['following_count'])]
        out_fd.write('\t'.join(columns) + '\n')
    json_fd.close()
    out_fd.close()
def count_sourcename(json_path, date_strs, out_file):
    """Count how often each video source name occurs over the given tracking
    windows and write ``name<TAB>count<TAB>percentage`` sorted by count.

    date_strs -- first observation dates ('YYYY-MM-DD'); each window's dump is
    named ``<first>_<first + 29 days>`` under json_path.
    """
    name_count_map = {}
    total_count = 0
    for date_str in date_strs:
        first_date = date(int(date_str[0:4]), int(date_str[5:7]), int(date_str[8:10]))
        last_date = first_date + timedelta(days=29)
        json_fd = open(json_path + str(first_date) + '_' + str(last_date), 'r')
        for line in json_fd.readlines():
            video_metadata = json.loads(line.strip())
            name = video_metadata['source']['name']
            name_count_map[name] = name_count_map.get(name, 0) + 1
            total_count = total_count + 1
        json_fd.close()
    # key-based sort works on both Python 2 and 3 (cmp comparators were
    # removed); io.open writes UTF-8 text identically on both versions
    # (the original wrote name.encode('utf-8') bytes, which breaks on Python 3)
    sorted_items = sorted(name_count_map.items(), key=lambda item: item[1], reverse=True)
    out_fd = io.open(out_file, 'w', encoding='utf-8')
    for name, count in sorted_items:
        out_fd.write(name + u'\t' + str(count) + u'\t' + u'%.04f' % (count * 100. / total_count) + u'\n')
    out_fd.close()
# vid, category, duration, published_tod, len(streamtypes), copyright_type, public_type, source[name], user[ID]
# l1-4_tag_count, l1-4_titleword_count
# title: len, cn, n, v, adj, adv, prep, num, eng, punc, senti;
# for each set of videos, check the json of last two observation days (because for data clean and the cat may change)
def get_video_info(vci_file, sourcename_file, tag_path_prefix, titleword_path_prefix, json_path, date_str, video_properties_file, content_topic_file, textual_analysis_file):
    """Extract per-video feature files for the videos published on date_str.

    Writes three TSV outputs keyed by vid:
      video_properties_file -- category, duration, publish hour, #streamtypes,
                               copyright/public type, source name (top-15 or
                               'others'), uploader id
      content_topic_file    -- counts of tags / title words that fall in each
                               popularity level's characteristic word list
      textual_analysis_file -- title length / PoS composition / sentiment,
                               description length+sentiment, tag count+sentiment
    Metadata is taken from the day-30 snapshot, falling back to the day-29
    snapshot for videos missing there (dumps are cleaned and may change
    between observation days).  The original contained two near-identical
    copies of the per-video output code that disagreed on tag matching (the
    second pass compared unicode tags against the utf-8-encoded sets and
    therefore never matched CJK tags); both passes now share the same helpers.
    """
    # video ids of this publication date
    vid_set = set()
    vci_fd = open(vci_file, 'r')
    for line in vci_fd.readlines():
        # vid, vci1, vci2, ..., vci30
        vid_set.add(line.strip().split('\t', -1)[0])
    vci_fd.close()
    # top-15 source names; everything else is bucketed as 'others'
    sn_set = set()
    sn_fd = open(sourcename_file, 'r')
    for _ in range(0, 15):
        # name, count, pct
        sn_set.add(sn_fd.readline().strip().split('\t', -1)[0])
    sn_fd.close()

    def _load_level_wordsets(path_prefix):
        # load the characteristic word list of each level (<prefix>1 .. <prefix>4)
        wordsets = []
        for i in range(1, 1 + 4):
            wordset = set()
            fd = open(path_prefix + str(i), 'r')
            for line in fd.readlines():
                # word, count, pct
                wordset.add(line.strip().split('\t', -1)[0])
            fd.close()
            wordsets.append(wordset)
        return wordsets

    tag_sets = _load_level_wordsets(tag_path_prefix)
    titleword_sets = _load_level_wordsets(titleword_path_prefix)
    # part-of-speech label groups (ICTCLAS-style tags emitted by jieba.posseg)
    noun_set = set(['n', 'nr', 'nr1', 'nr2', 'nrj', 'nrf', 'ns', 'nsf', 'nt', 'nz', 'nl', 'ng', 'vn', 'an'])
    verb_set = set(['v', 'vshi', 'vyou', 'vf', 'vx', 'vi', 'vl', 'vg'])
    adjective_set = set(['a', 'ag', 'al'])
    adverb_set = set(['d', 'vd', 'ad'])
    preposition_set = set(['p', 'pba', 'pbei'])
    numeral_set = set(['m', 'mq'])
    eng_set = set(['eng'])
    punctuation_set = set(['x', 'xe', 'xs', 'xm', 'xu', 'w', 'wkz', 'wky', 'wyz', 'wyy', 'wj', 'ww', 'wt', 'wd', 'wf', 'wn', 'wm', 'ws', 'wp', 'wb', 'wh'])
    # CJK unified ideographs; u'' with \uXXXX escapes is equivalent to the
    # original ur'' literal but also compiles on Python 3 (ur was removed)
    re_cnchar = re.compile(u'[\u4e00-\u9fa5]')
    video_properties_fd = open(video_properties_file, 'w')
    content_topic_fd = open(content_topic_file, 'w')
    textual_analysis_fd = open(textual_analysis_file, 'w')

    def _write_video_properties(video_metadata):
        # one row: vid, category, duration, publish hour, #streamtypes,
        # copyright/public type, source name (top-15 or 'others'), uploader id
        video_properties_fd.write(video_metadata['id'])
        video_properties_fd.write('\t' + video_metadata['category'].encode('utf-8'))
        if None == video_metadata['duration']:
            video_properties_fd.write('\t0')
        else:
            video_properties_fd.write('\t' + video_metadata['duration'])
        video_properties_fd.write('\t' + video_metadata['published'][11:13])
        video_properties_fd.write('\t' + str(len(video_metadata['streamtypes'])))
        video_properties_fd.write('\t' + video_metadata['copyright_type'])
        video_properties_fd.write('\t' + video_metadata['public_type'])
        if video_metadata['source']['name'].encode('utf-8') in sn_set:
            video_properties_fd.write('\t' + video_metadata['source']['name'].encode('utf-8'))
        else:
            video_properties_fd.write('\tothers')
        video_properties_fd.write('\t' + video_metadata['user']['id'] + '\n')

    def _write_content_topic(video_metadata):
        # one row: vid, tag hits per level (4 cols), title-word hits per level (4 cols)
        tag_hits = [0, 0, 0, 0]
        for cur_tag in video_metadata['tags'].strip().split(',', -1):
            # the level lists store utf-8 byte strings, so encode before testing
            encoded_tag = cur_tag.encode('utf-8')
            for i in range(0, 4):
                if encoded_tag in tag_sets[i]:
                    tag_hits[i] = tag_hits[i] + 1
        word_hits = [0, 0, 0, 0]
        for cur_titleword in jieba.lcut(video_metadata['title'].strip()):
            encoded_word = cur_titleword.encode('unicode-escape')
            for i in range(0, 4):
                if encoded_word in titleword_sets[i]:
                    word_hits[i] = word_hits[i] + 1
        content_topic_fd.write(video_metadata['id'])
        for hit in tag_hits + word_hits:
            content_topic_fd.write('\t' + str(hit))
        content_topic_fd.write('\n')

    def _write_textual_analysis(video_metadata):
        # one row: title length / Chinese-character share / PoS composition /
        # sentiment, then description length+sentiment and tag count+sentiment
        cur_title = video_metadata['title']
        title_len = len(cur_title)
        title_cnchar_len = len(re_cnchar.findall(cur_title))
        noun_count = 0
        verb_count = 0
        adjective_count = 0
        adverb_count = 0
        preposition_count = 0
        numeral_count = 0
        eng_count = 0
        punctuation_count = 0
        word_count = 0
        for word, flag in pseg.cut(cur_title):
            if flag in noun_set:
                noun_count = noun_count + 1
            if flag in verb_set:
                verb_count = verb_count + 1
            if flag in adjective_set:
                adjective_count = adjective_count + 1
            if flag in adverb_set:
                adverb_count = adverb_count + 1
            if flag in preposition_set:
                preposition_count = preposition_count + 1
            if flag in numeral_set:
                numeral_count = numeral_count + 1
            if flag in eng_set:
                eng_count = eng_count + 1
            if flag in punctuation_set:
                punctuation_count = punctuation_count + 1
            word_count = word_count + 1
        # guard the ratio denominators: the original raised ZeroDivisionError
        # on empty titles / zero segmented words (ratios fall back to 0)
        len_denom = title_len if 0 < title_len else 1
        word_denom = word_count if 0 < word_count else 1
        # neutral sentiment (0.5) for empty text, matching the description/tag handling
        if 0 == title_len:
            title_senti = 0.5
        else:
            title_senti = SnowNLP(cur_title).sentiments
        cur_des = video_metadata['description']
        des_len = len(cur_des)
        if 0 == des_len:
            des_senti = 0.5
        else:
            des_senti = SnowNLP(cur_des).sentiments
        cur_tags = video_metadata['tags']
        tags_count = len(cur_tags.strip().split(',', -1))
        if 0 == len(cur_tags):
            tags_senti = 0.5
        else:
            tags_senti = SnowNLP(cur_tags).sentiments
        textual_analysis_fd.write(video_metadata['id'])
        textual_analysis_fd.write('\t%d\t%d\t%0.4f\t%d\t%0.4f\t%d\t%0.4f\t%d\t%0.4f\t%d\t%0.4f\t%d\t%0.4f\t%d\t%0.4f\t%d\t%0.4f\t%d\t%0.4f\t%0.8f' %
                                  (title_len, title_cnchar_len, 1. * title_cnchar_len / len_denom,
                                   noun_count, 1. * noun_count / word_denom,
                                   verb_count, 1. * verb_count / word_denom,
                                   adjective_count, 1. * adjective_count / word_denom,
                                   adverb_count, 1. * adverb_count / word_denom,
                                   preposition_count, 1. * preposition_count / word_denom,
                                   numeral_count, 1. * numeral_count / word_denom,
                                   eng_count, 1. * eng_count / word_denom,
                                   punctuation_count, 1. * punctuation_count / word_denom,
                                   title_senti))
        textual_analysis_fd.write('\t%d\t%0.8f\t%d\t%0.8f' % (des_len, des_senti, tags_count, tags_senti))
        textual_analysis_fd.write('\n')

    first_date = date(int(date_str[0 : 4]), int(date_str[5 : 7]), int(date_str[8 : 10]))
    # day-30 snapshot first, then day-29 for the vids not found there; a vid is
    # removed from the pending set as soon as it has been written once, so
    # duplicates in either dump are emitted only once
    for day_count in (29, 28):
        last_date = first_date + timedelta(days = day_count)
        json_fd = open(json_path + str(first_date) + '_' + str(last_date), 'r')
        for line in json_fd.readlines():
            video_metadata = json.loads(line.strip())
            if video_metadata['id'] in vid_set:
                vid_set.remove(video_metadata['id'])
                _write_video_properties(video_metadata)
                _write_content_topic(video_metadata)
                _write_textual_analysis(video_metadata)
        json_fd.close()
    video_properties_fd.close()
    content_topic_fd.close()
    textual_analysis_fd.close()
# vci1-7, vci_rate1-7, burst, vc, comment, favorite, up, down
def get_historical_populairty(vci_file, json_path, date_str, historical_popularity_file):
    """Write per-video historical-popularity feature rows.

    For every video listed in ``vci_file`` (tab-separated: vid followed by
    its first seven daily view-count increments) the matching metadata is
    looked up in one of the two most recent observation files under
    ``json_path`` and one output row is written:

        id, vci1-7, vci_rate1-7, burst flag, view/comment/favorite/up/down.

    @param vci_file: path of the <vid, vci1..vci7> table
    @param json_path: directory/prefix of the per-window JSON metadata files,
        named ``<first_date>_<last_date>``
    @param date_str: first observation date, formatted ``YYYY-MM-DD``
    @param historical_popularity_file: output path (overwritten)
    """
    # vid -> first 7 daily view-count increments
    vid_vci_map = {}
    with open(vci_file, 'r') as vci_fd:
        for line in vci_fd:
            fields = line.strip().split('\t', -1)
            # fields: vid, vci1, vci2, ..., vciN -- only the first 7 are kept
            vid_vci_map[fields[0]] = [int(fields[i]) for i in range(1, 1 + 7)]
    first_date = date(int(date_str[0: 4]), int(date_str[5: 7]), int(date_str[8: 10]))
    historical_popularity_fd = open(historical_popularity_file, 'w')
    # A video's metadata lives in either the 7-day or the 6-day observation
    # window file; scan the 7-day window first, then the 6-day window for the
    # leftovers (the two passes were previously duplicated verbatim).
    for days in (6, 5):
        last_date = first_date + timedelta(days=days)
        with open(json_path + str(first_date) + '_' + str(last_date), 'r') as json_fd:
            for line in json_fd:
                video_metadata = json.loads(line.strip())
                vid = video_metadata['id']
                if vid in vid_vci_map:
                    _write_popularity_record(historical_popularity_fd,
                                             vid_vci_map[vid], video_metadata)
                    if days == 6:
                        # Videos handled in the first pass must not be
                        # written again by the second one.
                        vid_vci_map.pop(vid)
    historical_popularity_fd.close()


def _write_popularity_record(fd, vcis, video_metadata):
    """Write one feature row: id, vci1-7, vci_rate1-7, burst flag, counters."""
    fd.write(video_metadata['id'])
    for vci in vcis:
        fd.write('\t%d' % vci)
    s = sum(vcis)
    burst_flag = False
    for vci in vcis:
        if 0 == s:
            fd.write('\t0')
        else:
            fd.write('\t%0.4f' % (1. * vci / s))
        # "burst": at least one single day holds >= 3/7 of the week's views
        if 0 < s and 3 * 1. / 7 <= 1. * vci / s:
            burst_flag = True
    fd.write('\t' + str(burst_flag))
    fd.write('\t%d\t%d\t%d\t%d\t%d' %
             (video_metadata['view_count'],
              int(video_metadata['comment_count']),
              int(video_metadata['favorite_count']),
              int(video_metadata['up_count']),
              int(video_metadata['down_count'])))
    fd.write('\n')
if '__main__' == __name__:
    # Driver for the feature-extraction pipeline. Earlier stages are kept
    # below as commented-out steps so individual phases can be re-enabled.
    datapath = '/Users/ouyangshuxin/Documents/Datasets/Youku_Popularity_151206_160103/'
    workpath = '/Users/ouyangshuxin/Documents/Two-stage_Popularity_Prediction/'
    date_strs = ['2015-12-12', '2015-12-13', '2015-12-14', '2015-12-15', '2015-12-16',
                 '2015-12-17', '2015-12-18', '2015-12-19', '2015-12-20', '2015-12-21']
    # date_strs = ['2015-12-12', '2015-12-13']
    # get tag list
    # in_files = []
    # for d in date_strs:
    #     in_files.append(datapath + 'video_detail/' + d + '_' + d)
    # get_tags_bylevel(workpath + 'features/popularity level/levels',
    #                  in_files,
    #                  workpath + 'features/tags/vid_tag')
    # for i in range(0, 4):
    #     count_tags(workpath + 'features/tags/vid_tag_level' + str(i + 1),
    #                workpath + 'features/tags/vid_tag_count_level' + str(i + 1))
    # in_files = []
    # out_files = []
    # for i in range(0, 4):
    #     in_files.append(workpath + 'features/tags/vid_tag_count_level' + str(i + 1))
    #     out_files.append(workpath + 'features/tags/taglist_level' + str(i + 1))
    # get_taglist(in_files, out_files)
    # get titlewords list
    # in_files = []
    # for d in date_strs:
    #     in_files.append(datapath + 'video_detail/' + d + '_' + d)
    # get_titlewords_bylevel(workpath + 'features/popularity level/levels',
    #                        in_files,
    #                        workpath + 'features/titlewords/vid_titleword')
    # for i in range(0, 4):
    #     count_titlewords(workpath + 'features/titlewords/vid_titleword_level' + str(i + 1),
    #                      workpath + 'features/titlewords/vid_titleword_count_level' + str(i + 1))
    # in_files = []
    # out_files = []
    # for i in range(0, 4):
    #     in_files.append(workpath + 'features/titlewords/vid_titleword_count_level' + str(i + 1))
    #     out_files.append(workpath + 'features/titlewords/titlewordlist_level' + str(i + 1))
    # get_titlewordlist(in_files, out_files)
    # get source names
    # count_sourcename(datapath + 'video_detail/',
    #                  date_strs,
    #                  workpath + 'features/sourcenames/names')
    # # extract video features
    # for d in date_strs:
    #     # vci_file, sourcename_file, tag_path_prefix, titleword_path_prefix,
    #     # json_path, date_str,
    #     # video_properties_file, content_topic_file, textual_analysis_file
    #     get_video_info(workpath + 'data/view count clean increase/' + d,
    #                    workpath + 'features/sourcenames/names',
    #                    workpath + 'features/tags/taglist_level',
    #                    workpath + 'features/titlewords/titlewordlist_level',
    #                    datapath + 'video_detail/',
    #                    d,
    #                    workpath + 'features/video property/' + d,
    #                    workpath + 'features/content topic/' + d,
    #                    workpath + 'features/textual analysis/' + d)
    # # extract user features
    # for d in date_strs:
    #     get_user_info(datapath + 'user_detail/',
    #                   d,
    #                   workpath + 'features/user statistic/' + d)
    # get historical popularitys
    for d in date_strs:
        get_historical_populairty(workpath + 'data/view count clean increase/' + d,
                                  datapath + 'video_detail/',
                                  d,
                                  workpath + 'features/historical popularity/' + d)
    # Merge the per-day feature files into one file per feature family.
    # (The five previously copy-pasted merge blocks are now data-driven.)
    feature_sets = [
        ('video property', 'video_property_features'),
        ('user statistic', 'user_statistic_features'),
        ('content topic', 'content_topic_features'),
        ('textual analysis', 'textual_analysis_features'),
        ('historical popularity', 'historical_popularity_features'),
    ]
    for subdir, merged_name in feature_sets:
        in_files = [workpath + 'features/' + subdir + '/' + d for d in date_strs]
        data.data_extractor.merge_files(in_files,
                                        workpath + 'features/' + subdir + '/' + merged_name)
    print('All Done!')
| [
"ouyangshuxin@gmail.com"
] | ouyangshuxin@gmail.com |
8d3238d6ea928ddc31a5a446dc96770ccb003b9c | eb6129ed33b467f0882b0c690ae0e23ed0738a40 | /src/image_adaptor/srv/_normalImage.py | 409c93330c291f032b04e020821302d51650a640 | [] | no_license | Michi05/image_adaptor-deprecated- | db5ef7ee0954978d43185f6629251f75946ae391 | 662442f1c1ef6dbd88acc23a6ec9828172d2db6b | refs/heads/master | 2020-03-30T18:49:55.509798 | 2012-05-31T12:43:30 | 2012-05-31T12:43:30 | 3,694,339 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,143 | py | """autogenerated by genmsg_py from normalImageRequest.msg. Do not edit."""
import roslib.message
import struct
# Autogenerated ROS service-request message (genmsg_py). The serialization
# routines below mirror the .msg definition embedded in ``_full_text`` and
# should be regenerated, not hand-edited.
class normalImageRequest(roslib.message.Message):
    # Interface checksum / type name used by ROS for wire-compatibility checks.
    _md5sum = "af8ad02b46d61aef136a826c5d08279b"
    _type = "image_adaptor/normalImageRequest"
    _has_header = False  # flag to mark the presence of a Header object
    _full_text = """string topicName
int64 nImages
"""
    __slots__ = ['topicName','nImages']
    _slot_types = ['string','int64']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.

        The available fields are:
            topicName,nImages

        @param args: complete set of field values, in .msg order
        @param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(normalImageRequest, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.topicName is None:
                self.topicName = ''
            if self.nImages is None:
                self.nImages = 0
        else:
            self.topicName = ''
            self.nImages = 0

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        @param buff: buffer
        @type buff: StringIO
        """
        try:
            _x = self.topicName
            length = len(_x)
            # length-prefixed string, followed by a little-endian int64
            buff.write(struct.pack('<I%ss'%length, length, _x))
            buff.write(_struct_q.pack(self.nImages))
        except struct.error as se: self._check_types(se)
        except TypeError as te: self._check_types(te)

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        @param str: byte array of serialized message
        @type str: str
        """
        try:
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            self.topicName = str[start:end]
            start = end
            end += 8
            (self.nImages,) = _struct_q.unpack(str[start:end])
            return self
        except struct.error as e:
            raise roslib.message.DeserializationError(e) #most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        @param buff: buffer
        @type buff: StringIO
        @param numpy: numpy python module
        @type numpy module
        """
        try:
            _x = self.topicName
            length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            buff.write(_struct_q.pack(self.nImages))
        except struct.error as se: self._check_types(se)
        except TypeError as te: self._check_types(te)

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        @param str: byte array of serialized message
        @type str: str
        @param numpy: numpy python module
        @type numpy: module
        """
        try:
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            self.topicName = str[start:end]
            start = end
            end += 8
            (self.nImages,) = _struct_q.unpack(str[start:end])
            return self
        except struct.error as e:
            raise roslib.message.DeserializationError(e) #most likely buffer underfill

# Pre-compiled struct codecs shared by the (de)serializers above.
_struct_I = roslib.message.struct_I
_struct_q = struct.Struct("<q")
"""autogenerated by genmsg_py from normalImageResponse.msg. Do not edit."""
import roslib.message
import struct
import std_msgs.msg
import sensor_msgs.msg
# Autogenerated ROS service-response message (genmsg_py): an array of
# sensor_msgs/Image. Regenerate instead of hand-editing.
class normalImageResponse(roslib.message.Message):
    _md5sum = "465f5ebe142654711d8c5bf4770df57a"
    _type = "image_adaptor/normalImageResponse"
    _has_header = False  # flag to mark the presence of a Header object
    _full_text = """sensor_msgs/Image[] images
================================================================================
MSG: sensor_msgs/Image
# This message contains an uncompressed image
# (0, 0) is at top-left corner of image
#
Header header # Header timestamp should be acquisition time of image
# Header frame_id should be optical frame of camera
# origin of frame should be optical center of cameara
# +x should point to the right in the image
# +y should point down in the image
# +z should point into to plane of the image
# If the frame_id here and the frame_id of the CameraInfo
# message associated with the image conflict
# the behavior is undefined
uint32 height # image height, that is, number of rows
uint32 width # image width, that is, number of columns
# The legal values for encoding are in file src/image_encodings.cpp
# If you want to standardize a new string format, join
# ros-users@lists.sourceforge.net and send an email proposing a new encoding.
string encoding # Encoding of pixels -- channel meaning, ordering, size
# taken from the list of strings in src/image_encodings.cpp
uint8 is_bigendian # is this data bigendian?
uint32 step # Full row length in bytes
uint8[] data # actual matrix data, size is (step * rows)
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
    __slots__ = ['images']
    _slot_types = ['sensor_msgs/Image[]']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.

        The available fields are:
            images

        @param args: complete set of field values, in .msg order
        @param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(normalImageResponse, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.images is None:
                self.images = []
        else:
            self.images = []

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        @param buff: buffer
        @type buff: StringIO
        """
        try:
            # array is length-prefixed; each Image is written field by field
            length = len(self.images)
            buff.write(_struct_I.pack(length))
            for val1 in self.images:
                _v1 = val1.header
                buff.write(_struct_I.pack(_v1.seq))
                _v2 = _v1.stamp
                _x = _v2
                buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
                _x = _v1.frame_id
                length = len(_x)
                buff.write(struct.pack('<I%ss'%length, length, _x))
                _x = val1
                buff.write(_struct_2I.pack(_x.height, _x.width))
                _x = val1.encoding
                length = len(_x)
                buff.write(struct.pack('<I%ss'%length, length, _x))
                _x = val1
                buff.write(_struct_BI.pack(_x.is_bigendian, _x.step))
                _x = val1.data
                length = len(_x)
                # - if encoded as a list instead, serialize as bytes instead of string
                if type(_x) in [list, tuple]:
                    buff.write(struct.pack('<I%sB'%length, length, *_x))
                else:
                    buff.write(struct.pack('<I%ss'%length, length, _x))
        except struct.error as se: self._check_types(se)
        except TypeError as te: self._check_types(te)

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        @param str: byte array of serialized message
        @type str: str
        """
        try:
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.images = []
            for i in range(0, length):
                val1 = sensor_msgs.msg.Image()
                _v3 = val1.header
                start = end
                end += 4
                (_v3.seq,) = _struct_I.unpack(str[start:end])
                _v4 = _v3.stamp
                _x = _v4
                start = end
                end += 8
                (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                _v3.frame_id = str[start:end]
                _x = val1
                start = end
                end += 8
                (_x.height, _x.width,) = _struct_2I.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                val1.encoding = str[start:end]
                _x = val1
                start = end
                end += 5
                (_x.is_bigendian, _x.step,) = _struct_BI.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                val1.data = str[start:end]
                self.images.append(val1)
            return self
        except struct.error as e:
            raise roslib.message.DeserializationError(e) #most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        @param buff: buffer
        @type buff: StringIO
        @param numpy: numpy python module
        @type numpy module
        """
        try:
            length = len(self.images)
            buff.write(_struct_I.pack(length))
            for val1 in self.images:
                _v5 = val1.header
                buff.write(_struct_I.pack(_v5.seq))
                _v6 = _v5.stamp
                _x = _v6
                buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
                _x = _v5.frame_id
                length = len(_x)
                buff.write(struct.pack('<I%ss'%length, length, _x))
                _x = val1
                buff.write(_struct_2I.pack(_x.height, _x.width))
                _x = val1.encoding
                length = len(_x)
                buff.write(struct.pack('<I%ss'%length, length, _x))
                _x = val1
                buff.write(_struct_BI.pack(_x.is_bigendian, _x.step))
                _x = val1.data
                length = len(_x)
                # - if encoded as a list instead, serialize as bytes instead of string
                if type(_x) in [list, tuple]:
                    buff.write(struct.pack('<I%sB'%length, length, *_x))
                else:
                    buff.write(struct.pack('<I%ss'%length, length, _x))
        except struct.error as se: self._check_types(se)
        except TypeError as te: self._check_types(te)

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        @param str: byte array of serialized message
        @type str: str
        @param numpy: numpy python module
        @type numpy: module
        """
        try:
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.images = []
            for i in range(0, length):
                val1 = sensor_msgs.msg.Image()
                _v7 = val1.header
                start = end
                end += 4
                (_v7.seq,) = _struct_I.unpack(str[start:end])
                _v8 = _v7.stamp
                _x = _v8
                start = end
                end += 8
                (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                _v7.frame_id = str[start:end]
                _x = val1
                start = end
                end += 8
                (_x.height, _x.width,) = _struct_2I.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                val1.encoding = str[start:end]
                _x = val1
                start = end
                end += 5
                (_x.is_bigendian, _x.step,) = _struct_BI.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                val1.data = str[start:end]
                self.images.append(val1)
            return self
        except struct.error as e:
            raise roslib.message.DeserializationError(e) #most likely buffer underfill

# Pre-compiled struct codecs shared by the (de)serializers above.
_struct_I = roslib.message.struct_I
_struct_2I = struct.Struct("<2I")
_struct_BI = struct.Struct("<BI")
# Service definition tying the request/response message classes together;
# referenced by ROS service clients/servers for the 'normalImage' service.
class normalImage(roslib.message.ServiceDefinition):
    _type = 'image_adaptor/normalImage'
    _md5sum = 'b5607901045b06c3620e4e142df98f90'
    _request_class = normalImageRequest
    _response_class = normalImageResponse
| [
"cibermichi@gmail.com"
] | cibermichi@gmail.com |
b5fe46abd8d9ae411adef6c9e83d0aec6768af4e | d37123396d07c9fc7470763857f8ed7739d7c885 | /examples/example1/example.py | 947db099f88956930b8b2aa0a27945ee0c95f81a | [
"Apache-2.0"
] | permissive | Panaetius/python-profiling-presentation | 8bf07bed3b07e9a71199cb71b006ea078ddaf067 | 0ed7a20fcefbb3bb5afc2fe7f99b603e458a0575 | refs/heads/main | 2023-08-20T03:18:07.583527 | 2021-10-07T12:34:04 | 2021-10-07T12:34:04 | 414,101,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from typing import List
def top_level(n: int = 15000) -> List[int]:
    """Return, for each i in [0, n), the sum of ``lower_level(i)``.

    The upper bound was previously hard-coded to 15000; it is now a
    parameter (defaulting to the old value) so the workload size can be
    varied when profiling.
    """
    return [sum(lower_level(i)) for i in range(n)]
def lower_level(i: int) -> List[int]:
    """Return the list [0, 1, ..., i-1] (empty for i <= 0)."""
    # list(range(i)) replaces the manual append loop -- identical output,
    # built at C speed.
    return list(range(i))
if __name__ == "__main__":
    # Entry point used when running/profiling this example module directly.
    result = top_level()
| [
"ralf.grubenmann@sdsc.ethz.ch"
] | ralf.grubenmann@sdsc.ethz.ch |
c050d236e13cb1e6d2c399fb599e3352abb6595f | 9801207478d671b7380bbc60f00eb76b3eb83367 | /main3.py | 891a2479ac8be2821785e2cd97abf0cebfd8f127 | [
"MIT"
] | permissive | mdpe-ir/mdVpn | 0131d089e659c9085eccfeb83300519507a64d5f | febc718c2b3e0142ebe9cecc08c2739ea0f78774 | refs/heads/main | 2023-03-13T04:27:26.434903 | 2021-03-06T21:49:37 | 2021-03-06T21:51:49 | 345,194,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,066 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
# Generated by pyuic5 from main.ui (see header warning); manual edits here
# are lost on regeneration -- change the .ui file instead.
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the widget tree for the main window (geometry, fonts, icon)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(552, 344)
        font = QtGui.QFont()
        font.setFamily("IRANSans")
        MainWindow.setFont(font)
        icon = QtGui.QIcon()
        # NOTE(review): icon path points into a user Downloads folder --
        # likely broken on any other machine; confirm/bundle as a resource.
        icon.addPixmap(QtGui.QPixmap("../../Downloads/vpn-icon-with-shield_116137-218.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(10, 100, 531, 211))
        self.label.setText("")
        self.label.setObjectName("label")
        self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 0, 531, 81))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.disconnectBtn = QtWidgets.QPushButton(self.horizontalLayoutWidget)
        self.disconnectBtn.setObjectName("disconnectBtn")
        self.horizontalLayout.addWidget(self.disconnectBtn)
        self.connectTofastesServerButton = QtWidgets.QPushButton(self.horizontalLayoutWidget)
        self.connectTofastesServerButton.setObjectName("connectTofastesServerButton")
        self.horizontalLayout.addWidget(self.connectTofastesServerButton)
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(216, 320, 331, 20))
        self.label_2.setObjectName("label_2")
        MainWindow.setCentralWidget(self.centralwidget)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Install the translated (Persian) UI strings."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "ام دی وی پی ان"))
        self.disconnectBtn.setText(_translate("MainWindow", "قطع کردن اتصال"))
        self.connectTofastesServerButton.setText(_translate("MainWindow", "اتصال به سریع ترین سرور"))
        self.label_2.setText(_translate("MainWindow", "کپی رایت 2020 | ماهان | برپایه ی پروتون وی پی ان"))
if __name__ == "__main__":
    import sys
    # Standard Qt bootstrap: create the application, attach the generated UI
    # to a fresh main window, and enter the event loop.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| [
"mdpe84.ir@gmail.com"
] | mdpe84.ir@gmail.com |
763d6410b4189a1a5acdeb4c468f957584048877 | 02f8d733759bf57cab2e08026e68a7e1c8fa5812 | /tf-rex/one_step_actor_critic.py | 97ed832f284217a5fde6e45b6880583739473078 | [] | no_license | Lumotheninja/dino-reinforcement-learning | b18faaa7fcc210d7d5d51b51027383df1fbb5632 | 927d7b534e2f2ff52f2ea8b6cdd8e4a27eb232a0 | refs/heads/master | 2022-12-01T10:32:19.201343 | 2019-08-12T14:40:14 | 2019-08-12T14:40:14 | 199,822,824 | 3 | 2 | null | 2022-11-22T04:09:52 | 2019-07-31T09:21:27 | JavaScript | UTF-8 | Python | false | false | 7,195 | py | from environment import Environment
from preprocessor import Preprocessor
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
# set up matplotlib
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
from IPython import display
plt.ion()
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#device = 'cpu'
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
class DQN(nn.Module):
    """Convolutional actor-critic network.

    Takes a stack of 4 grayscale frames of size ``h`` x ``w`` and returns a
    tuple ``(action_probabilities, state_value)`` with shapes
    ``(batch, output)`` and ``(batch, 1)``.
    """

    def __init__(self, h, w, output):
        super(DQN, self).__init__()
        # Three conv + batch-norm stages. Attribute names are kept stable so
        # previously saved state_dicts remain loadable.
        self.conv1 = nn.Conv2d(4, 24, kernel_size=8, stride=4)
        self.bn1 = nn.BatchNorm2d(24)
        self.conv2 = nn.Conv2d(24, 32, kernel_size=4, stride=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1)
        self.bn3 = nn.BatchNorm2d(32)

        def out_size(size, kernel, stride):
            # Standard conv output-size formula (no padding, no dilation).
            return (size - (kernel - 1) - 1) // stride + 1

        conv_w, conv_h = w, h
        for kernel, stride in ((8, 4), (4, 2), (3, 1)):
            conv_w = out_size(conv_w, kernel, stride)
            conv_h = out_size(conv_h, kernel, stride)
        flat_features = conv_w * conv_h * 32
        hidden = (flat_features + output) // 2
        self.head = nn.Linear(flat_features, hidden)
        self.tail = nn.Linear(hidden, output)
        self.valuefc = nn.Linear(hidden, 1)

    def forward(self, x):
        """Return (softmax policy, value estimate) for input batch ``x``."""
        x = x.float()
        for conv, bn in ((self.conv1, self.bn1),
                         (self.conv2, self.bn2),
                         (self.conv3, self.bn3)):
            x = F.relu(bn(conv(x)))
        features = F.leaky_relu(self.head(x.view(x.size(0), -1)))
        return (F.softmax(self.tail(features).float(), dim=-1),
                self.valuefc(features).float())
def select_action(state):
    """Epsilon-greedy action selection with exponentially decaying epsilon.

    Reads/updates the module-level ``steps_done`` counter and uses the
    module-level ``policy_net``, ``EPS_*`` constants, ``n_actions`` and
    ``device``. Returns a 0-d long tensor holding the action index (use
    ``.item()`` to get the int).
    """
    global steps_done
    sample = random.random()
    eps_threshold = EPS_END + (EPS_START - EPS_END) * \
        math.exp(-1. * steps_done / EPS_DECAY)
    steps_done += 1
    if sample > eps_threshold:
        # Exploit: sample from the policy network's action distribution.
        with torch.no_grad():
            dist = torch.distributions.Categorical(policy_net(state)[0].squeeze())
            return dist.sample()
    # Explore: return the same 0-d tensor shape as the exploit branch.
    # (The original returned a (1, 1) tensor here; mixing the two shapes in
    # one episode breaks torch.Tensor(action_list) inside optimize_model.)
    return torch.tensor(random.randrange(n_actions), device=device, dtype=torch.long)
def plot_rewards():
    """Live-plot the per-episode cumulative rewards stored in the
    module-level ``episode_rewards`` list, plus a 100-episode moving average
    once enough episodes exist."""
    plt.figure(2)
    plt.clf()
    durations_t = torch.tensor(episode_rewards, dtype=torch.float)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Reward')
    plt.plot(durations_t.numpy())
    # Take 100 episode averages and plot them too
    if len(durations_t) >= 100:
        means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
        # Pad with zeros so the average curve aligns with the episode axis.
        means = torch.cat((torch.zeros(99), means))
        plt.plot(means.numpy())

    plt.pause(0.001)  # pause a bit so that plots are updated
    if is_ipython:
        # Redraw in-place when running inside a notebook.
        display.clear_output(wait=True)
        display.display(plt.gcf())
def optimize_model(state_list, reward_list, action_list):
    """Run one actor-critic gradient step over a full episode.

    Uses the module-level ``policy_net``, ``optimizer``, ``GAMMA`` and
    ``device``. The actor loss weights log-probabilities by a one-step TD
    advantage (r + GAMMA * V(s') - V(s)); the critic is regressed toward
    full discounted Monte-Carlo returns.
    """
    states = torch.stack(state_list).to(device).squeeze()
    actions = torch.Tensor(action_list).long().to(device)
    logits, q = policy_net(states)
    # Log-probability of each action actually taken.
    log_prob = torch.log(logits.squeeze().gather(1, actions[:, None]).squeeze())
    rewards = torch.Tensor(reward_list).to(device)
    # Discounted Monte-Carlo returns, computed back-to-front.
    R = 0
    rewards2 = []
    for r in reward_list[::-1]:
        R = r + GAMMA * R
        rewards2.insert(0, R)
    rewards2 = torch.Tensor(rewards2).to(device)
    # One-step TD advantage: r_t + GAMMA * V(s_{t+1}) - V(s_t); the final
    # step uses V(s_T) = 0 implicitly (no bootstrap added for the last item).
    rewards -= q.detach().squeeze()
    rewards[:-1] += GAMMA * q.detach().squeeze()[1:]
    loss1 = -torch.sum(torch.mul(log_prob, rewards))
    # NOTE(review): rewards2 has shape (T,) while q is (T, 1); smooth_l1_loss
    # will broadcast these -- presumably unintended, verify shapes.
    loss2 = F.smooth_l1_loss(rewards2, q)
    # rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
    loss = loss1 + loss2
    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    for param in policy_net.parameters():
        # Gradient clipping to stabilize training.
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
if __name__ == "__main__":
    # Connect to the T-Rex game environment served over a local websocket.
    env = Environment("127.0.0.1", 9090)

    # Hyperparameters. NOTE(review): BATCH_SIZE and TARGET_UPDATE (and the
    # target_net below) are vestigial from the DQN variant -- this
    # actor-critic loop trains on whole episodes and never syncs target_net.
    BATCH_SIZE = 512
    GAMMA = 0.999
    EPS_START = 0.9
    EPS_END = 0.005
    EPS_DECAY = 200
    TARGET_UPDATE = 10
    width = 80
    height = 80
    preprocessor = Preprocessor(width, height)

    n_actions = len(env.actions.keys())

    policy_net = DQN(height, width, n_actions).float().to(device)
    target_net = DQN(height, width, n_actions).float().to(device)
    target_net.load_state_dict(policy_net.state_dict())
    target_net.eval()

    episode_rewards = []
    steps_done = 0
    lr = 1e-4
    optimizer = optim.Adam(policy_net.parameters(), lr)

    num_episodes = 1000
    for i_episode in range(num_episodes):
        # Initialize the environment and state (state = stack of 4 frames).
        frame, _, done = env.start_game()
        frame = preprocessor.process(frame)
        print(frame)
        state = preprocessor.get_initial_state(frame)
        state = torch.tensor(state).unsqueeze(0).float().to(device)
        cum_rewards = 0
        # Per-episode trajectory buffers consumed by optimize_model.
        state_list = []
        reward_list = []
        action_list = []
        while not done:
            # Select and perform an action
            action = select_action(state)
            action_str = Environment.actions[action.item()]
            print("action: ", action_str)
            frame, reward, done = env.do_action(action.item())
            frame = preprocessor.process(frame)
            next_state = preprocessor.get_updated_state(frame)
            next_state = torch.tensor(next_state).unsqueeze(0).float().to(device)
            reward = torch.tensor([reward], device=device).float()
            cum_rewards += reward
            # Store the transition in memory
            state_list.append(state)
            reward_list.append(reward)
            action_list.append(action)
            # Move to the next state
            state = next_state
        # Model is only optimized after entire policy is ran
        optimize_model(state_list, reward_list, action_list)
        episode_rewards.append(cum_rewards)
        plot_rewards()
        # Save weights every 50 episodes.
        if i_episode % 50 == 0:
            torch.save(policy_net.state_dict(), 'dino_one_step_ac_reward_%.1f_ep_%d.pt' % (cum_rewards, i_episode))
    print('Complete')
    # env.render()
    # env.close()
    plt.ioff()
    plt.savefig("dino_one_step_ac_final.png")
    torch.save(policy_net.state_dict(), 'dino_one_step_ac_final.pt')
"wentat126@yahoo.com"
] | wentat126@yahoo.com |
f94750a8ca196b2ccb9a794de4f5b8392ae32303 | 99abd35b33b7cdeb3ff2006fe0ea99b6501571fb | /stabilizer_search/stabilizers/eigenstates.py | d9942c009c3927bdf53536f8c6f1077986952168 | [] | no_license | daochenw/rl-stabilizer | 72e37adc5325bfff3e765b655f0f11f2185e5dc6 | 9a5531fc1c5d39a3b54fab46feb405eee797a49b | refs/heads/main | 2023-08-16T23:05:23.678315 | 2021-08-05T14:23:52 | 2021-08-05T14:23:52 | 393,067,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | """Module which defines methods for converting the QObj Pauli Operators that
generate each stabilizer group, building a projector and finding the associated
+1 eigenstate."""
from math import pow as fpow
from numpy import allclose, imag, isclose, real
from numpy.linalg import eig, norm
from ..mat import qeye
def find_projector(generating_set):
    """Build the projector onto the joint +1 eigenspace of the generators.

    Computes prod_g (I + g) / 2^n for the n stabilizer generators in
    ``generating_set`` (operators of dimension 2^n x 2^n).
    """
    n_qubits = len(generating_set)
    dim = pow(2, n_qubits)
    identity = qeye(dim)
    product = qeye(dim)
    for generator in generating_set:
        product = product * (identity + generator)
    return product / fpow(2, n_qubits)
def find_eigenstate(projector):
eigs, vecs = eig(projector)
for _n, _eig in enumerate(eigs):
if allclose(_eig, complex(1)) or allclose(_eig, 1.):
state = (vecs[:,_n])
r = real(state)
im = imag(state)
r[isclose(r, 0.)] = 0
im[isclose(im, 0.)] = 0
state = r+1j*im
state = state / norm(state, 2)
return state
return None
def py_find_eigenstates(generating_sets, real_only=False):
    """Return the +1 eigenstate of each generating set's projector.

    With ``real_only=True``, only states whose imaginary part is (numerically)
    zero everywhere are returned.
    """
    projectors = (find_projector(gens) for gens in generating_sets)
    states = [find_eigenstate(projector) for projector in projectors]
    if real_only:
        return [state for state in states if allclose(imag(state), 0.)]
    return states
| [
"wdaochen@gmail.com"
] | wdaochen@gmail.com |
098b677bf2fa4b7b1ccee658537382ae24bf7ec1 | d5e54f1a01e307347ccd2b250cbdd500018c1cbc | /parse_cli_parameter.py | a713272caee16d44336f5e2225013b7c73ec3154 | [] | no_license | cyru8/cloudguru | 0ad21770363ae1b09628ceb1fb50d6abfb474838 | 8c2921fa6e78dd80384110b6f498550aac7c3fb0 | refs/heads/main | 2023-01-06T07:19:14.704044 | 2020-11-09T02:33:03 | 2020-11-09T02:33:03 | 311,202,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | #/usr/bin/env python3
import sys

# sys.argv[1:] are the user-supplied positional arguments (argv[0] is the
# script path). The original printed sys.argv[:1], i.e. only the script
# name, under the "Positional argument" label.
print(f"Positional argument: {sys.argv[1:]}")
# Guard the indexed access: the original read sys.argv[1] unconditionally,
# which raises IndexError when the script is run with no arguments.
if len(sys.argv) > 1:
    print(f"First argument: {sys.argv[1]} ")
| [
"olumide.adetiba@live.com"
] | olumide.adetiba@live.com |
18a3c808b9a37fa071cde60a38bdb476e43b92f8 | e57e52007f79603264dc839bb932b2cbd1681b01 | /blog/token.py | 752a0e4406b28062ec69c269aaba8adf865de836 | [] | no_license | Sanjarbek-AI/Fast-API | 6ba22851446a9f29ed2002b07efa00e49c131f45 | b8171fbb05c8ba6540d0f12bf28679706ce45aba | refs/heads/master | 2023-07-18T19:08:52.336704 | 2021-09-23T05:12:20 | 2021-09-23T05:12:20 | 408,771,814 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | from datetime import datetime, timedelta
from typing import Optional
from jose import jwt, JWTError
from . import schemes
# JWT signing configuration.
# NOTE(review): the signing key is hard-coded in source (this is the FastAPI
# tutorial value); load it from configuration/environment for real use.
SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
ALGORITHM = "HS256"
# Not referenced by the visible code in this module; presumably used by
# callers when choosing an expires_delta -- TODO confirm.
ACCESS_TOKEN_EXPIRE_MINUTES = 30
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
    """Return a signed JWT containing ``data`` plus an ``exp`` claim.

    If ``expires_delta`` is falsy (None or a zero delta), a 15-minute
    lifetime is used.
    """
    payload = data.copy()
    # Truthiness (not an `is None` check) on purpose: a zero timedelta also
    # falls back to the default lifetime, matching the original behavior.
    lifetime = expires_delta if expires_delta else timedelta(minutes=15)
    payload["exp"] = datetime.utcnow() + lifetime
    return jwt.encode(payload, SECRET_KEY, algorithm=ALGORITHM)
def verify_token(token: str, credentials_exception):
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
email: str = payload.get("sub")
if email is None:
raise credentials_exception
token_data = schemes.TokenData(email=email)
except JWTError:
raise credentials_exception
| [
"sanjarbekmohinaxon0205@gmail.com"
] | sanjarbekmohinaxon0205@gmail.com |
d6ef24a15f6ab1e5fd02c5785f469825d298a0cb | f4fad88ca529fd11d91fa8f45b176a3011c1f350 | /test/circuit/library/ansatzes/test_uccsd.py | 2b9a1cdd047661020eeb80c3f1f7a667388c8c3b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | eggerdj/qiskit-nature | 03d77d303ac6894a59ebf69ea7ac8ccedc930a11 | c24a85140eb514628e2b9b1a5f0e03a689f8ade7 | refs/heads/main | 2023-04-17T08:02:00.762509 | 2021-04-30T16:07:18 | 2021-04-30T16:07:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,979 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the UCCSD Ansatz."""
from test import QiskitNatureTestCase
from test.circuit.library.ansatzes.test_ucc import assert_ucc_like_ansatz
from ddt import ddt, data, unpack
from qiskit_nature.circuit.library import UCCSD
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
from qiskit_nature.operators.second_quantization import FermionicOp
from qiskit_nature.converters.second_quantization import QubitConverter
@ddt
class TestUCCSD(QiskitNatureTestCase):
"""Tests for the UCCSD Ansatz."""
@unpack
@data(
(4, (1, 1), [FermionicOp([('+-II', 1j), ('-+II', 1j)]),
FermionicOp([('II+-', 1j), ('II-+', 1j)]),
FermionicOp([('+-+-', 1j), ('-+-+', -1j)])]),
(8, (2, 2), [FermionicOp([('+I-IIIII', 1j), ('-I+IIIII', 1j)]),
FermionicOp([('+II-IIII', 1j), ('-II+IIII', 1j)]),
FermionicOp([('I+-IIIII', 1j), ('I-+IIIII', 1j)]),
FermionicOp([('I+I-IIII', 1j), ('I-I+IIII', 1j)]),
FermionicOp([('IIII+I-I', 1j), ('IIII-I+I', 1j)]),
FermionicOp([('IIII+II-', 1j), ('IIII-II+', 1j)]),
FermionicOp([('IIIII+-I', 1j), ('IIIII-+I', 1j)]),
FermionicOp([('IIIII+I-', 1j), ('IIIII-I+', 1j)]),
FermionicOp([('++--IIII', 1j), ('--++IIII', -1j)]),
FermionicOp([('+I-I+I-I', 1j), ('-I+I-I+I', -1j)]),
FermionicOp([('+I-I+II-', 1j), ('-I+I-II+', -1j)]),
FermionicOp([('+I-II+-I', 1j), ('-I+II-+I', -1j)]),
FermionicOp([('+I-II+I-', 1j), ('-I+II-I+', -1j)]),
FermionicOp([('+II-+I-I', 1j), ('-II+-I+I', -1j)]),
FermionicOp([('+II-+II-', 1j), ('-II+-II+', -1j)]),
FermionicOp([('+II-I+-I', 1j), ('-II+I-+I', -1j)]),
FermionicOp([('+II-I+I-', 1j), ('-II+I-I+', -1j)]),
FermionicOp([('I+-I+I-I', 1j), ('I-+I-I+I', -1j)]),
FermionicOp([('I+-I+II-', 1j), ('I-+I-II+', -1j)]),
FermionicOp([('I+-II+-I', 1j), ('I-+II-+I', -1j)]),
FermionicOp([('I+-II+I-', 1j), ('I-+II-I+', -1j)]),
FermionicOp([('I+I-+I-I', 1j), ('I-I+-I+I', -1j)]),
FermionicOp([('I+I-+II-', 1j), ('I-I+-II+', -1j)]),
FermionicOp([('I+I-I+-I', 1j), ('I-I+I-+I', -1j)]),
FermionicOp([('I+I-I+I-', 1j), ('I-I+I-I+', -1j)]),
FermionicOp([('IIII++--', 1j), ('IIII--++', -1j)])]),
(8, (2, 1), [FermionicOp([('+I-IIIII', 1j), ('-I+IIIII', 1j)]),
FermionicOp([('+II-IIII', 1j), ('-II+IIII', 1j)]),
FermionicOp([('I+-IIIII', 1j), ('I-+IIIII', 1j)]),
FermionicOp([('I+I-IIII', 1j), ('I-I+IIII', 1j)]),
FermionicOp([('IIII+-II', 1j), ('IIII-+II', 1j)]),
FermionicOp([('IIII+I-I', 1j), ('IIII-I+I', 1j)]),
FermionicOp([('IIII+II-', 1j), ('IIII-II+', 1j)]),
FermionicOp([('++--IIII', 1j), ('--++IIII', -1j)]),
FermionicOp([('+I-I+-II', 1j), ('-I+I-+II', -1j)]),
FermionicOp([('+I-I+I-I', 1j), ('-I+I-I+I', -1j)]),
FermionicOp([('+I-I+II-', 1j), ('-I+I-II+', -1j)]),
FermionicOp([('+II-+-II', 1j), ('-II+-+II', -1j)]),
FermionicOp([('+II-+I-I', 1j), ('-II+-I+I', -1j)]),
FermionicOp([('+II-+II-', 1j), ('-II+-II+', -1j)]),
FermionicOp([('I+-I+-II', 1j), ('I-+I-+II', -1j)]),
FermionicOp([('I+-I+I-I', 1j), ('I-+I-I+I', -1j)]),
FermionicOp([('I+-I+II-', 1j), ('I-+I-II+', -1j)]),
FermionicOp([('I+I-+-II', 1j), ('I-I+-+II', -1j)]),
FermionicOp([('I+I-+I-I', 1j), ('I-I+-I+I', -1j)]),
FermionicOp([('I+I-+II-', 1j), ('I-I+-II+', -1j)])]),
)
def test_uccsd_ansatz(self, num_spin_orbitals, num_particles, expect):
"""Tests the UCCSD Ansatz."""
converter = QubitConverter(JordanWignerMapper())
ansatz = UCCSD(qubit_converter=converter,
num_particles=num_particles,
num_spin_orbitals=num_spin_orbitals)
assert_ucc_like_ansatz(self, ansatz, num_spin_orbitals, expect)
| [
"noreply@github.com"
] | eggerdj.noreply@github.com |
3420af5543cff2284f020349d8d24d5e938e69be | 4bfbeb2370a7235ababb71d12b5b1d685b27e3f4 | /user/migrations/0001_initial.py | 6e158f0da1dcf10baf0c348c644cb5e79330aadf | [] | no_license | dawoodsiddiq786/python-apis | f3e08d1de4c1378f1c2bb35e6b88b486b5d68213 | 67812a0fbf9497abc7bfa53fd2b3fb029c28b6fe | refs/heads/main | 2023-01-24T01:13:18.220421 | 2020-12-06T19:34:51 | 2020-12-06T19:34:51 | 319,114,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,372 | py | # Generated by Django 2.2.4 on 2020-08-28 13:59
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='Categorie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=50)),
('description', models.CharField(default='', max_length=500)),
('image', models.URLField(blank=True, default='https://elysator.com/wp-content/uploads/blank-profile-picture-973460_1280-e1523978675847.png')),
('created', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(default='', max_length=500)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('first_name', models.CharField(default='', max_length=50)),
('last_name', models.CharField(default='', max_length=50)),
('email', models.EmailField(max_length=254, unique=True)),
('is_active', models.BooleanField(default=True)),
('is_superuser', models.BooleanField(default=False)),
('password', models.CharField(default=None, max_length=20)),
('image', models.URLField(blank=True, default='https://elysator.com/wp-content/uploads/blank-profile-picture-973460_1280-e1523978675847.png')),
('address', models.CharField(default='', max_length=600)),
('date_joined', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Role',
fields=[
],
options={
'indexes': [],
'constraints': [],
'proxy': True,
},
bases=('auth.group',),
managers=[
('objects', django.contrib.auth.models.GroupManager()),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=50)),
('description', models.CharField(default='', max_length=500)),
('volume', models.TextField(default='')),
('address', models.TextField(default='')),
('reason_for_selling', models.TextField(default='')),
('brand', models.TextField(default='')),
('model', models.TextField(default='')),
('media', models.TextField(default='')),
('price', models.FloatField(max_length=500)),
('is_used', models.BooleanField(default=False)),
('is_by_admin', models.BooleanField(default=False)),
('is_delivered', models.BooleanField(default=False)),
('is_cash', models.BooleanField(default=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='user.Categorie')),
('ordered_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orderby', to='user.User')),
('posted_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(default='', max_length=500)),
('media', models.TextField(default='')),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('comments', models.ManyToManyField(default=None, null=True, related_name='cc', to='user.Comment')),
('likes', models.ManyToManyField(default=None, null=True, related_name='likces', to='user.User')),
('posted_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
),
migrations.AddField(
model_name='comment',
name='posted_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='user.User'),
),
]
| [
"tasfiqul.ghani@northsouth.edu"
] | tasfiqul.ghani@northsouth.edu |
62393888a82435bbcb27916ed36a8bed00d86eeb | e754a93652eeca77074c621376aeb8ebbe5926ec | /Project_oNe/urls.py | 1fc4416b6593019ca15d965c3b1ce4216e059dd0 | [] | no_license | obiwills/my_custom_user | b56404a747fdc17bd0661f9c70d1f3271f231e3f | df4e55048466c59444e0e53afb98321d299c5f7f | refs/heads/master | 2021-05-10T14:35:46.287479 | 2018-01-22T23:19:35 | 2018-01-22T23:19:35 | 118,523,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | """Project_oNe URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import TemplateView
urlpatterns = [
path('', TemplateView.as_view(template_name='home.html'), name='home'),
path('admin/', admin.site.urls),
path('users/', include('users.urls')),
path('users/', include('django.contrib.auth.urls')),
]
| [
"mtk902@yahoo.com"
] | mtk902@yahoo.com |
bb442bea8780dda7af20ef8d7a2dc8ae075f8043 | f3dcbf16a83224045796026fff14b44d943fe30d | /AI/jarvis.py | eb6eb7076bfdc6d841a722ac5fab3e5bcf06f281 | [] | no_license | keithchad/Javris-AI | 1256981bcaf31035b18950d22e9b603a54f10825 | b7f1573a949f5f1b6c5c916dff3a7f72e501a4d8 | refs/heads/master | 2022-11-16T21:27:50.354068 | 2020-07-17T15:46:51 | 2020-07-17T15:46:51 | 279,426,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,892 | py | import pyttsx3 # pip install pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
engine = pyttsx3.init()
def speak(audio):
engine.say(audio) #Speaks what is in the brackets
engine.runAndWait()
def time():
Time = datetime.datetime.now().strftime("%I:%M:%S")
speak("the current time is")
speak(Time)
def date():
year = int(datetime.datetime.now().year)
month = int(datetime.datetime.now().month)
date = int(datetime.datetime.now().day)
speak("the current date is")
speak(date)
speak(month)
speak(year)
def wishme():
speak("Welcome Back Sir!")
time()
date()
hour = datetime.datetime.now().hour
if hour >= 6 and hour < 12:
speak("Goodmorning sir!")
elif hour >= 12 and hour < 18:
speak("Goodafternoon sir!")
elif hour >= 18 and hour < 24:
speak("Goodevining sir!")
else :
speak("Goodnight sir!")
speak("Jarvis at your service!")
def takeCommand():
r = sr.Recognizer()
with sr.Microphone as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recongnizing...")
query = r.recognize_google(audio, language='en-in')
print(query)
except Exception as e:
print (e)
speak("Say that again")
return"None"
return query
if __name__ == "__main__":
wishme()
while True :
query = takeCommand().lower()
if 'time' in query:
time()
elif 'date' in query:
date()
elif 'wikipedia' in query:
speak("Searching...")
query = query.replace("wikipedia","")
result = wikipedia.summary(query, sentences=2)
print(result)
speak(result)
elif 'offline' in query:
quit()
takeCommand() | [
"63049827+keithchad@users.noreply.github.com"
] | 63049827+keithchad@users.noreply.github.com |
7562eab065b565fc40986e5b85bde0cffe2bf27d | dfcb65de02953afaac24cc926ee32fcdede1ac21 | /src/pyrin/caching/local/handlers/__init__.py | 4f7d9594e8de35a0a70f0749192b9e0b9fa7c5d4 | [
"BSD-3-Clause"
] | permissive | mononobi/pyrin | 031d0c38da945b76b07ea100554ffc7f8081b05e | 9d4776498225de4f3d16a4600b5b19212abe8562 | refs/heads/master | 2023-08-31T03:56:44.700142 | 2023-08-20T22:20:06 | 2023-08-20T22:20:06 | 185,481,041 | 20 | 8 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | # -*- coding: utf-8 -*-
"""
caching local handlers package.
"""
from pyrin.packaging.base import Package
class CachingLocalHandlersPackage(Package):
"""
caching local handlers package class.
"""
NAME = __name__
DEPENDS = ['pyrin.configuration',
'pyrin.globalization.datetime',
'pyrin.logging']
| [
"mohamadnobakht@gmail.com"
] | mohamadnobakht@gmail.com |
3071a6ed8d24d42ac210dd4efe459091ab880013 | a574b04497e597b8fb9b8698f89fe24afa7d13cc | /preProcessing/Make3D/checkcaffeIO.py | 46ae1a9aac92b4ae240ae02f108a7b0eb36ae91c | [
"BSD-2-Clause"
] | permissive | myBestLove/FCRN-DepthPrediction | 0aff250ef6fe061b7fddbbde63ddaeb13be59e25 | 2527c900f13bf150d4ffc048a9ebab8bfb79c808 | refs/heads/master | 2021-06-12T03:52:50.181080 | 2016-12-24T09:16:40 | 2016-12-24T09:16:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | #!/home/zhujun/python27/bin/python
import caffe
import skimage.io as io
import sys
test_proto = sys.argv[1]
model_file = sys.argv[2]
'''
mode 1: check input
mode 2: check output
mode 3: check internal
'''
mode = int(sys.argv[3])
net = caffe.Net(test_proto, model_file, caffe.TEST)
out = net.forward()
if mode == 1:
image = net.blobs['train_data'].data
image = image[0,:,:].transpose((1,2,0))
io.imsave('test_image.png', image)
label = net.blobs['train_label'].data
label = label[0,0,:,:]
io.imsave('test_label.png', label)
elif mode == 2:
label = net.blobs['train_label'].data
label = label[0,0,:,:]
io.imsave('test_label.png', label)
score = net.blobs['score'].data
score = score[0,0,:,:]
io.imsave('test_score.png', score)
else:
layername = sys.argv[4]
out = net.blobs[layername].data
print out
| [
"zhujun@bigdata-gpu-server24.xg01"
] | zhujun@bigdata-gpu-server24.xg01 |
7e1f5288db8d09ad2fb7d6daa8f7910887c01bb7 | b5c6fcff7d71970c5623773986a0b5f7da3e7ff5 | /manage.py | 5c87cb9bff1b33efb5e1c10f416c9b532d3b1951 | [] | no_license | MargaSandor/TimeSpeck | 2b5f2fdcbba3ec273dd5532ac63317c7690b4d42 | 2db3e2270b7af93bda4600cdabdd6dea86fefef0 | refs/heads/master | 2021-01-10T16:17:11.235281 | 2016-02-14T14:57:01 | 2016-02-14T14:57:01 | 51,696,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TimeSpeck.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"Margareta.Sandor@icclowe.com"
] | Margareta.Sandor@icclowe.com |
ed6907e93a4cbb9e708a12f97eb53533aba7cde3 | 95560b466177a48b7a530fc83619851babfbd9d6 | /rotate_elements.py | ff3b4748b9f4db7c0d5ba684a6dd3b673fdb5ff3 | [] | no_license | br-anupama/vmware | aee888f301e9d77922a11b85d9f10d238910ca17 | 08df4fd63eed53b4d643b6c0cd4f940f8db4e13e | refs/heads/master | 2021-09-02T04:12:44.957528 | 2017-12-30T07:38:33 | 2017-12-30T07:38:33 | 115,680,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/python
import sys
def rotate_by_pos(list_elements, pos):
res = list_elements[pos:] + list_elements[:pos]
try:
print "Enter list elements"
elements = raw_input()
elements = [int(x) for x in elements.split()]
if not elements:
print "You have not entered list elements"
sys.exit(-1)
print "Entered elements = ", elements
print "Enter Rotating postion"
pos = raw_input()
if not pos :
print "postion is wrong"
sys.exit(-1)
pos = int(pos)
if pos > len(elements):
print "entered postion is larger than element size"
sys.exit(-1)
rotate_res = elements[pos:]+elements[:pos]
print "After rotate %s" %str(rotate_res)
except Exception, ex:
print "rotate_elements: Exeption: %s" %str(ex)
| [
"branupama@gmail.com"
] | branupama@gmail.com |
9d15d8f6e11e71cdaa1e1fd5f39f363fc13f5201 | e62167fa2622a8ddb19c7ea1b9586938c5b4ecfc | /core/migrations/0004_auto_20181028_2304.py | 2ffbb5a4f618d6bd34e8bf39fffcc1e5a598f777 | [] | no_license | lmillar2i2/misperris_entrega_2 | 4e50b5e7d4eed79b007aa85ba107ae7b153e661f | aa0029f328daf94d53dae02372633cfecec1875e | refs/heads/master | 2021-09-26T13:54:26.188344 | 2018-10-30T21:45:20 | 2018-10-30T21:45:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # Generated by Django 2.1.2 on 2018-10-29 02:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20181028_2123'),
]
operations = [
migrations.AlterField(
model_name='rescatado',
name='fotografia',
field=models.ImageField(upload_to='media/'),
),
]
| [
"l.millar@alumnos.duoc.cl"
] | l.millar@alumnos.duoc.cl |
d39cecf19b930a068268f3a8b8b196e83ee284fb | 4178af016a66e156666aadfeae3cd9d34bc1f00d | /ex12.py | a3c8456b96fd7d1559a431d35d7654542c976251 | [] | no_license | SandarAungMyint/python-exercise | a03a54dab4e38ccc93b04d1e5110c293c352140a | b27ac9866ceae885224d76f8a36646d4e3d71d69 | refs/heads/master | 2020-03-19T19:41:26.740212 | 2018-06-29T15:40:47 | 2018-06-29T15:40:47 | 136,869,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | age = raw_input("How old are you? ")
height = raw_input("How tall are you? ")
weight = raw_input("How much do you weight? ")
print "So, you're %r old, %r tall and %r heavy." % (
age, height, weight)
| [
"sandar27141995@gmail.com"
] | sandar27141995@gmail.com |
2c2f5c4f8873a1160414558e7986756aa09544e4 | 9e3fa472d810db1e4aef30b01bc6a2c3ca518604 | /random_Tk.py | 8c85ffd1349b3a4bcf7a3902f0906549397fa27b | [] | no_license | Davit50/Python | c403e26a32ed9c595f9498b7bc77ee584962f0f8 | 0cbf52911dbdb51b60edfab01bfda99a4183e0f0 | refs/heads/main | 2023-01-20T06:47:01.507226 | 2022-07-14T22:57:11 | 2022-07-14T22:57:11 | 317,472,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | import tkinter
from random import randint as r, choice
def draw(event):
if h > w:
a = r(1, w)
else:
a = r(1, h)
xy = (r(1, h - a), r(1, h - a))
size = (xy[0] + a, xy[1] + a)
i = '1234567890ABCDEF'
a1 = choice(i)
a2 = choice(i)
a3 = choice(i)
a4 = choice(i)
a5 = choice(i)
a6 = choice(i)
canvas.create_oval(xy, size, fill=f'#{a1}{a2}{a3}{a4}{a5}{a6}')
print(xy)
print(size)
h = 600
w = 600
master = tkinter.Tk()
canvas = tkinter.Canvas(master, bg='blue', height=h, width=w)
canvas.pack()
master.bind("<KeyPress>", draw)
master.mainloop()
| [
"noreply@github.com"
] | Davit50.noreply@github.com |
8f8808d79b13456226c20d29fa09308ae24382df | cdf23a2b22b0d0643f9bf48fd8c7d0a8ef83945d | /qstrader/utils/console.py | beee1fa020b1139e7988a543dd9ea3de95049652 | [
"MIT"
] | permissive | PabloHebe/qstrader | 2e23d267e0e2cf6632011eaea486891c8eed4c17 | 81c9473fbb782220c5cced2e331fb7a7b0b0082d | refs/heads/master | 2022-08-27T10:28:27.411188 | 2019-12-16T14:17:40 | 2019-12-16T14:17:40 | 111,547,620 | 0 | 1 | MIT | 2020-01-05T12:54:16 | 2017-11-21T12:42:55 | Python | UTF-8 | Python | false | false | 258 | py | BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
def string_colour(text, colour=WHITE):
"""
Create string text in a particular colour to the terminal.
"""
seq = "\x1b[1;%dm" % (30 + colour) + text + "\x1b[0m"
return seq
| [
"mike@quarkgluon.com"
] | mike@quarkgluon.com |
12f347b969357c233fc6872ea78c1264340b83e1 | 151de3f2ed6569a21ba277c62c8f72f83b5fff09 | /combine.py | f82e3393e6a42429e10599c42d498b572aff36fd | [] | no_license | radar-lab/mmfall | d769ea13c96898d4c4afdd32e959d29da9376e1e | da8fa193d5641cdc2eca7f36498d1fd6e382f621 | refs/heads/master | 2022-08-11T07:48:30.537434 | 2022-07-27T21:29:59 | 2022-07-27T21:29:59 | 243,595,330 | 79 | 28 | null | null | null | null | UTF-8 | Python | false | false | 2,059 | py | #!/usr/bin/env python
# Author: Feng Jin
# Comments: Combine all the .npy files and the timesheet
import argparse
import numpy as np
import os
class file_preproc:
def __init__(self):
pass
def combiner(self, filedir):
self.filecnt = 0
self.filedir = filedir
self.total_pointcloud = []
self.total_frameidx = []
num_frames = 0
for self.file in os.listdir(self.filedir):
if self.file.endswith(".npy") and self.file != 'total_pointcloud.npy':
self.filecnt += 1
# Load the .npy file
pointcloud = np.load(self.filedir+self.file, allow_pickle=True)
self.total_pointcloud.extend(pointcloud)
if os.path.exists(self.filedir+self.file[:-4] + '.csv'): # Ground truth time index file exist
# Load the ground truth timesheet .csv file
gt_frameidx = np.genfromtxt(self.filedir+self.file[:-4] + '.csv', delimiter=',').astype(int)
self.total_frameidx.extend((np.array(gt_frameidx)+num_frames))
num_frames += len(pointcloud)
print('*************************************************')
print('Done. The number of total processed files are:' + str(self.filecnt))
self.total_pointcloud_path = str(os.path.join(self.filedir,'total_pointcloud'))
print('Total pointcloud files are combined into:' + str(self.total_pointcloud_path) + '.npy')
np.save(self.total_pointcloud_path, self.total_pointcloud)
print('Total ground truth timesheets are combined into:' + str(self.total_pointcloud_path) + '.csv')
np.savetxt(self.total_pointcloud_path+'.csv', self.total_frameidx, fmt='%i', delimiter=',')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--filedir', type=str, default=None, help='Load which file. Default: None.')
args = parser.parse_args()
file_preproc().combiner(args.filedir) | [
"noreply@github.com"
] | radar-lab.noreply@github.com |
c7d99b683e6acbbe80cbc85721394ac0f1c7323f | f999bc5a6e0da4f0904ef2112d7b6191f180ca5b | /Advent of code/Day2_Part1.py | 44f5dafb0aa805206e823978d61b1740a82b147f | [] | no_license | ritesh-deshmukh/Algorithms-and-Data-Structures | 721485fbe91a5bdb4d7f99042077e3f813d177cf | 2d3a9842824305b1c64b727abd7c354d221b7cda | refs/heads/master | 2022-11-09T00:18:51.203415 | 2018-10-08T22:31:05 | 2018-10-08T22:31:05 | 132,504,988 | 0 | 1 | null | 2022-10-23T00:51:15 | 2018-05-07T19:07:33 | Python | UTF-8 | Python | false | false | 1,297 | py | # f = open("elves_input", "r")
# if f.mode == "r":
# input_task = f.read()
# input_task = f.readlines()
# for symbol in input_task:
# dimensions = symbol.split("x")
# print(dimensions)
with open('elves_input') as f:
dimensions_data = []
for line in f:
line = line.split('x') # to deal with blank
if line: # lines (ie skip them)
line = [int(i) for i in line]
dimensions_data.append(line)
# product = dimensions_data[0][0]
# print(dimensions_data[0])
total_area = 0
for dimensions in dimensions_data:
# sorted = sorted(dimensions)
# small_side_1 = sorted[0]
# small_side_2 = sorted[1]
area = ((2* dimensions[0] * dimensions[1])
+ (2* dimensions[1] * dimensions[2])
+ (2* dimensions[0] * dimensions[2]))
total_area += area
# print(sorted)
print(f"Area total: {total_area}")
total_small_side = 0
for dimensions1 in dimensions_data:
area1 = sorted(dimensions1)
# print(area1[0] * area1[1])
small_side = area1[0] * area1[1]
total_small_side += small_side
print(f"Small side total: {total_small_side}")
answer = total_area + total_small_side
print(f"Total Square feet: {answer}") | [
"riteshdeshmukh260@gmail.com"
] | riteshdeshmukh260@gmail.com |
1e68f4426a5b3835594ad8792a036f353f9b5734 | 32eba552c1a8bccb3a329d3d152b6b042161be3c | /15_pj_pdf_merger.py | d316f0b6a7a805701c4abd4debff148e5b564734 | [] | no_license | ilmoi/ATBS | d3f501dbf4b1099b76c42bead3ec48de3a935a86 | 7f6993751e2ad18af36de04168d32b049d85a9c1 | refs/heads/master | 2022-07-11T21:56:23.284871 | 2020-05-15T05:26:06 | 2020-05-15T05:26:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | """Finds all pdfs in cur dir > sorts alphabetically > merges together taking the first page only once."""
import PyPDF2
import os
import re
# prep the files list
files = os.listdir()
chosen = []
r = re.compile(r'.*\.pdf')
for file in files:
try:
mo = r.search(file)
# print(mo.group())
chosen.append(mo.group())
except:
pass
chosen.sort()
# manually removing the encrypted file (cba)
chosen.pop(1)
chosen.pop(1)
print(chosen)
# create writer
writer = PyPDF2.PdfFileWriter()
# iterate through files and pages and write them all down
for i, file in enumerate(chosen):
with open(file, 'rb') as f:
reader = PyPDF2.PdfFileReader(f)
# for first doc - add the first page too
if i == 0:
pageObj = reader.getPage(0)
writer.addPage(pageObj)
# for all docs
for p in range(1, reader.numPages):
pageObj = reader.getPage(p)
writer.addPage(pageObj)
# finally write
# NOTE this one needs to sit inside of the with open statement or the pages will be blank!
with open('longfile.pdf', 'wb') as f:
writer.write(f)
# lets check number of pages matches
for file in chosen:
with open(file, 'rb') as f:
reader = PyPDF2.PdfFileReader(f)
print(reader.numPages)
print('compare that to ----->')
with open('longfile.pdf', 'rb') as f:
reader = PyPDF2.PdfFileReader(f)
print(reader.numPages)
# sounds correct!
| [
"iljamoisejevs@gmail.com"
] | iljamoisejevs@gmail.com |
c183494b02007e4d273abcee4d96e0eeedca656c | 2c25262a750b6225f7f9df004de94fb942dc2a41 | /jobportal/views.py | 5196cfd12de52e3aade2fd134bd0eaa330f8555d | [] | no_license | milansoriya/job-portal-project | 4abee165f7727db660fcb89eb9379a58b17f02df | 3b130ce7dcbd071ae45d9a97d3a6dec8c52b923b | refs/heads/master | 2023-01-03T18:11:23.117586 | 2020-11-06T05:17:58 | 2020-11-06T05:17:58 | 310,498,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,321 | py | from django.shortcuts import render,redirect
from .models import Job,JobQualification,JobApplication,JobSeekerList
from accounts.models import User_Employeer,User_Employee
from django.views.generic import ListView
from django.contrib import messages
from django.core.mail import send_mail
from telusko import settings
import csv
from django.http import HttpResponse
from django.core.files.storage import FileSystemStorage
from django.http import JsonResponse
from django.core import serializers
def about(request):
    """Render the About page with site statistics for a logged-in job seeker.

    Requires a job-seeker session (``user_id`` in the session); anonymous
    visitors are bounced back to the landing page.
    """
    if not request.session.get('user_id'):
        return redirect('/')
    context = {
        'user_count': User_Employee.objects.all().count(),
        'company_count': User_Employeer.objects.all().count(),
        'job_count': Job.objects.all().count(),
        'user_name': request.session.get('user_name'),
    }
    return render(request, "about.html", context)
def about1(request):
    """Render the About page with site statistics for a logged-in employer.

    Mirror of :func:`about` but keyed on the employer session
    (``company_name`` in the session); anonymous visitors are redirected
    to the landing page.
    """
    if not request.session.get('company_name'):
        return redirect('/')
    context = {
        'user_count': User_Employee.objects.all().count(),
        'company_count': User_Employeer.objects.all().count(),
        'job_count': Job.objects.all().count(),
        'user_name': request.session.get('company_name'),
    }
    return render(request, "about.html", context)
def auto_complete(request):
    """Return job-seeker current roles matching the autocomplete ``term``.

    Responds with a JSON array of role strings (case-insensitive substring
    match on ``e_current_role``). Previously the view implicitly returned
    ``None`` when no ``term`` parameter was supplied, which makes Django
    raise a 500; now it returns an empty JSON array instead.
    """
    roles = []
    if 'term' in request.GET:
        qs = User_Employee.objects.filter(
            e_current_role__icontains=request.GET.get('term'))
        # Duplicates are kept deliberately to preserve the original payload shape.
        roles = [user.e_current_role for user in qs]
    # safe=False is required because the top-level JSON value is a list.
    return JsonResponse(roles, safe=False)
def jobFilter(request):
    """Filter the job list for a logged-in job seeker.

    Combines the last title/location search (stored in the session by
    ``jobListView.get_queryset``) with the GET filters ``jobType``,
    ``Experience`` and ``Salary``, and renders ``jobList.html`` with
    ``[job, company]`` pairs.

    Fixes over the previous version:
    * no ``NameError`` when ``title``/``location`` are missing from the
      session (they now default to the empty string);
    * no ``NameError`` for unexpected ``Experience``/``Salary`` values
      (unknown values now behave like ``'all'``);
    * a job can no longer be appended twice when ``jobType == '0'`` and
      the job's own type is also ``'0'``.
    """
    if not request.session.get('user_id'):
        return redirect('/')

    job_type = request.GET.get('jobType')
    experience = request.GET.get('Experience')
    salary = request.GET.get('Salary')

    # (min, max) inclusive bounds; absence of a key means "no filter" ('all').
    experience_ranges = {
        '0to5': (0, 5),
        '5to10': (5, 10),
        '10to15': (10, 15),
        '15to20': (15, 20),
    }
    salary_ranges = {
        '0to3': (0, 300000),
        '3to6': (300000, 600000),
        '6to9': (600000, 900000),
        '9plus': (900000, 999999900000),
    }
    exp_min, exp_max = experience_ranges.get(experience, (None, None))
    sal_min, sal_max = salary_ranges.get(salary, (None, None))

    # Re-apply the last search stored by jobListView; '' means "no constraint".
    title = request.session.get('title') or ''
    location = request.session.get('location') or ''

    jobs = Job.objects.all()
    if title:
        jobs = jobs.filter(j_title=title)
    if location:
        jobs = jobs.filter(j_location=location)

    filtered = []
    for job in jobs:
        # jobType '0' means "any type".
        if job_type != '0' and job.j_type != job_type:
            continue
        if exp_min is not None and not (exp_min <= job.j_experience <= exp_max):
            continue
        # j_salary is stored as a string in the model; compare numerically.
        if sal_min is not None and not (sal_min <= int(job.j_salary) <= sal_max):
            continue
        filtered.append(job)

    # Pair each job with its posting company for the template.
    job_company = [[job, User_Employeer.objects.get(id=job.j_c_id_id)]
                   for job in filtered]
    return render(request, 'jobList.html', {'object_list': job_company})
def jobseekerFilter(request):
    """Filter the job-seeker list for a logged-in employer.

    Combines the last qualification/location search (stored in the session
    by ``JobseekerListView.get_queryset``) with the GET filters ``jobType``,
    ``Experience`` and ``current_role``, and renders ``jobseekerList.html``.

    Fixes over the previous version:
    * no ``NameError`` when ``Qualification``/``location`` are missing from
      the session (they now default to the empty string);
    * no ``NameError`` for unexpected ``Experience`` values (unknown values
      now behave like ``'all'``);
    * a user can no longer be appended twice when ``jobType == '0'`` and
      the user's own job type is also ``'0'``.
    """
    if not request.session.get('company_id'):
        return redirect('/')

    job_type = request.GET.get('jobType')
    experience = request.GET.get('Experience')
    current_role = request.GET.get('current_role')

    # (min, max) inclusive bounds; absence of a key means "no filter" ('all').
    experience_ranges = {
        '0to5': (0, 5),
        '5to10': (5, 10),
        '10to15': (10, 15),
        '15to20': (15, 20),
    }
    exp_min, exp_max = experience_ranges.get(experience, (None, None))

    # Re-apply the last search stored by JobseekerListView.
    qualification = request.session.get('Qualification') or ''
    location = request.session.get('location') or ''

    users = User_Employee.objects.all()
    if qualification:
        users = users.filter(e_qualification=qualification)
    if location:
        users = users.filter(e_city=location)

    matches = []
    for user in users:
        # jobType '0' means "any type".
        if job_type != '0' and user.e_jobType != job_type:
            continue
        if exp_min is not None and not (exp_min <= user.e_experience <= exp_max):
            continue
        # Empty current_role means "any role".
        if current_role and user.e_current_role != current_role:
            continue
        matches.append(user)

    return render(request, 'jobseekerList.html', {'object_list': matches})
class jobListView(ListView):
    """Job search results for job seekers.

    Reads ``title``/``location`` from the query string, remembers them in
    the session (so :func:`jobFilter` can re-apply the same search), and
    returns ``[job, company]`` pairs for ``jobList.html``.
    """
    model = Job
    template_name = 'jobList.html'

    def get_queryset(self):
        # GET.get(...) returns None for a missing key; normalise to '' so the
        # "no constraint" checks below work and the session never stores None
        # (the old code filtered j_title=None and poisoned jobFilter's session).
        title = self.request.GET.get('title') or ''
        location = self.request.GET.get('location') or ''

        # Persist the search terms for the follow-up jobFilter() view.
        self.request.session['title'] = title
        self.request.session['location'] = location

        jobs = Job.objects.all()
        if title:
            jobs = jobs.filter(j_title=title)
        if location:
            jobs = jobs.filter(j_location=location)

        # Pair each job with its posting company for the template.
        return [[job, User_Employeer.objects.get(id=job.j_c_id_id)]
                for job in jobs]
class JobseekerListView(ListView):
    """Company-facing job seeker search, filtered by qualification and city.

    Stores the search terms in the session so jobseekerFilter and the
    shortlist redirect can rebuild the same result set.
    """
    model = User_Employee
    template_name = 'JobseekerList.html'

    def get_context_data(self, **kwargs):
        """Add the logged-in company's own job postings to the context."""
        context = super().get_context_data(**kwargs)
        company = User_Employeer.objects.get(id=self.request.session.get('company_id'))
        context['jobs'] = Job.objects.filter(j_c_id_id=company)
        return context

    def get_queryset(self):
        # Default to '' (not None): previously GET.get returned None for absent
        # parameters, overwriting the '' default and storing None in the session.
        q = self.request.GET.get('Qualification') or ""
        q1 = self.request.GET.get('location') or ""
        self.request.session['Qualification'] = q
        self.request.session['location'] = q1
        if q and q1:
            return User_Employee.objects.filter(e_qualification=q, e_city=q1)
        if q:
            return User_Employee.objects.filter(e_qualification=q)
        if q1:
            return User_Employee.objects.filter(e_city=q1)
        return User_Employee.objects.all()
def sendOneMail(request, id):
    """Email the company's job openings to one shortlisted job seeker.

    Sends a single mail to the seeker with primary key ``id`` listing every
    job the logged-in company has posted, then re-renders the interest list.
    Redirects to '/' when no company is logged in.
    """
    if not request.session.get('company_id'):
        return redirect('/')
    company = User_Employeer.objects.get(id=request.session.get('company_id'))
    # Rebuild the shortlist so the interest-list page can be re-rendered.
    shortlist = JobSeekerList.objects.filter(c_id_id=company.id)
    users = [User_Employee.objects.get(id=entry.e_id_id) for entry in shortlist]
    recipient = User_Employee.objects.get(id=id)
    jobs = Job.objects.filter(j_c_id_id=company)
    subject = "Regarding vacancy at " + company.c_name
    body_parts = [
        "Dear " + recipient.e_first_name + " " + recipient.e_last_name
        + ",\n Greetings from " + company.c_name
        + " we have opening in following jobs kindly visit following link.\n"
    ]
    for job in jobs:
        body_parts.append(job.j_title + ":-http://127.0.0.1:8000/jobs/" + str(job.id) + " \n")
    body_parts.append("if you find it suitable reach out to us at " + company.c_email)
    send_mail(subject, "".join(body_parts), settings.EMAIL_HOST_USER, [recipient.e_email])
    return render(request, "Employeer/interestList.html", {'users': users})
def sendAllMail(request):
    """Email the company's current openings to every shortlisted job seeker.

    One mail per shortlist entry, each listing all of the company's jobs,
    then re-renders the interest list. Redirects to '/' without a company
    session.
    """
    if not request.session.get('company_id'):
        return redirect('/')
    company = User_Employeer.objects.get(id=request.session.get('company_id'))
    shortlist = JobSeekerList.objects.filter(c_id_id=company.id)
    users = [User_Employee.objects.get(id=entry.e_id_id) for entry in shortlist]
    jobs = Job.objects.filter(j_c_id_id=company)
    for recipient in users:
        subject = "Regarding vacancy at " + company.c_name
        body_parts = [
            "Dear " + recipient.e_first_name + " " + recipient.e_last_name
            + ",\n Greetings from " + company.c_name
            + " we have opening in following jobs kindly visit following link.\n"
        ]
        for job in jobs:
            body_parts.append(job.j_title + ":-http://127.0.0.1:8000/jobs/" + str(job.id) + " \n")
        body_parts.append("if you find it suitable reach out to us at " + company.c_email)
        send_mail(subject, "".join(body_parts), settings.EMAIL_HOST_USER, [recipient.e_email])
    return render(request, "Employeer/interestList.html", {'users': users})
def home(request):
    """Job-seeker landing page: six recent jobs plus site-wide counters."""
    if not request.session.get('user_id'):
        return redirect('/')
    user = User_Employee.objects.get(id=request.session.get('user_id'))
    # Pair each job with its posting company for the template.
    jobs = []
    for job in Job.objects.all()[:6]:
        jobs.append([job, User_Employeer.objects.get(id=job.j_c_id_id)])
    context = {
        'jobs': jobs,
        'user': user,
        'user_count': User_Employee.objects.all().count(),
        'company_count': User_Employeer.objects.all().count(),
        'job_count': Job.objects.all().count(),
    }
    return render(request, "home.html", context)
def Company_home(request):
    """Company landing page showing a sample of six job seekers."""
    if not request.session.get('company_id'):
        return redirect('/')
    seekers = User_Employee.objects.all()[:6]
    return render(request, "Company_home.html", {'jobs_seekers': seekers})
def UserProfile(request):
    """View/update the logged-in job seeker's profile.

    GET renders the profile page; POST saves the submitted fields plus any
    uploaded photo/resume files. Redirects to '/' when nobody is logged in.
    """
    if not request.session.get('user_id'):
        return redirect('/')
    user = User_Employee.objects.get(id=request.session.get('user_id'))
    if request.method == "POST":
        user_image = request.FILES.get('user_image')
        user_resume = request.FILES.get('user_resume')
        if user_image:
            fs = FileSystemStorage(base_url="pics/employee", location="media/pics/employee")
            filename = fs.save(user_image.name, user_image)
            user.e_image = fs.url(filename)
        if user_resume:
            fs1 = FileSystemStorage(base_url="resumes", location="media/resumes")
            filename1 = fs1.save(user_resume.name, user_resume)
            # BUG FIX: previously the *image* storage (fs) built the resume URL,
            # crashing with NameError when no image was uploaded and pointing
            # resumes at the wrong base path otherwise.
            user.e_resume = fs1.url(filename1)
        user.e_first_name = request.POST.get('first_name')
        user.e_last_name = request.POST.get('last_name')
        user.e_email = request.POST.get('email')
        user.e_mobileno = request.POST.get('mobile_no')
        user.e_username = request.POST.get('username')
        user.e_qualification = request.POST.get('qualification')
        user.e_current_role = request.POST.get('current_role')
        # Map the human-readable job type onto its stored code.
        job_type = request.POST.get('jobType')
        if job_type == 'Full Time':
            user.e_jobType = '1'
        elif job_type == 'Part Time':
            user.e_jobType = '2'
        elif job_type == 'Internship':
            user.e_jobType = '3'
        user.e_experience = request.POST.get('experience')
        user.e_add1 = request.POST.get('house_no')
        user.e_city = request.POST.get('city')
        user.e_state = request.POST.get('state')
        user.e_country = request.POST.get('country')
        user.save()
    return render(request, "UserProfile.html", {'user': user})
def UserDetail(request, id):
    """Detail page for a single job seeker (no login check)."""
    seeker = User_Employee.objects.get(id=id)
    return render(request, "UserDetail.html", {'user': seeker})
def JobDetailsView(request, id):
    """Job detail page for seekers; POST applies to the job (at most once).

    Loads the job, its company and its qualification rows; a POST creates a
    JobApplication unless one already exists, flashing an info message either
    way. Redirects to '/' when nobody is logged in.
    """
    if request.session.get('user_id'):
        job=Job.objects.get(id=id)
        company=User_Employeer.objects.get(id=job.j_c_id_id)
        qualification=JobQualification.objects.all().filter(j_id=id)
        if request.method=='POST':
            user=User_Employee.objects.get(id=request.session.get('user_id'))
            # Guard against duplicate applications for the same job.
            if JobApplication.objects.filter(j_id=job,e_id=user).exists():
                messages.info(request,"Already Applied")
                return render(request,"JobProfile.html",{'job':job,'company':company,'qualification':qualification})
            else:
                application=JobApplication(j_id=job,e_id=user)
                application.save()
                messages.info(request,"Successfully Applied")
                return render(request,"JobProfile.html",{'job':job,'company':company,'qualification':qualification})
        else:
            return render(request,"JobProfile.html",{'job':job,'company':company,'qualification':qualification})
    else:
        return redirect('/')
def AddJob(request):
    """Create a new job posting (with its qualification lines) for the company.

    GET renders the empty form; POST creates the Job plus one JobQualification
    row per line of the qualifications textarea. Redirects to '/' when no
    company is logged in.
    """
    if not request.session.get('company_id'):
        return redirect('/')
    if request.method == 'POST':
        j_title = request.POST.get('j_title')
        j_location = request.POST.get('j_location')
        j_salary = request.POST.get('j_salary')
        j_experience = request.POST.get('j_experience')
        j_type = request.POST.get('j_type')
        j_sort_description = request.POST.get('j_sort_description')
        j_c_id_id = request.session.get('company_id')
        # Translate the displayed job type into its stored numeric code.
        # An unexpected value now raises an explicit KeyError instead of the
        # previous NameError from an unbound local (and no builtin shadowing).
        type_codes = {'Full Time': 1, 'Part Time': 2, 'Internship': 3}
        job = Job(j_title=j_title,
                  j_location=j_location,
                  j_salary=j_salary,
                  j_experience=j_experience,
                  j_type=type_codes[j_type],
                  j_sort_description=j_sort_description,
                  j_c_id_id=j_c_id_id)
        job.save()
        # One JobQualification row per textarea line.
        j_qualification = request.POST.get('j_qualification')
        for qualification_line in j_qualification.splitlines():
            qua = JobQualification(j_id=job, jq_qualification=qualification_line)
            qua.save()
    return render(request, "AddJob.html")
def Company_profile(request):
    """View/update the logged-in company's profile.

    POST saves the submitted company fields (and stores an uploaded logo
    file); GET renders the current values. Redirects to '/' when no company
    is logged in.
    """
    if request.session.get('company_id'):
        if request.method=="POST":
            company=User_Employeer.objects.get(id=request.session.get('company_id'))
            c_logo=request.FILES.get('c_logo')
            if c_logo:
                fs = FileSystemStorage(base_url="pics/employeer",location="media/pics/employeer")
                filename = fs.save(c_logo.name, c_logo)
                # NOTE(review): the saved logo's filename is never written back
                # to the company record -- presumably a logo field should be
                # assigned here; confirm against the User_Employeer model.
            company.c_name=request.POST.get('c_name')
            company.c_email=request.POST.get('c_email')
            company.c_contact=request.POST.get('c_contact')
            company.c_username=request.POST.get('c_username')
            company.c_website=request.POST.get('c_website')
            company.c_add1=request.POST.get('c_add1')
            company.c_city=request.POST.get('c_city')
            company.c_state=request.POST.get('c_state')
            company.c_country=request.POST.get('c_country')
            company.save()
            return render(request,"Employeer/Company_profile.html",{'company':company})
        else:
            company=User_Employeer.objects.get(id=request.session.get('company_id'))
            return render(request,"Employeer/Company_profile.html",{'company':company})
    else:
        return redirect('/')
def Company_jobApplications(request):
    """List applicants for one of the logged-in company's jobs.

    POST selects a job title from the dropdown (remembered in the session as
    'selectedjob' so the delete/mail/CSV views act on the same job); GET just
    shows the job dropdown. Redirects to '/' when no company is logged in.
    """
    if request.session.get('company_id'):
        if request.method=='POST':
            company=User_Employeer.objects.get(id=request.session.get('company_id'))
            jobs=Job.objects.filter(j_c_id_id=company)
            job=Job.objects.get(j_title=request.POST.get('joblist'),j_c_id_id=company)
            # Remember the selection so sibling views operate on the same job.
            request.session['selectedjob']=request.POST.get('joblist')
            jobapp=JobApplication.objects.filter(j_id_id=job)
            users=[]
            for app in jobapp:
                jobseeker=User_Employee.objects.get(id=app.e_id_id)
                #print(jobseeker)
                users.append(jobseeker)
            return render(request,"Employeer/Company_jobApplications.html",{'jobs':jobs,'users':users,'sj':job})
        else:
            company=User_Employeer.objects.get(id=request.session.get('company_id'))
            jobs=Job.objects.filter(j_c_id_id=company)
            return render(request,"Employeer/Company_jobApplications.html",{'jobs':jobs})
    else:
        return redirect('/')
def deleteRequest(request,id):
    """Reject (delete) seeker ``id``'s application for the selected job.

    Operates on the job previously chosen in Company_jobApplications (session
    key 'selectedjob'), then re-renders the remaining applicants.
    Redirects to '/' when no company is logged in.
    """
    if request.session.get('company_id'):
        duser=User_Employee.objects.get(id=id)
        company=User_Employeer.objects.get(id=request.session.get('company_id'))
        jobs=Job.objects.filter(j_c_id_id=company)
        title=request.session.get('selectedjob')
        job=Job.objects.get(j_title=title,j_c_id_id=company)
        application=JobApplication.objects.get(e_id_id=duser,j_id_id=job)
        application.delete()
        # Re-query after the delete so the rendered list is up to date.
        jobapp=JobApplication.objects.filter(j_id_id=job)
        users=[]
        for app in jobapp:
            jobseeker=User_Employee.objects.get(id=app.e_id_id)
            users.append(jobseeker)
        return render(request,"Employeer/Company_jobApplications.html",{'jobs':jobs,'users':users,'sj':job})
    else:
        return redirect('/')
def sendmail(request,id):
    """Invite applicant ``id`` to interview for the selected job, by email.

    Sends the invitation mail, then deletes the application so it no longer
    appears among the pending applicants, and re-renders the list. The job is
    the one stored in the session by Company_jobApplications ('selectedjob').
    Redirects to '/' when no company is logged in.
    """
    if request.session.get('company_id'):
        suser=User_Employee.objects.get(id=id)
        company=User_Employeer.objects.get(id=request.session.get('company_id'))
        jobs=Job.objects.filter(j_c_id_id=company)
        title=request.session.get('selectedjob')
        job=Job.objects.get(j_title=title,j_c_id_id=company)
        subject = "JOB PORTAL"
        msg = "Congratulations you select for the interview process"
        to = suser.e_email
        res = send_mail(subject, msg, settings.EMAIL_HOST_USER, [to])
        # The accepted application is removed from the pending list.
        application=JobApplication.objects.get(e_id_id=suser,j_id_id=job)
        application.delete()
        jobapp=JobApplication.objects.filter(j_id_id=job)
        users=[]
        for app in jobapp:
            jobseeker=User_Employee.objects.get(id=app.e_id_id)
            users.append(jobseeker)
        return render(request,"Employeer/Company_jobApplications.html",{'jobs':jobs,'users':users,'sj':job})
    else:
        return redirect('/')
def download_csv(request):
    """Stream the selected job's applicants as a CSV attachment.

    Uses the job title stored in the session ('selectedjob') by
    Company_jobApplications. Redirects to '/' without a company session.
    """
    if not request.session.get('company_id'):
        return redirect('/')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="Applications.csv"'
    writer = csv.writer(response)
    writer.writerow(['Sr. No.','Job Title','First Name','Last Name','User Name','Email','Mobile No.','Qualification','Address 1','City','State','Country'])
    company = User_Employeer.objects.get(id=request.session.get('company_id'))
    title = request.session.get('selectedjob')
    job = Job.objects.get(j_title=title, j_c_id_id=company)
    applications = JobApplication.objects.filter(j_id_id=job)
    for row_number, app in enumerate(applications, start=1):
        seeker = User_Employee.objects.get(id=app.e_id_id)
        writer.writerow([row_number, title, seeker.e_first_name, seeker.e_last_name,
                         seeker.e_username, seeker.e_email, seeker.e_mobileno,
                         seeker.e_qualification, seeker.e_add1, seeker.e_city,
                         seeker.e_state, seeker.e_country])
    return response
def UserjobApplications(request):
    """List every job the logged-in seeker applied to, with its company."""
    if not request.session.get('user_id'):
        return redirect('/')
    user = User_Employee.objects.get(id=request.session.get('user_id'))
    applications = JobApplication.objects.filter(e_id_id=user)
    # Each row is a [job, company] pair for the template.
    jobs = []
    for application in applications:
        job = Job.objects.get(id=application.j_id_id)
        jobs.append([job, User_Employeer.objects.get(id=job.j_c_id_id)])
    return render(request, 'UserjobApplications.html', {'jobs': jobs})
def deleteApplication(request, id):
    """Withdraw the logged-in seeker's application for job ``id``.

    Deletes the JobApplication row and re-renders the remaining applications.
    Redirects to '/' when nobody is logged in.
    """
    # BUG FIX: the session key was misspelled 'usere_id', so every request --
    # even from logged-in users -- fell through to the redirect.
    if not request.session.get('user_id'):
        return redirect('/')
    user = User_Employee.objects.get(id=request.session.get('user_id'))
    job = Job.objects.get(id=id)
    application = JobApplication.objects.get(e_id_id=user, j_id_id=job)
    application.delete()
    # Rebuild the [job, company] rows for the remaining applications.
    jobs = []
    for remaining in JobApplication.objects.filter(e_id_id=user):
        job = Job.objects.get(id=remaining.j_id_id)
        company = User_Employeer.objects.get(id=job.j_c_id_id)
        jobs.append([job, company])
    return render(request, 'UserjobApplications.html', {'jobs': jobs})
def user_download_csv(request):
    """Download the logged-in seeker's applications as a CSV attachment."""
    if not request.session.get('user_id'):
        return redirect('/')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="Applications.csv"'
    writer = csv.writer(response)
    writer.writerow(['Sr. No.','Job Title','company'])
    user = User_Employee.objects.get(id=request.session.get('user_id'))
    applications = JobApplication.objects.filter(e_id_id=user)
    for row_number, application in enumerate(applications, start=1):
        job = Job.objects.get(id=application.j_id_id)
        company = User_Employeer.objects.get(id=job.j_c_id_id)
        writer.writerow([row_number, job.j_title, company.c_name])
    return response
def EditUserProfile(request):
    """Render the edit form pre-filled with the logged-in seeker's data."""
    if not request.session.get('user_id'):
        return redirect('/')
    user = User_Employee.objects.get(id=request.session.get('user_id'))
    return render(request, "EditUserProfile.html", {'user': user})
def EditCompany_profile(request):
    """Render the edit form pre-filled with the logged-in company's data."""
    if not request.session.get('company_id'):
        return redirect('/')
    company = User_Employeer.objects.get(id=request.session.get('company_id'))
    return render(request, "Employeer/EditCompany_profile.html", {'company': company})
def posted_job(request):
    """List the company's posted jobs; POST saves edits started in Edit_job.

    Edit_job stores the job id in the session ('job_id'); the POST branch
    applies the submitted fields, rewrites the qualification rows, and then
    renders the job list. Redirects to '/' when no company is logged in.
    """
    if request.session.get('company_id'):
        if request.method=="POST":
            id=request.session.get('job_id')
            # One-shot key set by Edit_job; consumed here.
            del request.session['job_id']
            job=Job.objects.get(id=id)
            job.j_title=request.POST.get('j_title')
            job.j_location=request.POST.get('j_location')
            job.j_salary=request.POST.get('j_salary')
            job.j_experience=request.POST.get('j_experience')
            job.j_sort_description=request.POST.get('j_sort_description')
            # Map the displayed job type back onto its stored numeric code.
            if request.POST.get('j_type')=='Full Time':
                job.j_type=1
            elif request.POST.get('j_type')=='Part Time':
                job.j_type=2
            elif request.POST.get('j_type')=='Internship':
                job.j_type=3
            job.save()
            j_qualification=request.POST.get('j_qualification')
            qualifications=j_qualification.splitlines()
            # Replace all existing qualification rows with the submitted lines.
            qual=JobQualification.objects.filter(j_id_id=id)
            for q in qual:
                q.delete()
            for q in qualifications:
                j_id=job.id
                jq_qualification=q
                qua=JobQualification(j_id=job,jq_qualification=jq_qualification)
                qua.save()
            company=User_Employeer.objects.get(id=request.session.get('company_id'))
            jobs=Job.objects.filter(j_c_id_id=company)
            return render(request,"Employeer/posted_job.html",{'jobs':jobs})
        else:
            company=User_Employeer.objects.get(id=request.session.get('company_id'))
            jobs=Job.objects.filter(j_c_id_id=company)
            return render(request,"Employeer/posted_job.html",{'jobs':jobs})
    else:
        return redirect('/')
def Edit_job(request):
    """Render the edit form for a job chosen from the posted-job dropdown.

    POST loads the selected job (stashing its id in the session as 'job_id'
    for posted_job's save step) and pre-fills the qualification textarea;
    GET falls back to the posted-job list. Redirects to '/' without a company
    session.
    """
    if request.session.get('company_id'):
        if request.method=="POST":
            company=User_Employeer.objects.get(id=request.session.get('company_id'))
            job=Job.objects.get(j_title=request.POST.get('joblist'),j_c_id_id=company)
            request.session['job_id']=job.id
            qual=JobQualification.objects.filter(j_id_id=job)
            # Rebuild the textarea contents, one qualification per line.
            qualification=""
            for q in qual:
                qualification=qualification+"\n"+q.jq_qualification
            return render(request,"Employeer/Edit_job.html",{'job':job ,'qualification':qualification})
        else:
            company=User_Employeer.objects.get(id=request.session.get('company_id'))
            jobs=Job.objects.filter(j_c_id_id=company)
            return render(request,"Employeer/posted_job.html",{'jobs':jobs})
    else:
        return redirect('/')
def addintolist(request, id):
    """Shortlist job seeker ``id`` for the logged-in company.

    Creates a JobSeekerList row (skipping duplicates) and redirects back to
    the seeker search results, preserving the session's search terms.
    Redirects to '/' when no company is logged in.
    """
    if not request.session.get('company_id'):
        return redirect('/')
    # Default to '' (not None): previously the unused '' defaults were
    # overwritten by session.get, so an absent key made the URL concatenation
    # below raise TypeError on None.
    q = request.session.get('Qualification') or ""
    q1 = request.session.get('location') or ""
    user = User_Employee.objects.get(id=id)
    company = User_Employeer.objects.get(id=request.session.get('company_id'))
    # Only create the shortlist entry once per (seeker, company) pair.
    if not JobSeekerList.objects.filter(e_id_id=user.id, c_id_id=company.id):
        addlist = JobSeekerList(e_id_id=user.id, c_id_id=company.id)
        addlist.save()
    return redirect('/JobseekerList/?Qualification=' + q + '&location=' + q1)
def interestList(request):
    """Show the logged-in company's shortlist of job seekers."""
    if not request.session.get('company_id'):
        return redirect('/')
    company = User_Employeer.objects.get(id=request.session.get('company_id'))
    shortlist = JobSeekerList.objects.filter(c_id_id=company.id)
    users = [User_Employee.objects.get(id=entry.e_id_id) for entry in shortlist]
    return render(request, "Employeer/interestList.html", {'users': users})
def removeFromList(request, id):
    """Remove seeker ``id`` from the logged-in company's shortlist."""
    if not request.session.get('company_id'):
        return redirect('/')
    company = User_Employeer.objects.get(id=request.session.get('company_id'))
    # BUG FIX: the entry was previously looked up by seeker id alone, which
    # raises MultipleObjectsReturned when several companies shortlist the
    # same seeker (and could delete another company's entry).
    entry = JobSeekerList.objects.get(e_id_id=id, c_id_id=company.id)
    entry.delete()
    remaining = JobSeekerList.objects.filter(c_id_id=company.id)
    users = [User_Employee.objects.get(id=e.e_id_id) for e in remaining]
    return render(request, "Employeer/interestList.html", {'users': users})
def download_list(request):
    """Download the company's shortlisted job seekers as a CSV attachment."""
    if not request.session.get('company_id'):
        return redirect('/')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="Applications.csv"'
    writer = csv.writer(response)
    writer.writerow(['Sr. No.','First Name','Last Name','User Name','Email','Mobile No.','Qualification','Address 1','City','State','Country'])
    company = User_Employeer.objects.get(id=request.session.get('company_id'))
    shortlist = JobSeekerList.objects.filter(c_id_id=company.id)
    for row_number, entry in enumerate(shortlist, start=1):
        seeker = User_Employee.objects.get(id=entry.e_id_id)
        writer.writerow([row_number, seeker.e_first_name, seeker.e_last_name,
                         seeker.e_username, seeker.e_email, seeker.e_mobileno,
                         seeker.e_qualification, seeker.e_add1, seeker.e_city,
                         seeker.e_state, seeker.e_country])
    return response
"58843519+parth101999@users.noreply.github.com"
] | 58843519+parth101999@users.noreply.github.com |
24837110275379698b9325d07e209cbb9b7b7609 | d4e9ef18d006b84f82f47c3ea791be3424ab3d63 | /code/sagepay/core.py | 2164a02b4669b718fbc9e27706824d40950b0e28 | [
"BSD-3-Clause"
] | permissive | udox/oscar-sagepay | 7b3d4a342c43021121cd0d26b7cf465f22bc7e7e | bc802ff67198a56b279d97538a31e6dedbbfe47f | refs/heads/master | 2021-01-24T22:59:29.781500 | 2013-12-03T14:47:07 | 2013-12-03T14:47:07 | 14,896,552 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,263 | py | class TransactionNotificationPostResponse(object):
"""
Encapsulate the notification of results of transactions into an object
(page 63 of sage manual)
:param response: content of the SagePay server notification post
:type response: dictionary
"""
    def __init__(self, response):
        # ``response`` is the notification POST payload as a dict of fields.
        self.response = response
    def __getitem__(self, key):
        # Dict-style access delegates straight to the underlying payload.
        return self.response[key]
    def __contains__(self, key):
        # ``in`` support: membership test against the underlying payload.
        return key in self.response
def get(self, key, default):
"""
Return the corresponding value to the key or default if the key is not found
:param key: key to lookup
:type key: str
:param default: default value to return
:type default: everything
:returns: dictionary value or default
"""
try:
return self.response[key]
except KeyError:
return default
    def post_format(self, vendor_name, security_key):
        """
        Reconstruct the POST response content to be MD5 hashed and matched for preventing tampering

        The concatenation order below must match SagePay's signature
        specification exactly -- do not reorder the tuple entries.

        :param vendor_name: SagePay vendor name
        :type vendor_name: str
        :param security_key: security key saved associated to the transaction
        :type security_key: :class:`sagepay.models.SagePayTransaction` security key field
        :returns: str
        """
        values = (
            self.response.get('VPSTxId', ''),
            self.response.get('VendorTxCode', ''),
            self.response.get('Status', ''),
            self.response.get('TxAuthNo', ''),
            vendor_name,
            self.response.get('AVSCV2', ''),
            security_key.strip(),
            self.response.get('AddressResult', ''),
            self.response.get('PostCodeResult', ''),
            self.response.get('CV2Result', ''),
            self.response.get('GiftAid', ''),
            self.response.get('3DSecureStatus', ''),
            self.response.get('CAVV', ''),
            #self.response.get('AddressStatus', ''),
            #self.response.get('PayerStatus', ''),
            self.response.get('CardType', ''),
            self.response.get('Last4Digits', ''),
            self.response.get('DeclineCode', ''),
            self.response.get('ExpiryDate', ''),
            #self.response.get('FraudResponse', ''),
            self.response.get('BankAuthCode', ''),
        )
        return ''.join(values)
@property
def ok(self):
"""
True if the transaction status is ok
"""
if self.response['Status'] == 'OK':
return True
else:
return False
@property
def pending(self):
"""
True if the transaction status is pending
"""
if self.response['Status'] == 'PENDING':
return True
else:
return False
@property
def notauthed(self):
"""
True if the transaction status is notauthed
"""
if self.response['Status'] == 'NOTAUTHED':
return True
else:
return False
@property
def abort(self):
"""
True if the transaction status is abort
"""
if self.response['Status'] == 'ABORT':
return True
else:
return False
@property
def rejected(self):
if self.response['Status'] == 'REJECTED':
return True
else:
return False
@property
def authenticated(self):
"""
True if the transaction status is authenticated
"""
if self.response['Status'] == 'AUTHENTICATED':
return True
else:
return False
@property
def registered(self):
"""
True if the transaction status is registered
"""
if self.response['Status'] == 'REGISTERED':
return True
else:
return False
@property
def error(self):
"""
True if the transaction status is error
"""
if self.response['Status'] == 'ERROR':
return True
else:
return False
class Response(object):
    """
    Encapsulate a SagePay response body into a Python object.

    :param response: raw response body -- newline-separated ``key=value``
        lines (a string; the constructor parses it directly)
    """
    def __init__(self, response):
        self.response = response
        self.data = self._convert_data(response)

    def _convert_data(self, response):
        """Parse ``key=value`` lines into a dict.

        Splits each line on the *first* '=' only, so values that themselves
        contain '=' (e.g. NextURL query strings with several parameters) are
        preserved intact; the old two-segment reconstruction truncated them.
        Blank or '='-less lines are skipped instead of raising IndexError.
        """
        sage_response = {}
        for line in response.split('\n'):
            if '=' not in line:
                continue
            key, _, value = line.partition('=')
            sage_response[key] = value.strip()
        return sage_response

    def __getitem__(self, key):
        return self.data[key]

    def __contains__(self, key):
        return key in self.data

    def __str__(self):
        return self.__unicode__()

    def __unicode__(self):
        # Expose the raw, unparsed response body.
        return self.response

    @property
    def is_successful(self):
        """
        Check if the status of the response is OK

        :returns: Boolean
        """
        if 'OK' in self.data['Status']:
            return True
        else:
            return False
| [
"alessandro@u-dox.com"
] | alessandro@u-dox.com |
e5917255d85b4af9daecc23c83518e37d1f27d7e | e5c4e7dba54860f5f7b46817490b957181a7029e | /desafiocumplo/indices/urls.py | 21ce7287f83c89c27c54f379d5fc567cecbb6dc4 | [] | no_license | tdiazv/desafio_cumplo | 52aa34339eaf45db320a51dcea7ae93e5f9d3b20 | 641b852457eaac123a9cc11f482afaa2ef08a827 | refs/heads/master | 2021-09-28T08:25:02.148041 | 2018-11-16T04:02:07 | 2018-11-16T04:02:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.indices, name="indices"),
] | [
"tdiaz81@gmail.com"
] | tdiaz81@gmail.com |
1934bc89ca74f80dae068ca08e4b567549aa20fa | 67d1190219feca6bb79e0459e33904e8210f12df | /realtors/migrations/0001_initial.py | 0f117f139853ae7a205f2245c175bf98d31b0302 | [] | no_license | Rushik-Gohel/btre_project | a0f2ade5c109b3f0b11af1d79e9b885cd764592d | 4168f5c2e13fcd8af1107fba9ed136b79bb589ee | refs/heads/master | 2022-12-31T11:27:40.909274 | 2020-10-28T13:30:00 | 2020-10-28T13:30:00 | 308,024,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | # Generated by Django 3.1.2 on 2020-10-22 13:40
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.2 (see header); creates the Realtor table.
    # Do not edit by hand -- generate a follow-up migration for schema changes.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Realtor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('photo', models.ImageField(upload_to='photos/%Y/%m/%d/')),
                ('description', models.TextField(blank=True)),
                ('phone', models.CharField(max_length=20)),
                ('email', models.CharField(max_length=50)),
                ('is_mvp', models.BooleanField(default=False)),
                ('hire_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
            ],
        ),
    ]
| [
"gohel.rushik.btech2018@sitpune.edu.in"
] | gohel.rushik.btech2018@sitpune.edu.in |
48ddb464174cc5846a09b6ed05567ec728d2e200 | 6e26b72ac6774bdb25f71d0e6f428bd84effb73f | /fizzbuzz/fizzbuzz.py | 61c2b5edafed5711e7296e8ac6bea222a768493e | [] | no_license | SamuelEllertson/various-projects | 17dc80a09de0d895da14f69eb0bf98ff55f6667e | dcd89cb2500e32a08afb8665ff096562d4d66f2c | refs/heads/master | 2021-08-04T07:05:01.729624 | 2020-07-25T22:55:42 | 2020-07-25T22:55:42 | 201,556,239 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | from collections import OrderedDict
def main():
    """Print the fizzbuzz2 result for each value in 0..19."""
    for number in range(20):
        print(fizzbuzz2(number))
def fizzbuzz(n):
    """Classic fizzbuzz for a single value.

    Returns "fizz", "buzz", "fizzbuzz", or ``n`` itself when no rule matches.
    New rules can be added by appending (word, predicate) pairs to the list.
    """
    # Custom condition factories live here.
    def divisible(n, x):
        return lambda: n % x == 0

    conditions = [
        ("fizz", divisible(n, 3)),
        ("buzz", divisible(n, 5)),
    ]
    matched = [word for word, predicate in conditions if predicate()]
    return "".join(matched) or n
def fizzbuzz2(n):
    """Table-driven fizzbuzz: each word is emitted when its divisor divides n.

    Returns the joined words, or ``n`` itself when nothing matches.
    """
    conditions = OrderedDict()
    # add conditions here (insertion order controls output order)
    conditions["fizz"] = 3
    conditions["buzz"] = 5
    parts = [word for word, divisor in conditions.items() if n % divisor == 0]
    return "".join(parts) or n
# Run the demo only when executed directly (not on import).
if __name__ == '__main__':
    main()
"samuelEllertson@hotmail.com"
] | samuelEllertson@hotmail.com |
9dedd846ed49f891c3ea2109f26b3eed81fcdf88 | 320bf3ddd6233577d9f2f08f046eaef96f881e4e | /simplemooc/core/urls.py | eb0570de064c9f271570646c26f555b2bce99b28 | [
"MIT"
] | permissive | leorzz/simplemooc | 057ba3e220c20907017edfd8d0fc0422f9a6d99c | 8b1c5e939d534b1fd729596df4c59fc69708b896 | refs/heads/master | 2022-10-22T02:24:46.733062 | 2017-12-17T16:37:04 | 2017-12-17T16:37:04 | 112,488,280 | 0 | 1 | MIT | 2022-10-08T17:50:17 | 2017-11-29T14:52:23 | Python | UTF-8 | Python | false | false | 523 | py | from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()  # load admin registrations from installed apps

import simplemooc.core.views

# Core site routes; the names are used by {% url %} / reverse().
urlpatterns = [
    url(r'^$', simplemooc.core.views.home, name='home'),
    url(r'^contact/$',simplemooc.core.views.contact, name='contact'),
    url(r'^about/$',simplemooc.core.views.about, name='about'),
]
#urlpatterns = patterns('simplemooc.core.views',
# url(r'^$','home', name='home'),
# url(r'^contact/$','contact', name='contact'),
# url(r'^about/$','about', name='about'),
#)
| [
"rizzi.leo@gmail.com"
] | rizzi.leo@gmail.com |
967134ceb03da771c4b132732b27fe8a44f308f0 | a41d9d15f5a91565ee513e0782081371df4607a4 | /lesson_002/00_distance.py | d5a3172e9b34c47f1119d8c32ba0d74c8b8c6364 | [] | no_license | lalecsey/python_base | 0591e16ba0a880660d2fb1ae7abde33192de5695 | e94efdfd43a7583ca7dc13eb5063cffc58c286ae | refs/heads/master | 2022-11-26T09:50:52.616777 | 2020-07-29T21:00:56 | 2020-07-29T21:00:56 | 258,837,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Есть словарь координат городов
import pprint
sites = {
    'Moscow': (550, 370),
    'London': (510, 510),
    'Paris': (480, 480),
}

# Build a dict of dicts with the pairwise distance between every city.
# Grid distance is sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2).
# Fixes over the previous version: unbalanced parentheses (SyntaxError),
# assignment into nested dicts that were never created (KeyError), and
# inconsistent 'paris'/'Paris' key casing.


def _distance(a, b):
    """Euclidean distance between two (x, y) points."""
    return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) ** 0.5


distances = {}
for city_a, point_a in sites.items():
    distances[city_a] = {}
    for city_b, point_b in sites.items():
        if city_a != city_b:
            distances[city_a][city_b] = _distance(point_a, point_b)

print(distances)
| [
"lalecsey@gmail.com"
] | lalecsey@gmail.com |
4cc3f07242bbd0aabdbe930007f5dd2f5d588ef5 | 908bfbb0f4ac863a55585783c48efd5583ae52a2 | /oodd/layers/deterministic/residual.py | a524c227514d713ca0b0a0f13d20272eec7caeee | [] | no_license | JakobHavtorn/hvae-oodd | 021f2a1ceb4489a4ac7c70087ce09068d9a5098b | 8aff1e258963aee59256b82d67634304bb24f628 | refs/heads/main | 2023-04-09T08:50:17.285010 | 2022-01-18T14:19:19 | 2022-01-18T14:19:19 | 373,464,051 | 23 | 4 | null | 2022-01-18T11:38:19 | 2021-06-03T10:11:11 | Python | UTF-8 | Python | false | false | 9,343 | py | from typing import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_module import DeterministicModule
from ..convolutions import SameConv2dWrapper, TransposeableNormedSameConv2d
class ResBlockConv2d(DeterministicModule):
    """Gated residual 2D conv block with optional auxiliary-input merging.

    See ``__init__`` for the full parameter contract; ``forward`` documents
    the computation order inline.
    """
    def __init__(
        self,
        in_shape: Tuple,
        kernel_size: int,
        out_channels: int = None,
        stride: int = 1,
        aux_shape: Optional[Tuple] = None,
        downsampling_mode: str = "convolutional",
        upsampling_mode: str = "convolutional",
        transposed: bool = False,
        residual: bool = True,
        weightnorm: bool = True,
        gated: bool = True,
        activation: nn.Module = nn.ReLU,
        dropout: Optional[float] = None,
    ):
        """A Gated Residual Network with stride and transposition, auxilliary input merging, weightnorm and dropout.

        Args:
            in_shape (tuple): input tensor shape (B x C x *D)
            out_channels (int): number of out_channels in convolution output
            kernel_size (int): size of convolution kernel
            stride (int): size of the convolution stride
            aux_shape (tuple): auxiliary input tensor shape (B x C x *D). None means no auxialiary input
            transposed (bool): transposed or not
            residual (bool): use residual connections
            weightnorm (bool): use weight normalization
            activation (nn.Module): activation function class
            dropout (float): dropout value. None is no dropout
        """
        super().__init__(in_shape=in_shape, transposed=transposed, residual=residual, aux_shape=aux_shape)
        # some parameters
        self.channels_in = in_shape[0]
        self.channels_out = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        # transposed (decoder-side) blocks upsample; otherwise downsample
        self.resample_mode = upsampling_mode if transposed else downsampling_mode
        self.transposed = transposed
        self.residual = residual
        self.gated = gated
        self.activation_pre = activation() if self.residual else None
        # first convolution is always non-transposed and stride 1
        self.conv1 = TransposeableNormedSameConv2d(
            in_shape=in_shape,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=1,
            transposed=False,
            resample_mode="convolutional",
            weightnorm=weightnorm,
        )
        # aux op: bring the auxiliary tensor to conv1's output shape
        if aux_shape is not None:
            self.activation_aux = activation()
            # NOTE(review): Python compares these spatial-dim lists
            # lexicographically; assumes H and W scale together -- confirm
            # for non-uniform resampling.
            if list(aux_shape[1:]) > list(self.conv1.out_shape[1:]):
                # Downsample height and width (and match channels)
                # NOTE(review): integer division assumes aux dims are exact
                # multiples of the target dims.
                aux_stride = tuple(np.asarray(aux_shape[1:]) // np.asarray(self.conv1.out_shape[1:]))
                self.aux_op = TransposeableNormedSameConv2d(
                    in_shape=aux_shape,
                    out_channels=self.conv1.out_shape[0],
                    kernel_size=kernel_size,
                    stride=aux_stride,
                    transposed=False,
                    resample_mode=self.resample_mode,
                    weightnorm=weightnorm,
                )
            elif list(aux_shape[1:]) < list(self.conv1.out_shape[1:]):
                # Upsample height and width (and match channels)
                aux_stride = tuple(np.asarray(self.conv1.out_shape[1:]) // np.asarray(aux_shape[1:]))
                self.aux_op = TransposeableNormedSameConv2d(
                    in_shape=aux_shape,
                    out_channels=self.conv1.out_shape[0],
                    kernel_size=kernel_size,
                    stride=aux_stride,
                    transposed=True,
                    resample_mode=self.resample_mode,
                    weightnorm=weightnorm,
                )
            elif aux_shape[0] != self.conv1.out_shape[0]:
                # Change only channels using 1x1 convolution
                self.aux_op = TransposeableNormedSameConv2d(
                    in_shape=aux_shape,
                    out_channels=self.conv1.out_shape[0],
                    kernel_size=1,
                    stride=1,
                    transposed=False,
                    resample_mode=self.resample_mode,
                    weightnorm=weightnorm,
                )
            else:
                # aux_shape and out_shape are the same
                assert aux_shape == self.conv1.out_shape
                self.aux_op = None
        else:
            self.aux_op = None
        self.activation_mid = activation()
        # dropout
        self.dropout = nn.Dropout(dropout) if dropout else dropout
        # second convolution is potentially transposed and potentially resampling
        gated_channels = 2 * out_channels if self.gated else out_channels
        self.conv2 = TransposeableNormedSameConv2d(
            in_shape=self.conv1.out_shape,
            out_channels=gated_channels,
            kernel_size=kernel_size,
            stride=self.stride,
            weightnorm=weightnorm,
            transposed=transposed,
            resample_mode=self.resample_mode,
        )  # doubled out channels for gating
        # output shape
        self._out_shape = (out_channels, *self.conv2.out_shape[1:])  # always out_channels regardless of gating
        # residual connections
        self.residual_op = ResidualConnectionConv2d(self._in_shape, self._out_shape, residual)
    def forward(self, x: torch.Tensor, aux: Optional[torch.Tensor] = None, **kwargs: Any) -> torch.Tensor:
        # input activation: x = activation(x)
        x_act = self.activation_pre(x) if self.residual else x
        # conv 1: y = conv(x)
        y = self.conv1(x_act)
        # merge aux with x: y = y + f(aux)
        # (conditional expression: the addition only happens when aux_op exists)
        y = y + self.aux_op(self.activation_aux(aux)) if self.aux_op is not None else y
        # y = activation(y)
        y = self.activation_mid(y)
        # dropout
        y = self.dropout(y) if self.dropout else y
        # conv 2: y = conv(y)
        y = self.conv2(y)
        # gate: y = y_1 * sigmoid(y_2)
        if self.gated:
            h_stack1, h_stack2 = y.chunk(2, 1)
            sigmoid_out = torch.sigmoid(h_stack2)
            y = h_stack1 * sigmoid_out
        # resiudal connection: y = y + x
        y = self.residual_op(y, x)
        return y
class ResidualConnectionConv2d(nn.Module):
    """Residual (skip) connection between tensors of possibly different shapes.

    The skip input is channel-padded (or, via a negative pad, truncated) and
    spatially average-pooled as needed before being added to the module
    output. When the output is spatially *larger* than the input, the
    residual connection is disabled entirely and forward returns y unchanged.
    """

    def __init__(self, in_shape, out_shape, residual=True):
        """
        Args:
            in_shape (tuple): shape of the skip input x (channels first, no batch dim).
            out_shape (tuple): shape of the module output y = f(x).
            residual (bool): whether to apply the connection y' = y + x = f(x) + x.
        """
        super().__init__()
        self.residual = residual
        self.in_shape = in_shape
        self.out_shape = out_shape
        one_dim = len(in_shape) == 2  # (C, L) tensors vs. (C, H, W)

        # --- channel mismatch: pad x with zero channels, or drop surplus ones ---
        delta_c = int(out_shape[0]) - int(in_shape[0])
        if residual and delta_c != 0:
            # A negative delta makes F.pad truncate input channels.
            self.residual_padding = [0, 0, 0, delta_c] if one_dim else [0, 0, 0, 0, 0, delta_c]
        else:
            self.residual_padding = None

        # --- spatial mismatch (lexicographic list comparison, as elsewhere) ---
        if residual and list(out_shape)[1:] < list(in_shape)[1:]:
            # Output smaller: average-pool the skip input down.
            pool_cls = nn.AvgPool1d if len(out_shape[1:]) == 1 else nn.AvgPool2d
            stride = tuple((np.asarray(in_shape)[1:] // np.asarray(out_shape)[1:]).tolist())
            self.residual_op = SameConv2dWrapper(in_shape, pool_cls(3, stride=stride))
        elif residual and list(out_shape)[1:] > list(in_shape)[1:]:
            # Output larger: an upsample op is constructed but never used,
            # because the residual is switched off right below (forward then
            # returns y unchanged). Kept to preserve the original behavior.
            self.residual_op = nn.Upsample(size=self.out_shape[1:], mode="nearest")
            self.residual = False
        else:
            self.residual_op = None

    def forward(self, y, x):
        if not self.residual:
            return y
        if self.residual_padding is not None:
            x = F.pad(x, self.residual_padding)
        if self.residual_op is not None:
            x = self.residual_op(x)
        return y + x

    def __repr__(self):
        return (
            f"ResidualConnectionConv2d(residual={self.residual!r}, "
            f"residual_padding={self.residual_padding!r})"
        )
| [
"jdh@corti.ai"
] | jdh@corti.ai |
fab770eb4d1f763e13b1b76d2afa8e6ba0c43d3f | bd238aa1ef55a731b55431f596f7d06d88188aec | /latihansql.py | 708dad61c6a2cb2fc7f16728cb128a136a350054 | [] | no_license | ilman79/Tugas_DSU | aa3ecaa1b43b3497c12faf5668ed66a5ea260005 | aabfffbb94290126c0b5601f201a7533b8cfe92e | refs/heads/main | 2023-05-30T18:47:37.738360 | 2021-07-02T13:35:42 | 2021-07-02T13:35:42 | 382,356,326 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,847 | py | # nomor 1
print("===============================================")
print('NOMOR 1')
print("===============================================")
import psycopg2
#establishing the connection
try :
conn = psycopg2.connect(
database="latihan", user='postgres', password='easy',
host='localhost', port= '5433'
)
print('sukses')
except :
print('gagal')
cursor = conn.cursor()
cursor.execute("SELECT * FROM _offices ORDER BY country, state, city")
rows =cursor.fetchall()
print(rows)
print("===============================================")
print('NOMOR 2')
print("===============================================")
# nomor 2
cursor.execute("SELECT customernumber FROM _customers")
total_compeny = cursor.fetchall()
print(len(total_compeny))
print("===============================================")
print('NOMOR 3')
print("===============================================")
# nomor 3
cursor.execute("SELECT sum (amount) FROM _payments")
price = cursor.fetchall()
print(price)
print("===============================================")
print("NOMOR 4")
print("===============================================")
# nomor 4
cursor.execute("SELECT productline FROM _productlines WHERE productline @@ to_tsquery('Cars') ")
cars = cursor.fetchall()
print(cars)
print("===============================================")
print("NOMOR 5")
print("===============================================")
# nomor 5
cursor.execute("SELECT sum(amount) FROM _payments WHERE paymentdate @@ to_tsquery('2004-10-28')")
tot_okt = cursor.fetchall()
print(tot_okt)
print("===============================================")
print("NOMOR 6")
print("===============================================")
# nomor 6
cursor.execute("SELECT amount FROM _payments WHERE amount > 100000")
pay_greater = cursor.fetchall()
print(pay_greater)
print("===============================================")
print("NOMOR 7")
print("===============================================")
# nomor 7
cursor.execute("SELECT productline FROM _products ")
prod_line = cursor.fetchall()
print(prod_line)
print("===============================================")
print("NOMOR 8")
print("===============================================")
# nomor 8
cursor.execute("SELECT count(distinct productline) FROM _products ")
jenis = cursor.fetchall()
print(jenis)
print("===============================================")
print("NOMOR 9")
print("===============================================")
# nomor 9
cursor.execute("SELECT min(amount) FROM _payments")
minimum = cursor.fetchall()
print(minimum)
print("===============================================")
print("NOMOR 10")
print("===============================================")
# nomor 10
cursor.execute("SELECT (customernumber,checknumber) FROM _payments WHERE amount > 5000")
pay_5000 = cursor.fetchall()
print(pay_5000)
| [
"gifariilman79@gmail.com"
] | gifariilman79@gmail.com |
c9d370f3a6c1c0789eba572688667183135b18a1 | 67bc56df690a45d8c575087336dead5238e6e41f | /todos/.~c9_invoke_WcXKvD.py | 1f79ad05b1d4c1f3c268d08130cac9180d109090 | [] | no_license | ArturoGarciaRegueiro/caso-practico-1 | 9e032c9ee224a1ad27209aaf3d771b5ae79fc8b6 | 40b07f80043f7fc084ce6939df195804e6dce789 | refs/heads/main | 2023-02-07T13:07:35.116897 | 2021-01-03T20:00:28 | 2021-01-03T20:00:28 | 326,159,725 | 0 | 0 | null | 2021-01-03T20:00:29 | 2021-01-02T10:36:39 | Python | UTF-8 | Python | false | false | 953 | py | import os
import json
import base64
from todos import decimalencoder
import boto3
dynamodb = boto3.resource('dynamodb')
def translate(event, context):
    """Lambda handler: fetch a todo by id and detect its dominant language.

    Responds with the stored item plus AWS Comprehend's language detection
    for its text. Returns 404 when the id does not exist.

    Args:
        event: API Gateway event; the todo id is read from
            ``event['pathParameters']['id']``.
        context: Lambda context (unused).
    """
    table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])

    # fetch todo from the database
    result = table.get_item(
        Key={
            'id': event['pathParameters']['id']
        }
    )
    item = result.get('Item')
    if item is None:
        # get_item returns no 'Item' key for unknown ids; the original code
        # would have raised KeyError here instead.
        return {
            "statusCode": 404,
            "body": json.dumps({"error": "todo not found"}),
        }

    # Detect the dominant language of the todo's text. The region comes from
    # the Lambda runtime environment (AWS_REGION) instead of the previous
    # 'region' placeholder, which is not a valid AWS region name.
    comprehend = boto3.client(
        service_name='comprehend',
        region_name=os.environ.get('AWS_REGION', 'us-east-1'),
    )
    # assumes each todo item stores its body under the 'text' key — TODO confirm
    text = item.get('text', '')
    detection = comprehend.detect_dominant_language(Text=text)

    # Return the item together with the detection result (the original built
    # this payload as `entry` but never used it, responding "HY" instead).
    return {
        "statusCode": 200,
        "body": json.dumps(
            {"item": item, "languages": detection.get('Languages', [])},
            cls=decimalencoder.DecimalEncoder,
        ),
    }
"ec2-user@ip-172-31-69-206.ec2.internal"
] | ec2-user@ip-172-31-69-206.ec2.internal |
32d737cebe0b547f8e878688dbf6cd4cf7d87b91 | 63cccc5d4e462bb764b6fccd6ae42405d3620995 | /application/views.py | 9d3f934835215935f93ac2eb0c04a17d7658a23a | [] | no_license | millalin/Kids-Say-the-Darndest-Things | acc13178bd4e179851df27328bdf32bc47719b25 | 06fd7bd9aa61fb2fd70c3f8b5a64f68f73315f6e | refs/heads/master | 2023-03-23T01:57:45.382896 | 2023-03-06T12:49:21 | 2023-03-06T12:49:21 | 198,590,952 | 1 | 0 | null | 2021-03-20T01:27:07 | 2019-07-24T08:21:44 | Python | UTF-8 | Python | false | false | 133 | py | from flask import render_template
from application import app
@app.route("/")
def index():
return render_template("index.html")
| [
"milla.lintunen@hotmail.com"
] | milla.lintunen@hotmail.com |
00def0089a0884f90a9b4480a924f1d5219455ca | b902f5bbbd19660e6bb52f3595daf4126a759cd0 | /abstractGraph.py | 39246dcfe6f6550963ea9176c82fb2701df6c8e3 | [] | no_license | Sp1keeeee/MaMadroid | 0ce85afb2019c45464c79a27f519a7e2cee137fe | a80a619bbcccacc7fdd22b9cd1d4d281b28d72ed | refs/heads/master | 2023-03-21T19:22:45.660287 | 2021-03-17T06:29:46 | 2021-03-17T06:29:46 | 348,250,912 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,600 | py | #coding=utf-8
'''
info: 把API调用抽象为class和family模式,主要过程是首先通过class.txt把每个文件调用图对应的txt文件抽象成class然后通过Packages.txt和Families.txt将其抽象为包和家族文件存放到package和family文件
夹中对应的文件中
'''
import os
from multiprocessing import Process
def _preprocess_graph(app, _dir):
    """Parse one raw call-graph file into a tab-separated caller/callee file.

    Each input line looks like
        <pkg.Class: ret method()> ==> ['<pkg.Other: ...>\\n', '<pkg.Third: ...>\\n']
    Only the class part of each entry is kept. The cleaned rows (caller first,
    then each callee, tab-separated, newline-terminated) are written to a temp
    file named after *app*'s basename in the current working directory, which
    is then handed to selfDefined() for abstraction (and deleted there).
    """
    appl = app.split("/")[-1]  # temp file in the current working directory
    with open(appl, 'w') as fp:
        with open(app) as fh:
            for lines in fh:
                # Example: <com.gionee.account.sdk.GioneeAccount: Z isTnLogin()> ==>
                #   ['<com...GioneeAccount: ...getUsername()>\n', '<android.text.TextUtils: isEmpty(...)>\n']
                caller = ""
                callee = []
                line = lines.split(" ==> ")  # split caller from the callee list
                caller = line[0].split(":")[0].replace("<", "")  # class name of the caller
                if "," in str(line[1]):  # more than one callee on this line
                    # NOTE: splits on the literal backslash-n sequence that the
                    # serialized list format contains — format-specific.
                    subc = line[1].split("\\n',")  # separate the individual callees
                    for i in subc:
                        subCallees = i.split(":")  # class part of each callee
                        if "[" in subCallees[0]:  # strip list/quote decoration
                            callee.append(subCallees[0].replace("['<", "").strip())
                        else:
                            callee.append(subCallees[0].replace("'<", "").strip())
                else:  # exactly one callee
                    callee.append(line[1].split(":")[0].replace("['<", "").strip())
                fp.write(caller + "\t")  # caller first ...
                _length = len(callee)
                for a in range(_length):  # ... then the callees, newline-terminated
                    if a < _length - 1:
                        fp.write(str(callee[a]).strip('"<') + "\t")
                    else:
                        fp.write(str(callee[a]).strip('"<') + "\n")
    selfDefined(appl, _dir)
def selfDefined(f, _dir):
    """Abstract the preprocessed call-graph file *f* at all three levels.

    Loads the whitelist files (Packages.txt, Families.txt, classes.txt) from
    the working directory, produces the class-level file first, then
    abstracts that file to package and family mode in two worker processes.
    The intermediate file *f* is deleted once the class file exists.
    """
    packages = []
    with open("Packages.txt") as src:
        for entry in src:
            if entry.startswith('.'):
                # NOTE(review): dot-prefixed entries only lose '\n' and the
                # leading dot — surrounding spaces are kept, unlike below.
                packages.append(entry.strip('\n').lstrip('.'))
            else:
                packages.append(entry.strip('\n').strip())
    with open("Families.txt") as src:
        families = [entry.strip('\n').strip() for entry in src]
    with open("classes.txt") as src:
        classes = [entry.strip('\n').strip() for entry in src]

    class_file = abstractToClass(classes, f, _dir)
    os.remove(f)  # the raw temp file is no longer needed

    # Reversed so later (presumably more specific) prefixes are tried first
    # by abstractToMode — TODO confirm the intended ordering.
    packages.reverse()

    family_worker = Process(target=abstractToMode, args=(families, class_file, _dir))
    family_worker.start()
    # NOTE(review): only the package worker is joined, as in the original.
    package_worker = Process(target=abstractToMode, args=(packages, class_file, _dir))
    package_worker.start()
    package_worker.join()
def _repeat_function(lines, P, fh, _sep): #lines:处理过后的对应文件中每一行包含的每一个数据 P:自定义的class文件(相当于一个名单)fh:某一个APP对应的class文件夹中的文件 _sep:制表符
if lines.strip() in P: #如果在名单中写入class文件夹中的文件
fh.write(lines.strip() + _sep)
else: #如果不在名单中
if "junit." in lines: #对一些特殊字符串的处理
return
if '$' in lines:
if lines.replace('$', '.') in P:
fh.write(lines.replace('$', '.') + _sep)
return
elif lines.split('$')[0] in P:
fh.write(lines.split('$')[0] + _sep)
return
items = lines.strip().split('.')
item_len = len(items)
count_l = 0
for item in items:
if len(item) < 3:
count_l += 1
if count_l > (item_len / 2):#字符小于3个的大于整体个数的二分之一 就认定为混淆
fh.write("obfuscated" + _sep)
else:
fh.write("self-defined" + _sep) #否则为自定义
def abstractToClass(_class_whitelist, _app, _dir):
    """Abstract every API call in *_app* to class level and return the new path.

    The result is written to ``<_dir>/class/<basename of _app>``; the last
    token on each row is terminated with a newline, all others with a tab.
    """
    out_path = _dir + "/class/" + _app.split('/')[-1]
    with open(out_path, 'w') as dst, open(_app) as src:
        for row in src:
            # Drop single-character fragments (stray symbols such as '$').
            tokens = [tok for tok in row.strip('\n').split('\t') if len(tok) > 1]
            last = len(tokens) - 1
            for idx, tok in enumerate(tokens):
                _repeat_function(tok, _class_whitelist, dst, "\t" if idx < last else "\n")
    return out_path
def abstractToMode(_whitelist, _app, _dir):
    """Abstract a class-level file to either package mode or family mode.

    A whitelist longer than 15 entries selects package mode (output under
    ``<_dir>/package/``); otherwise family mode (``<_dir>/family/``), where a
    matched prefix is mapped to its family name. Tokens matching no whitelist
    prefix are silently dropped; every written token is followed by a tab and
    every row ends with a newline.
    """
    # Prefix -> family-name mapping, used in family mode only.
    families = {
        "org.xml": 'xml', "com.google": 'google', "javax": 'javax',
        "java": 'java', "org.w3c.dom": 'dom', "org.json": 'json',
        "org.apache": 'apache', "android": 'android', "dalvik": 'dalvik',
    }
    family_mode = len(_whitelist) <= 15
    subdir = "/family/" if family_mode else "/package/"
    out_path = _dir + subdir + _app.split('/')[-1]
    with open(out_path, 'w') as dst, open(_app) as src:
        for row in src:
            for token in row.strip('\n').split('\t'):
                if "obfuscated" in token or "self-defined" in token:
                    dst.write(token + '\t')
                    continue
                # First matching whitelist prefix wins, so list order matters.
                for prefix in _whitelist:
                    if token.startswith(prefix):
                        dst.write((families[prefix] if family_mode else prefix) + '\t')
                        break
            dst.write('\n')
| [
"2679076617@qq.com"
] | 2679076617@qq.com |
fdd3840ef0244ac6f5ad142757b5b277fb14263f | dd6c468d4ea2653fed24607ecd4f6eceb925b557 | /make_db_shelve.py | f3b11537a72108230a45ffaf43a0139e5eaf9023 | [] | no_license | yangbaoguo1314/python | 3015ea5ec4d559276fddd14f2e9dee5b69f0f344 | 91ea00844d81ba2efd8fcbfb2852d51cdf3900a1 | refs/heads/master | 2021-07-10T20:01:08.908083 | 2020-07-04T10:29:52 | 2020-07-04T10:29:52 | 160,601,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | from initdata import bob,sue
import shelve
db=shelve.open('people-shelve')
db['bob']=bob
db['sue'] = sue
db.close() | [
"noreply@github.com"
] | yangbaoguo1314.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.