seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
15807183248 | # --------------------------------------------------------
# PYTHON PROGRAM
# Here is where we are going to define our set of...
# - Imports
# - Global Variables
# - Functions
# ...to achieve the functionality required.
# When executing > python 'this_file'.py in a terminal,
# the Python interpreter will load our program,
# but it will execute nothing yet.
# --------------------------------------------------------
import pyspark
import pyspark.streaming
import os
import shutil
import time
# ------------------------------------------
# FUNCTION process_line
# ------------------------------------------
def process_line(line, bad_chars):
    """Split *line* into cleaned words.

    Removes every character listed in *bad_chars*, collapses all runs of
    whitespace (tabs included), and drops empty tokens as well as tokens
    whose first character is a decimal digit.

    Args:
        line (str): raw text line.
        bad_chars (iterable of str): substrings to delete from the line.

    Returns:
        list[str]: the surviving words, in their original order.
    """
    # 1. Strip out every unwanted character.
    for c in bad_chars:
        line = line.replace(c, '')
    # 2. str.split() with no argument splits on any whitespace run
    #    (spaces, tabs, leading/trailing) and never yields empty tokens;
    #    this replaces the original chain of '  ' -> ' ' replacements,
    #    which only collapsed runs of bounded length, plus the
    #    strip()/rstrip() pair and the explicit '' filter.
    words = line.split()
    # 3. Keep only words that do not start with a decimal digit — the
    #    original ord() range test (ord > 57 or ord < 48) written readably.
    return [word for word in words if not ('0' <= word[0] <= '9')]
# ------------------------------------------
# FUNCTION my_model
# ------------------------------------------
def my_model(ssc, monitoring_dir, result_dir, bad_chars):
    """Attach the per-micro-batch word-count pipeline to *ssc*.

    Every micro-batch of files arriving in monitoring_dir is tokenised,
    the words are counted, and each batch's counts are both printed and
    saved under result_dir.  This mirrors the Spark Core word_count
    example, with each RDD operation replaced by its DStream equivalent:
    the DStream is simply the sequence of RDDs generated per micro-batch.

    Args:
        ssc: pyspark.streaming.StreamingContext being modelled.
        monitoring_dir (str): directory watched for newly arrived files.
        result_dir (str): prefix for the per-batch output directories.
        bad_chars (list[str]): characters stripped from every line.
    """
    # 1. Operation C1: 'textFileStream' stores the novel content of
    #    monitoring_dir for each time step into a new RDD of the DStream.
    inputDStream = ssc.textFileStream(monitoring_dir)
    # 2. Operation T1: 'flatMap' tokenises each line into clean words,
    #    so each underlying RDD contains all words of its input RDD.
    allWordsDStream = inputDStream.flatMap(lambda x: process_line(x, bad_chars))
    # 3. Operation T2: 'map' turns each word into a (word, 1) pair item.
    pairWordsDStream = allWordsDStream.map(lambda x: (x, 1))
    # 4. Operation T3: 'reduceByKey' aggregates, per micro-batch, the
    #    number of times each word appears.
    solutionDStream = pairWordsDStream.reduceByKey(lambda x, y: x + y)
    # 5. Operation S1: cache the result (it is consumed twice below),
    #    print a sample of each micro-batch, and persist every
    #    micro-batch to a new directory prefixed by result_dir — each
    #    directory is similar to the ones produced with Core Spark.
    solutionDStream.cache()
    solutionDStream.pprint()
    solutionDStream.saveAsTextFiles(result_dir)
# ------------------------------------------
# FUNCTION create_ssc
# ------------------------------------------
def create_ssc(sc, monitoring_dir, result_dir, max_micro_batches, time_step_interval, bad_chars):
    """Create, configure and model a new Spark Streaming context.

    Args:
        sc: the underlying SparkContext used to process the data.
        monitoring_dir (str): directory watched for new files.
        result_dir (str): prefix for the per-batch result directories.
        max_micro_batches (int): number of micro-batches of data to keep
            before old data is released for garbage collection.
        time_step_interval (int | float): batch interval in seconds.
        bad_chars (list[str]): characters stripped from every line.

    Returns:
        The configured and modelled pyspark.streaming.StreamingContext.
    """
    # 1. Main entry point for streaming functionality.  It requires the
    #    underlying SparkContext plus the batch interval at which newly
    #    arrived data is collected into a micro-batch for processing.
    ssc = pyspark.streaming.StreamingContext(sc, time_step_interval)
    # 2. Bound how long past-arrived data is retained — either for first
    #    processing or for re-aggregation with new data.  A 24/7 stream
    #    would otherwise grow without bound; after the timeout the data
    #    is released for garbage collection.  The bound is
    #    max_micro_batches worth of batch intervals.
    ssc.remember(max_micro_batches * time_step_interval)
    # 3. Declare the full set of transformations and output operations
    #    the context must apply once it receives data.
    my_model(ssc, monitoring_dir, result_dir, bad_chars)
    # 4. We return the ssc configured and modelled.
    return ssc
# ------------------------------------------
# FUNCTION get_source_dir_file_names
# ------------------------------------------
def get_source_dir_file_names(local_False_databricks_True, source_dir, verbose):
    """Return the alphabetically sorted file names found in *source_dir*.

    Works both locally (os.listdir) and on Databricks (dbutils.fs.ls);
    in the Databricks case the file name is parsed out of the FileInfo's
    string representation.  When *verbose* is truthy each collected name
    is printed as it is found.
    """
    # Collect the directory entries from the right backend.
    if local_False_databricks_True == False:
        entries = os.listdir(source_dir)
    else:
        entries = dbutils.fs.ls(source_dir)
    names = []
    for entry in entries:
        text = str(entry)
        if local_False_databricks_True == True:
            # A DBFS FileInfo stringifies with a "name='...'," segment;
            # cut out the value between "name='" and the closing "',".
            start = text.index("name='") + 6
            text = text[start:]
            text = text[:text.index("',")]
        names.append(text)
        if verbose == True:
            print(text)
    names.sort()
    return names
# ------------------------------------------
# FUNCTION streaming_simulation
# ------------------------------------------
def streaming_simulation(local_False_databricks_True, source_dir, monitoring_dir, time_step_interval, verbose):
    """Simulate streaming arrival by copying one file per time step.

    The files of source_dir are copied into monitoring_dir one at a
    time, one per time_step_interval seconds, so Spark Streaming sees
    each copy as a freshly arrived micro-batch.  Locally shutil is used;
    on Databricks, dbutils.fs.
    """
    # 1. The file names to transfer, in alphabetical order.
    files = get_source_dir_file_names(local_False_databricks_True, source_dir, verbose)
    # 2. Small initial delay so the first copy lands inside the first
    #    batch interval, then record the reference start time.
    time.sleep(time_step_interval * 0.1)
    start = time.time()
    # 2.1. If verbose mode, we inform of the starting time
    if (verbose == True):
        print("Start time = " + str(start))
    # 3. Counter of files transferred so far.
    count = 0
    # 4. Simulate the dynamic arrival of the files from source_dir to
    #    monitoring_dir (moved one by one, one per time period).
    for file in files:
        # 4.1. Copy the next file into the monitored directory.
        if local_False_databricks_True == False:
            shutil.copyfile(source_dir + file, monitoring_dir + file)
        else:
            dbutils.fs.cp(source_dir + file, monitoring_dir + file)
        # 4.2. One more file has been transferred.
        count = count + 1
        # 4.3. If verbose mode, report the transfer and elapsed time.
        if (verbose == True):
            print("File " + str(count) + " transferred. Time since start = " + str(time.time() - start))
        # 4.4. Sleep until the next time slot.
        # NOTE(review): if a copy takes longer than the interval this
        # sleep argument goes negative and time.sleep raises ValueError
        # — confirm that is acceptable for this simulation.
        time.sleep((start + (count * time_step_interval)) - time.time())
# ------------------------------------------
# FUNCTION my_main
# ------------------------------------------
def my_main(sc,
            local_False_databricks_True,
            source_dir,
            monitoring_dir,
            checkpoint_dir,
            result_dir,
            max_micro_batches,
            time_step_interval,
            verbose,
            bad_chars):
    """Run the whole streaming word-count job end to end.

    Sets up (or restores from checkpoint) the streaming context, starts
    it, simulates the streaming arrival of the dataset files, and shuts
    the context down — including a JVM-level safety stop so no Spark
    application is left running in the background.
    """
    # 1. Get the active streaming context, or build a fresh one from the
    #    checkpoint directory via create_ssc.  This sets up the
    #    computation performed when the system receives data.
    ssc = pyspark.streaming.StreamingContext.getActiveOrCreate(checkpoint_dir,
                                                               lambda: create_ssc(sc,
                                                                                  monitoring_dir,
                                                                                  result_dir,
                                                                                  max_micro_batches,
                                                                                  time_step_interval,
                                                                                  bad_chars
                                                                                  )
                                                               )
    # 2. Start the context: Spark Streaming schedules jobs in a separate
    #    thread.  A context can be started only once, and only after the
    #    full set of transformations and output operations is declared.
    ssc.start()
    # 3. Block this (main) thread for one batch interval so the context
    #    is fully up before files start arriving.
    ssc.awaitTerminationOrTimeout(time_step_interval)
    # 4. Simulate the one-by-one arrival of the dataset files.
    streaming_simulation(local_False_databricks_True, source_dir, monitoring_dir, time_step_interval, verbose)
    # 5. All files transferred and processed: stop streaming, but keep
    #    the underlying SparkContext alive.
    ssc.stop(stopSparkContext=False)
    # 6. Extra security stop acting directly on the JVM, in case the
    #    streaming context was not fully stopped.  Crucial to avoid a
    #    Spark application silently working in the background — on a
    #    pay-per-hour cluster (e.g. private Databricks) that can end in
    #    an unexpected high bill.
    if (not sc._jvm.StreamingContext.getActive().isEmpty()):
        sc._jvm.StreamingContext.getActive().get().stop(False)
# ---------------------------------------------------------------
# PYTHON EXECUTION
# This is the main entry point to the execution of our program.
# It provides a call to the 'main function' defined in our
# Python program, making the Python interpreter to trigger
# its execution.
# ---------------------------------------------------------------
if __name__ == '__main__':
    # 1. Extra input arguments: punctuation stripped before counting.
    bad_chars = ['?', '!', '.', ',', ';', '_', '-', '\'', '|', '--',
                 '(', ')', '[', ']', '{', '}', ':', '&', '\n']
    # 2. Local or Databricks execution mode.
    local_False_databricks_True = False
    # 3. We set the path to my_dataset and my_result
    my_local_path = "/home/nacho/CIT/Tools/MyCode/Spark/"
    my_databricks_path = "/"
    source_dir = "FileStore/tables/2_Spark_Streaming/my_dataset/"
    monitoring_dir = "FileStore/tables/2_Spark_Streaming/my_monitoring/"
    checkpoint_dir = "FileStore/tables/2_Spark_Streaming/my_checkpoint/"
    result_dir = "FileStore/tables/2_Spark_Streaming/my_result/"
    # Prefix every directory with the root matching the execution mode.
    if local_False_databricks_True == False:
        source_dir = my_local_path + source_dir
        monitoring_dir = my_local_path + monitoring_dir
        checkpoint_dir = my_local_path + checkpoint_dir
        result_dir = my_local_path + result_dir
    else:
        source_dir = my_databricks_path + source_dir
        monitoring_dir = my_databricks_path + monitoring_dir
        checkpoint_dir = my_databricks_path + checkpoint_dir
        result_dir = my_databricks_path + result_dir
    # 4. We set the Spark Streaming parameters
    # 4.1. Number of micro-batches (i.e., files) in the dataset.
    dataset_micro_batches = 6
    # 4.2. Seconds between micro-batches (files) appearing.
    time_step_interval = 3
    # 4.3. Maximum number of micro-batches retained before data is
    #      considered old and dumped.
    max_micro_batches = dataset_micro_batches + 1
    # 4.4. We configure verbosity during the program run
    verbose = False
    # 5. Remove the working directories so every run starts clean.
    if local_False_databricks_True == False:
        # 5.1. We remove the monitoring_dir
        if os.path.exists(monitoring_dir):
            shutil.rmtree(monitoring_dir)
        # 5.2. We remove the result_dir
        if os.path.exists(result_dir):
            shutil.rmtree(result_dir)
        # 5.3. We remove the checkpoint_dir
        if os.path.exists(checkpoint_dir):
            shutil.rmtree(checkpoint_dir)
    else:
        # 5.1. We remove the monitoring_dir
        dbutils.fs.rm(monitoring_dir, True)
        # 5.2. We remove the result_dir
        dbutils.fs.rm(result_dir, True)
        # 5.3. We remove the checkpoint_dir
        dbutils.fs.rm(checkpoint_dir, True)
    # 6. We re-create the directories again
    if local_False_databricks_True == False:
        # 6.1. We re-create the monitoring_dir
        os.mkdir(monitoring_dir)
        # 6.2. We re-create the result_dir
        os.mkdir(result_dir)
        # 6.3. We re-create the checkpoint_dir
        os.mkdir(checkpoint_dir)
    else:
        # 6.1. We re-create the monitoring_dir
        dbutils.fs.mkdirs(monitoring_dir)
        # 6.2. We re-create the result_dir
        dbutils.fs.mkdirs(result_dir)
        # 6.3. We re-create the checkpoint_dir
        dbutils.fs.mkdirs(checkpoint_dir)
    # 7. Configure the Spark Context (reused if one is already active).
    sc = pyspark.SparkContext.getOrCreate()
    sc.setLogLevel('WARN')
    print("\n\n\n")
    # 8. We call to our main function
    my_main(sc,
            local_False_databricks_True,
            source_dir,
            monitoring_dir,
            checkpoint_dir,
            result_dir,
            max_micro_batches,
            time_step_interval,
            verbose,
            bad_chars
            )
| segunar/BIG_data_sample_code | Spark/Workspace/2_Spark_Streaming/2_Stateless_Transformations/02_word_count.py | 02_word_count.py | py | 15,508 | python | en | code | 0 | github-code | 36 |
19631354561 | from __future__ import unicode_literals
from zope.component.interfaces import ObjectEvent, IObjectEvent
from zope.interface import Attribute, implements
class IGSJoinSiteEvent(IObjectEvent):
    """ An event issued after someone has joined a site."""
    # Interface attributes, declared with zope.interface.Attribute so
    # implementations are required to provide them.
    siteInfo = Attribute('The site that is being joined')
    memberInfo = Attribute('The new site member')
class IGSLeaveSiteEvent(IObjectEvent):
    """ An event issued after someone has left a site."""
    # Interface attributes, declared with zope.interface.Attribute so
    # implementations are required to provide them.
    siteInfo = Attribute('The site that is being left')
    memberInfo = Attribute('The old site member')
class GSJoinSiteEvent(ObjectEvent):
    """Concrete IGSJoinSiteEvent fired after a person joins a site."""
    # NOTE(review): implements() is the legacy Python-2 class-advice
    # API; zope.interface >= 5 only supports the @implementer decorator.
    implements(IGSJoinSiteEvent)
    def __init__(self, context, siteInfo, memberInfo):
        ObjectEvent.__init__(self, context)
        self.siteInfo = siteInfo
        self.memberInfo = memberInfo
class GSLeaveSiteEvent(ObjectEvent):
    """Concrete IGSLeaveSiteEvent fired after a person leaves a site."""
    # NOTE(review): implements() is the legacy Python-2 class-advice
    # API; zope.interface >= 5 only supports the @implementer decorator.
    implements(IGSLeaveSiteEvent)
    def __init__(self, context, siteInfo, memberInfo):
        ObjectEvent.__init__(self, context)
        self.siteInfo = siteInfo
        self.memberInfo = memberInfo
| groupserver/gs.site.member.base | gs/site/member/base/event.py | event.py | py | 1,050 | python | en | code | 0 | github-code | 36 |
class Mafia():
    """The Mafia role: acts at night by voting to kill a player.

    Interacted with through a narrator object that tallies votes and
    broadcasts messages, and Discord-style message objects carrying an
    author and a text content.
    """
    def __init__(self, player_id, player_name):
        # Role identity as shown to the players.
        self.name = "Mafia"
        self.changed_name = self.name
        # Mafia members may act, and they act during the night phase.
        self.can_act = True
        self.act_time = "Night"
        self.alignment = "Mafia"
        # No extra confirmation step is required after acting.
        self.need_await = False
        self.player_id = player_id
        self.player_name = player_name
        self.last_will = ""
    def act(self, narrator, message):
        """Register this member's kill vote.

        *message.content* is expected to look like '!act <number>'; the
        number is mapped to a target player id via the narrator's index
        map and recorded as a vote from the message's author.
        """
        voter_id = message.author.id
        print(message.content)
        index = int(message.content.split(" ")[1])
        act_id = narrator.get_index_id_map()[index]
        narrator.add_vote(voter_id, act_id)
    def get_act_time(self):
        """Return the phase ('Night') in which this role acts."""
        return self.act_time
    def whoami(self):
        """Return the help text explaining how this role votes."""
        me_string = (
            "Type `!act <number>` to vote to kill <number>.\n"
            "For example, `!act 0` will vote to kill 0.\n"
            "A majority vote is required to kill someone.\n"
            "All mafia `must` vote.\n"
        )
        return me_string
    def set_will(self, message):
        """Store everything after the command word as the last will."""
        self.last_will = " ".join(message.content.split(" ")[1: ])
    async def broadcast_will(self, narrator):
        """Announce this player's last will (or its absence) to the town."""
        if len(self.last_will) > 0:
            await narrator.broadcast_message("Town Hall", "{}'s last will: {}".format(self.player_name, self.last_will))
        else:
            await narrator.broadcast_message("Town Hall", "{} had no last will.".format(self.player_name)) | 0h90/Mafioso | Mafia.py | Mafia.py | py | 1,400 | python | en | code | 0 | github-code | 36 |
73583260585 | import phunspell
import inspect
import unittest
class TestItIT(unittest.TestCase):
    """Spell-check tests for the Italian (it_IT) phunspell dictionary."""
    # One shared speller for the whole class — loading the dictionary is
    # the expensive part.
    pspell = phunspell.Phunspell('it_IT')
    def test_word_found(self):
        # A valid Italian word is accepted by lookup().
        self.assertTrue(self.pspell.lookup("fisciù"))
    def test_word_not_found(self):
        # A non-Italian word is rejected.
        self.assertFalse(self.pspell.lookup("phunspell"))
    def test_lookup_list_return_not_found(self):
        # lookup_list() returns only the words NOT found in the dictionary.
        words = "fisciù gianna associazione osservatore torneggiato borken"
        self.assertListEqual(
            self.pspell.lookup_list(words.split(" ")), ["borken"]
        )
if __name__ == "__main__":
    unittest.main()
| dvwright/phunspell | phunspell/tests/test__it_IT.py | test__it_IT.py | py | 590 | python | en | code | 4 | github-code | 36 |
6363206566 | from itertools import count
global_index = 1
global_bank_fee = 1
global_bank_win = 2
global_bank_lose = 3
class smartPlayer:
    """A player in an iterated trust game.

    Each player is either a trustor (invests) or a trustee, remembers a
    per-opponent trust value seeded from its own trust coefficient, and
    accumulates currency according to the module-level bank pay-offs
    (global_bank_fee / global_bank_win / global_bank_lose).
    """
    # Shared counter handing every instance a unique, increasing id.
    _ids = count(0)

    def __init__(self, trustor_or_trustee, trust_coefficient, beta):
        # NOTE(review): every constructed player overwrites the shared
        # bank fee with its *beta* — the last player built decides the
        # fee for everyone.  Preserved as-is for compatibility.
        global global_bank_fee
        global_bank_fee = beta
        self.id = next(self._ids)
        self.trustor = trustor_or_trustee
        self.trustingCoefficient = trust_coefficient
        self.memory = {}   # opponent id -> current trust value
        self.currency = 0

    def changeTrustStatus(self):
        """Flip this player between trustor and trustee."""
        self.trustor = not self.trustor

    def reciprocate(self, other):
        """Decide whether to cooperate with *other*.

        Unknown opponents are seeded with this player's own trust
        coefficient.  Cooperation requires the remembered trust to reach
        0.66 * (1 + bank fee); a refusing trustor pays the bank fee.
        """
        # Seed memory on first encounter (dict membership test replaces
        # the original manual scan over the keys).
        if other.id not in self.memory:
            self.memory[other.id] = self.trustingCoefficient
        ans = self.memory[other.id] >= 0.66 * (1 + global_bank_fee)
        if not ans and self.trustor:
            self.currency -= global_bank_fee
        return ans

    def updateCurrency(self, win_lose):
        """Apply the bank pay-off for a win (True) or a loss (False)."""
        if self.trustor:
            if win_lose:
                self.currency += global_bank_win
            else:
                self.currency -= global_bank_fee
        else:
            if win_lose:
                self.currency += global_bank_win
            else:
                self.currency += global_bank_lose

    def updateTrustStatus(self, other, result):
        """Scale the remembered trust for *other* up (success) or down."""
        if result:
            self.memory[other.id] *= self.trustingCoefficient
        else:
            self.memory[other.id] *= (1 - self.trustingCoefficient)

    def memoryPrint(self):
        """Return one human-readable line per remembered opponent.

        Bug fix: the original assigned repstr[i] into an *empty* list
        (IndexError on the first opponent, and i was never incremented)
        and concatenated non-string ids/values onto strings.
        """
        return ["Player ID: {0}, Trusting Status: {1}".format(pid, mem)
                for pid, mem in self.memory.items()]

    def __repr__(self):
        # Bug fix: the original concatenated a float coefficient and a
        # bool flag onto strings, raising TypeError; format() instead.
        return ("Player ID: {0}\nCurrency: {1}\n"
                "Self trusting coefficient: {2}\nIs truster? {3}").format(
            self.id, self.currency, self.trustingCoefficient, self.trustor)

    def __str__(self):
        trustorStr = "No"
        if self.trustor:
            trustorStr = "Yes"
        return "Player ID: " + str(self.id) + ", Currency: {0}".format(self.currency) + ", Self trusting coefficient: {0}".format(self.trustingCoefficient) + ", Is truster? " + trustorStr + "\n"
| snirsh/TrustGame | SmartPlayer.py | SmartPlayer.py | py | 2,264 | python | en | code | 0 | github-code | 36 |
def assign_to_projects(self, data):
    """Assign users to projects in bulk.

    Each element of *data* is a mapping with an 'email' and a 'project'
    name.  For every element a (success, message, http_status) tuple is
    appended, so callers get exactly one status per requested
    assignment, in input order.

    NOTE(review): assumes self.users / self.projects / self.connections
    are MongoDB-style collections (find_one / insert_one) — confirm
    against the owning class.
    """
    result = []
    for x in data:
        user = self.users.find_one({'email': x['email']})
        if not user:
            result.append((False, 'User not found!', 404))
            continue
        project = self.projects.find_one({'name': x['project']})
        if not project:
            result.append((False, 'Project not found!', 404))
            continue
        # A user/project pair may be linked only once — skip duplicates.
        connection = self.connections.find_one({'user': user['_id'], 'project': project['_id']})
        if connection:
            result.append((False, 'Project already assigned!', 400))
            continue
        self.connections.insert_one({'user': user['_id'], 'project': project['_id']})
        result.append((True, 'Project has been assigned!', 200))
    return result | DvaMishkiLapa/diplom_se_2019 | code/assign_to_projects_server_func.py | assign_to_projects_server_func.py | py | 780 | python | en | code | 0 | github-code | 36 |
74838938664 |
# environment
import sys, os
import argparse
import json
from board import Tiles, Board
from player import Player
import shape
def pprint(thing):
    """Write *thing* to stdout followed by a newline and flush at once,
    so the judge/parent process sees the message without buffering delay."""
    out = sys.stdout
    out.write(thing + '\n')
    out.flush()
if __name__ == '__main__':
    # Game driver: reads JSON actions from stdin, applies them to the
    # Blokus board, and replies with JSON states on stdout (via pprint).
    parser = argparse.ArgumentParser()
    player = []
    parser.add_argument("--players_allocate", default = "AI,AI", help = "indicate player type and order")
    parser.add_argument("--extra", help = "extra info")
    args = parser.parse_args()
    if args.players_allocate:
        pa = args.players_allocate.split(',')
        if len(pa) != 2:
            raise ValueError("--player_allocate must have two arguments!")
    player.append(Player(0, 0, -1))
    player.append(Player(0, 1, -1))
    if not args.extra is None:
        pass
    board = Board()
    # Full game record: one entry per accepted move.
    history = {}
    history['step'] = []
    # Initial handshake: player 0 moves first on an empty board.
    output = {}
    output["status"] = "Success"
    output["action_player_id"] = 0
    output["state"] = board.board.tolist()
    pprint(json.dumps(output))
    # Two consecutive passes end the game; isOver remembers the first.
    isOver = False
    while True:
        jsInfo = sys.stdin.readline().rstrip()
        info = json.loads(jsInfo)
        act = info['action']
        isPass = info['is_pass']
        playerOrder = info['action_player_id']
        output = {}
        if isPass:
            if isOver:
                # Second pass in a row: score the game and report the
                # winner (-1 encodes a draw).
                output['status'] = "Over"
                output['result'] = {
                    "record" : json.dumps(history),
                    "score" : [p.score for p in player],
                    "winner_id" : 0
                }
                if player[0].score < player[1].score:
                    output['result']['winner_id'] = 1
                elif player[0].score == player[1].score:
                    output['result']['winner_id'] = -1
                pprint(json.dumps(output))
                break
            # First pass: hand the turn over and remember the pass.
            output["status"] = "Success"
            output["action_player_id"] = playerOrder ^ 1
            output["state"] = board.board.tolist()
            pprint(json.dumps(output))
            isOver = True
            continue
        isOver = False
        # Collect the tile cells and their bounding-box origin (the
        # board is 14x14, hence the 14 initial minima).
        tile = []
        tileSize = len(act)
        minx = 14
        miny = 14
        for i in range(tileSize):
            x = act[i]['row']
            y = act[i]['col']
            tile.append([x, y])
            minx = min(minx, x)
            miny = min(miny, y)
        try:
            result = board.dropTile(playerOrder, tile)
        except Exception as e:
            # An illegal drop aborts the game with an error report.
            output['status'] = "Error"
            output['reason'] = str(e)
            pprint(json.dumps(output))
            break
        else:
            if result:
                output = {}
                step = {}
                step["player"] = playerOrder
                step["action"] = act
                step["state"] = {}
                history["step"].append(step)
                # Normalise the tile to its bounding box and canonical
                # cell order so it can be matched against shapeSet.
                for i in range(tileSize):
                    tile[i][0] -= minx
                    tile[i][1] -= miny
                tile.sort()
                rotf = 0
                for t in range(21):
                    if shape.tileSizes[t] != tileSize:
                        continue
                    if tile in shape.shapeSet[t]:
                        player[playerOrder].used[t] = True
                        # NOTE(review): '.index[tile]' subscripts the
                        # list method — almost certainly meant
                        # '.index(tile)'; as written this raises
                        # TypeError when a shape matches.  'rotf' is
                        # also never used afterwards.
                        rotf = shape.shapeSet[t].index[tile]
                        break
                player[playerOrder].score += tileSize
                output['status'] = "Success"
                output['action_player_id'] = playerOrder ^ 1
                output['state'] = board.board.tolist()
                pprint(json.dumps(output))
| FineArtz/Game3_Blokus | environment.py | environment.py | py | 3,602 | python | en | code | 1 | github-code | 36 |
3685311565 | # coding: utf-8
import collections
import os
try:
import StringIO
except:
from io import StringIO
import sys
import tarfile
import tempfile
import urllib
import numpy as np
from PIL import Image, ImageDraw
import collections
import tensorflow as tf
import random
if tf.__version__ < '1.5.0':
raise ImportError('Please upgrade your tensorflow installation to v1.5.0 or newer!')
# Needed to show segmentation colormap labels
from lib import get_dataset_colormap
# In[11]:
# LABEL_NAMES = np.asarray([
# 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
# 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
# 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
# 'train', 'tv'
# ])
class BackgroundSubtractor(object):
    """Remove the background from photos of people.

    Runs a DeepLab semantic-segmentation model over an input image and
    composites every non-person pixel to white, keeping only the pixels
    labelled 'person' (Pascal VOC class index 15).
    """

    PERSON_CLASS = 15  # index of 'person' in the Pascal VOC label map

    def __init__(self, graph_name):
        super(BackgroundSubtractor, self).__init__()
        self.model = DeepLabModel(graph_name)
        self.has_person = False

    def extract_image(self, image, mask_array, dst):
        """Composite *image* onto a white canvas using the person mask.

        Saves the result to *dst* and returns True when at least one
        person pixel was found, False otherwise (nothing is saved then).
        """
        mask_array = np.asarray(mask_array)
        person = (mask_array == self.PERSON_CLASS)
        self.has_person = bool(person.any())
        if not self.has_person:
            return False
        # Build the RGBA mask in one vectorised pass instead of the
        # original per-pixel Python loop: person pixels get alpha 0
        # (composite keeps the foreground there), everything else gets
        # alpha 255 (composite takes the white background).
        mask_rgba = np.zeros(mask_array.shape + (4,), dtype=np.uint8)
        mask_rgba[person] = (255, 255, 255, 0)
        mask_rgba[~person] = (0, 0, 0, 255)
        background = Image.new('RGB', (mask_array.shape[1], mask_array.shape[0]), (255, 255, 255))
        mask = Image.fromarray(mask_rgba)
        result = Image.composite(background, image, mask)
        result.save(dst)
        return True

    def execute(self, image_name, dst):
        """Segment the image at *image_name* and save the background-free
        version to *dst*.  Returns None when the image cannot be read."""
        try:
            orignal_im = Image.open(image_name)
        except IOError:
            # Bug fix: the original referenced the undefined name
            # 'image_path' here, turning a missing file into a NameError.
            print('Failed to read image from %s.' % image_name)
            return None
        resized_im, seg_map = self.model.run(orignal_im)
        self.extract_image(resized_im, seg_map, dst)

    def run(self, src, dest):
        """Public entry point: reset the person flag and process *src*."""
        self.has_person = False
        return self.execute(src, dest)
class DeepLabModel(object):
    """Class to load deeplab model and run inference."""
    # Tensor names inside the frozen DeepLab graph export.
    INPUT_TENSOR_NAME = 'ImageTensor:0'
    OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
    # The longest image side is scaled down to this size before inference.
    INPUT_SIZE = 513
    def __init__(self, graph_path):
        """Creates and loads pretrained deeplab model.

        *graph_path* points to a serialized frozen GraphDef file.
        """
        self.graph = tf.Graph()
        with open(graph_path, "rb") as f:
            graph_def = tf.GraphDef.FromString(f.read())
        with self.graph.as_default():
            tf.import_graph_def(graph_def, name='')
        # NOTE(review): the session is never closed — acceptable for a
        # long-lived singleton, but leaks resources if many models are
        # created.
        self.sess = tf.Session(graph=self.graph)
    def run(self, image):
        """Runs inference on a single image.
        Args:
        image: A PIL.Image object, raw input image.
        Returns:
        resized_image: RGB image resized from original input image.
        seg_map: Segmentation map of `resized_image`.
        """
        width, height = image.size
        # Scale so the longest side equals INPUT_SIZE, keeping aspect.
        resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)
        target_size = (int(resize_ratio * width), int(resize_ratio * height))
        resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)
        batch_seg_map = self.sess.run(
            self.OUTPUT_TENSOR_NAME,
            feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
        # One map per batch element; exactly one image was fed.
        seg_map = batch_seg_map[0]
        return resized_image, seg_map | MatthieuBlais/tensorflow-clothing-detection | background.py | background.py | py | 3,761 | python | en | code | 11 | github-code | 36 |
1700285316 | #!/usr/bin/python3
"""
@author : Chris Phibbs
@created : Wednesday Nov 18, 2020 21:12:41 AEDT
@file : buySell
"""
# TC: O(N) - We make one pass of the list
# SC: O(1) - We use same amount of space regardless of list size
class Solution:
    def maxProfit(self, prices):
        """Return the maximum profit from one buy-then-sell transaction.

        Single pass over *prices*: track the cheapest price seen so far
        and the best difference against it.  Returns 0 when no
        profitable transaction exists (including empty or single-element
        input, which the explicit loop handles naturally).

        Time O(n), space O(1).
        """
        max_profit = 0
        min_price = None
        for price in prices:
            if min_price is None or price < min_price:
                # New cheapest buy point.
                min_price = price
            elif price - min_price > max_profit:
                # Best sell seen for the current cheapest buy.
                max_profit = price - min_price
        return max_profit
| phibzy/InterviewQPractice | Solutions/BuySellStockI/buySell.py | buySell.py | py | 884 | python | en | code | 0 | github-code | 36 |
26090442688 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from basic.bupt_2017_11_28.type_deco import prt
import joblib
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from basic.bupt_2017_11_28.type_deco import prt
import seaborn as sns
'''
User:waiting
Date:2018-01-23
Time:16:54
'''
# def shortpalin(s:str):
# l = []
# for x in s:
# l.append('#')
# l.append(x)
# l.append('#')
# p = [0] * len(l)
# center,max_right = 0,0
# for i in range(len(l)):
# if i < max_right:
# p[i] = min(max_right - i,p[2 * center - i])
# else:
# p[i] = 1
# while i + p[i] < len(p) and i - p[i] >= 0 and l[i+p[i]] == l[i-p[i]]:
# p[i] += 1
# if i + p[i] - 1> max_right:
# max_right = i + p[i] - 1
# center = i
# print([str(x) for x in p])
# print(l)
# print(''.join(l))
'''
brute force
'''
def shortpalin(s: str):
    """Return the shortest palindrome obtainable by prepending characters to *s*.

    Brute force: find the longest palindromic prefix s[:i], then mirror
    the remaining suffix in front of the string.  O(n^2) worst case.

    Fixes over the original: the empty string no longer raises NameError
    (the loop body never ran, leaving *i* unbound), and the stray debug
    print of *i* is removed.
    """
    if not s:
        return s
    for i in range(len(s), 0, -1):
        prefix = s[:i]
        if prefix == prefix[::-1]:
            break
    return s[i:][::-1] + s
def shortpalin2(s: str):
    """KMP-based shortest palindrome by prepending characters to *s*.

    Builds t = s + '#' + reversed(s) + '*' and computes its KMP failure
    table (via the module-level next() helper); the last entry is the
    length of the longest palindromic prefix of *s* — the '#' separator
    keeps the match from spilling across the join.  O(n).

    Fix over the original: the two debug print() calls are removed so
    the function no longer pollutes stdout.
    """
    t = '{}#{}*'.format(s, s[::-1])
    fail = next(t)
    return '{}{}'.format(s[fail[-1]:][::-1], s)
def next(s: str):
    """Compute the KMP failure table for *s*.

    fail[i] is the length of the longest proper prefix of s[:i] that is
    also a suffix of it, with fail[0] = -1 as a sentinel — the classic
    'next' array of the Knuth-Morris-Pratt algorithm.

    NOTE: this module-level name shadows the builtin next() within this
    file; it is kept because shortpalin2() calls it by this name.
    """
    fail = [0] * len(s)
    fail[0] = -1
    for pos in range(2, len(s)):
        cand = fail[pos - 1]
        # Fall back through shorter borders until one extends by s[pos-1].
        while cand != -1 and s[cand] != s[pos - 1]:
            cand = fail[cand]
        fail[pos] = cand + 1
    return fail
if __name__ == '__main__':
    # a = shortpalin('ass')
    # print(next('abaa'))
    # Smoke test: the empty string is already a palindrome, so the
    # expected output is an empty line.
    a = shortpalin2('')
    print(a)
    pass
| Mr-cpc/idea_wirkspace | learnp/basic/bupt_2018_1_23/shortpalin.py | shortpalin.py | py | 1,697 | python | en | code | 0 | github-code | 36 |
24843380202 | import os
import pandas as pd
def renameProteins(cols_to_rename, somadict):
    """Map SomaScan sequence column names to gene symbols.

    Columns containing 'seq' are translated via *somadict*.  Ratio
    columns of the form '..._seq.A.B_seq.C.D' become 'geneAB/geneCD'.
    When a sequence id is not found in the dictionary the raw token is
    kept; columns without 'seq' (clinical features) pass through
    unchanged.

    Args:
        cols_to_rename: iterable of column-name strings.
        somadict: pandas DataFrame with 'SeqID' and 'GeneID' columns.

    Returns:
        list[str]: the renamed columns, in input order.
    """
    def _lookup(seq_token):
        # 'seq.1234.5' -> SeqID '1234-5'; fall back to the raw token
        # when the SeqID is unknown.
        seq_id = '-'.join(seq_token.split('.')[1:])
        try:
            return somadict[somadict['SeqID'] == seq_id].loc[:, 'GeneID'].values[0]
        except Exception:
            return seq_token

    new_cols = []
    for s in cols_to_rename:
        if 'seq' in s:
            if 'ratio' in s:
                parts = s.split('_seq')
                # Bug fix: the original assigned the fallback for the
                # FIRST sequence to this_gene2, leaving this_gene1
                # undefined (NameError) whenever that lookup failed.
                gene1 = _lookup(parts[1])
                gene2 = _lookup(parts[2])
                new_cols.append(gene1 + '/' + gene2)
            else:
                new_cols.append(_lookup(s))
        else:
            # Clinical feature — passed through unchanged.
            new_cols.append(s)
    return new_cols
# Which data should be evaluated
output = 'Prediction_output'
model = 'RF'
names = ['PACS_6M_woDys_from_6MProteomics_withHealthy_RF_withFCorr_mutualProteins']
# Alternative experiment names, kept for reference:
# 'PACS_6M_woDys_from_1MClinicalProteomics_withHealthy_'+model+'_withFCorr',
# 'PACS_6M_woDys_from_6MClinicalProteomics_withHealthy_'+model+'_withFCorr',
# 'PACS_6M_woDys_from_1Mand6MClinicalProteomics_withHealthy_'+model+'_withFCorr',
# 'PACS_12M_from_6MClinicalProteomics_withHealthy_'+model+'_withFCorr']
n_splits = 5
# The SomaScan dictionary's first data row carries the real header names.
somadict = pd.read_excel('Data/SomaScanDict.xlsx')
somadict.columns = somadict.iloc[0,:]
somadict.drop(0,inplace = True)
for name in names:
    folder = os.path.join(output,name)
    # Load shap analysis results of fold 1 to fix the feature index.
    this_importance_all = pd.read_csv(os.path.join(folder, name +'_cv'+str(1) + '_importance_val_all.csv'), index_col=0)
    this_importance_all.set_index('col_name', inplace=True)
    df_importance = pd.DataFrame(index = this_importance_all.index, columns = [cv for cv in range(0,n_splits)])
    # Rank features per cross-validation fold (rank 1 = most important).
    for cv in range(0,n_splits):
        this_importance = pd.read_csv(os.path.join(folder, name+'_cv'+str(cv) + '_importance_val_all.csv'), index_col=0)
        this_importance.set_index('col_name', inplace=True)
        df_importance.loc[this_importance.index,cv] = this_importance.loc[:,'feature_importance_vals'].rank(ascending = False)
    # NOTE(review): the fold columns are hard-coded as [0,1,2,3,4] —
    # keep in sync with n_splits if the fold count ever changes.
    df_importance['sum'] = df_importance.loc[:,[0,1,2,3,4]].sum(axis =1)
    df_importance= df_importance.sort_values('sum',ascending = True)
    # Column names: translate SomaScan sequence ids to gene symbols.
    new_cols = renameProteins(df_importance.index,somadict)
    df_importance.index = new_cols
    df_importance.to_csv(os.path.join(output,'eval', name +'_importance_val_all.csv')) | BorgwardtLab/LongCOVID | combineInterpretations.py | combineInterpretations.py | py | 2,880 | python | en | code | 0 | github-code | 36 |
13510402136 | ################################################################################
'''
Name : powerBy
Purpose : Function to get the exponential value for a value for an value.
'''
################################################################################
import sys
# The recursive power() below needs one stack frame per unit of the
# exponent, so the default recursion limit (1000) is raised to 10000 as
# a workaround for larger exponents.
print("Current value of recursion limit is",sys.getrecursionlimit())
sys.setrecursionlimit(10000)
print("Setting the recursion limit as ",sys.getrecursionlimit())
# NOTE(review): dumping sys.__dict__ looks like leftover debugging output.
print(sys.__dict__)
def power(base, expo):
    """Return *base* raised to the non-negative integer power *expo*.

    Iterative exponentiation by squaring: O(log expo) multiplications
    and no recursion, so large exponents no longer require raising the
    interpreter's recursion limit (the original recursed once per unit
    of the exponent).

    Raises:
        AssertionError: if *expo* is negative or not an integer (kept as
            an assert so callers see the same error type as before).
    """
    # Unintended cases: the exponent must be a non-negative integer.
    assert expo >= 0 and int(expo) == expo, 'Exponential component should be positive integer'
    result = 1
    factor = base
    e = int(expo)
    while e > 0:
        if e & 1:
            # This bit of the exponent contributes the current square.
            result *= factor
        factor *= factor
        e >>= 1
    return result
print(power(2, 44))
print(power(1, 0))
# Bug fix: power() rejects negative exponents, so the original
# unguarded call crashed the demo with an uncaught AssertionError.
try:
    print(power(3.2, -2))
except AssertionError as err:
    print(err)
| gopinathrajamanickam/DSA | Recursion/powerBy.py | powerBy.py | py | 821 | python | en | code | 0 | github-code | 36 |
36059095725 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch as torch
# In[2]:
import torch.nn as nn
import pandas as pd
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
# In[3]:
df = pd.read_csv("yoochoose-clicks.dat",
names=["session", "timestamp", "item", "category"],
parse_dates=["timestamp"])
# In[9]:
df_percent = df.head(50000)
# In[10]:
df_percent = df_percent[['session','item']]
# In[30]:
df_percent = df_percent.sort_values(by = 'session')
# In[35]:
test_data_size = 10004 #20 percent
train_data = df_percent[:-test_data_size]
test_data = df_percent[-test_data_size:]
# In[237]:
#getting target dataset from training dataset
target_dataset=train_data.loc[(train_data["session"]!=train_data["session"].shift(-1))]
# In[254]:
train_data['session'].isin(target_dataset['session']).value_counts()
# In[217]:
target_numpy = target_dataset.to_numpy(dtype = 'int64')
# In[109]:
train_clicks_numpy = train_data.to_numpy(dtype = 'int64') #Creating training df as numpy int64 type
test_clicks_numpy = test_data.to_numpy(dtype = 'int64') #Creating testing df as numpy int64 type
# In[ ]:
# In[218]:
featuresTrain = torch.from_numpy(train_clicks_numpy)
featuresTest = torch.from_numpy(test_clicks_numpy)
featuresTarget = torch.from_numpy(target_numpy)
# In[114]:
# batch_size, epoch and iteration
batch_size = 100
n_iters = 10000
num_epochs = n_iters / (len(featuresTrain) / batch_size)
num_epochs = int(num_epochs)
# In[111]:
# Pytorch train set
train = TensorDataset(featuresTrain)
# In[112]:
# Pytorch test set
test = TensorDataset(featuresTest)
# In[115]:
# data loader
train_loader = DataLoader(train, batch_size = batch_size, shuffle = False)
test_loader = DataLoader(test, batch_size = batch_size, shuffle = False)
# In[221]:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train_arr = scaler.fit_transform(featuresTrain)
val_arr = scaler.transform(featuresTarget)
test_arr = scaler.transform(featuresTest)
# In[207]:
# NOTE(review): 'model' is defined only further down (after the LSTM class),
# so executing these cells top-to-bottom raises NameError here; this line
# must run after the model is instantiated.
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# In[209]:
#####################
input_dim = 2
hidden_dim = 100
num_layers = 2
output_dim = 1
class LSTM(nn.Module):
    """Stacked LSTM followed by a linear read-out of the last time step.

    Input:  (batch, seq_len, input_dim)  — batch_first=True
    Output: (batch, output_dim)
    """

    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(LSTM, self).__init__()
        # Hidden dimensions
        self.hidden_dim = hidden_dim
        # Number of hidden layers
        self.num_layers = num_layers
        # batch_first=True causes input/output tensors to be of shape
        # (batch_dim, seq_dim, feature_dim)
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        # Readout layer
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # BUG FIX: the hidden/cell states were initialised with a batch
        # dimension of 0 (torch.zeros(num_layers, 0, hidden_dim)), which makes
        # nn.LSTM raise a size-mismatch error. They must match the batch size
        # of the input, x.size(0).
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).requires_grad_()
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).requires_grad_()
        # Detach so truncated BPTT does not backprop across previous batches.
        out, (hn, cn) = self.lstm(x, (h0.detach(), c0.detach()))
        # Keep only the last time step's hidden state: (batch, hidden_dim).
        out = self.fc(out[:, -1, :])
        return out
model = LSTM(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers)
# NOTE(review): size_average is deprecated in modern PyTorch; the equivalent
# is torch.nn.MSELoss(reduction='mean').
loss_fn = torch.nn.MSELoss(size_average=True)
print(model)
print(len(list(model.parameters())))
# Print the shape of every learnable tensor for a quick sanity check.
for i in range(len(list(model.parameters()))):
    print(list(model.parameters())[i].size())
# In[212]:
# Train model
#####################
import numpy as np
look_back = 20
# Per-epoch loss history for plotting later.
hist = np.zeros(num_epochs)
# Number of steps to unroll
seq_dim =look_back-1
for t in range(num_epochs):
    # Initialise hidden state
    # Don't do this if you want your LSTM to be stateful
    #model.hidden = model.init_hidden()
    # Forward pass
    # NOTE(review): 'train_inout_seq' is never defined anywhere in this
    # script, so this line raises NameError — the input tensor still has
    # to be built from the training data.
    y_train_pred = model(train_inout_seq)
    # NOTE(review): 'train' here is a TensorDataset, not a target tensor;
    # MSELoss against it will not work as written — TODO fix the target.
    loss = loss_fn(y_train_pred, train)
    if t % 10 == 0 and t !=0:
        print("Epoch ", t, "MSE: ", loss.item())
    hist[t] = loss.item()
    # Zero out gradient, else they will accumulate between epochs
    # NOTE(review): the optimizer was created above under the name
    # 'optimizer', but is referenced here as 'optimiser' — NameError.
    optimiser.zero_grad()
    # Backward pass
    loss.backward()
    # Update parameters
    optimiser.step()
# In[ ]:
| fahadkh2019/Capstone_Project | LSTM Modeling-updated.py | LSTM Modeling-updated.py | py | 4,722 | python | en | code | 0 | github-code | 36 |
72644950504 | import os
import subprocess
from itertools import chain
from pathlib import Path
import pytest
from netCDF4 import Dataset
from pkg_resources import resource_filename
from compliance_checker.cf import util
from compliance_checker.suite import CheckSuite
def glob_down(pth, suffix, lvls):
    """Glob for ``*<suffix>`` files in ``pth`` and up to ``lvls - 1`` levels
    of subfolders.

    :param pth: root Path to search from
    :param suffix: file suffix including the dot, e.g. ".ipynb"
    :param lvls: how many directory levels to search (1 = top level only)
    :return: list of matching Path objects
    """
    matches = []
    for depth in range(lvls):
        pattern = "*" + "/*" * depth + suffix
        matches.extend(pth.glob(pattern))
    return matches
def generate_dataset(cdl_path, nc_path):
    """Compile a CDL text file into a netCDF-4 file using the ncgen CLI."""
    command = ["ncgen", "-4", "-o", str(nc_path), str(cdl_path)]
    subprocess.call(command)
def static_files(cdl_stem):
    """
    Returns the path (str) to a valid nc dataset named <cdl_stem>.nc,
    generating it from the matching .cdl file with ncgen if necessary.\n
    replaces the old STATIC_FILES dict
    """
    datadir = Path(resource_filename("compliance_checker", "tests/data")).resolve()
    # NOTE: these asserts are stripped when Python runs with -O; they are
    # acceptable here only because this is test-support code.
    assert datadir.exists(), f"{datadir} not found"
    # Search the data dir and up to two levels of subfolders for the CDL file.
    cdl_paths = glob_down(datadir, f"{cdl_stem}.cdl", 3)
    assert (
        len(cdl_paths) > 0
    ), f"No file named {cdl_stem}.cdl found in {datadir} or its subfolders"
    assert (
        len(cdl_paths) == 1
    ), f"Multiple candidates found with the name {cdl_stem}.cdl:\n{cdl_paths}\nPlease reconcile naming conflict"
    cdl_path = cdl_paths[0]  # PurePath object
    nc_path = cdl_path.parent / f"{cdl_path.stem}.nc"
    # Generate the .nc lazily; subsequent calls reuse the cached file.
    if not nc_path.exists():
        generate_dataset(cdl_path, nc_path)
    assert (
        nc_path.exists()
    ), f"ncgen CLI utility failed to produce {nc_path} from {cdl_path}"
    return str(nc_path)
# ---------Fixtures-----------
# class scope:
@pytest.fixture(scope="class")
def cs():
    """
    Return a CheckSuite with every available checker registered.
    """
    # BUG FIX: 'scope' must be passed to the pytest.fixture decorator; as a
    # function parameter with a default it was silently ignored and the
    # fixture stayed function-scoped.
    cs = CheckSuite()
    cs.load_all_available_checkers()
    return cs
@pytest.fixture(scope="class")
def std_names():
    """get current std names table version (it changes)"""
    # BUG FIX: pass scope to the pytest.fixture decorator, not as a function
    # parameter (which pytest ignores).
    _std_names = util.StandardNameTable()
    return _std_names
# func scope:
@pytest.fixture
def loaded_dataset(request):
    """
    Return a loaded NC Dataset for the given path\n
    nc_dataset_path parameterized for each test
    """
    # request.param carries the CDL stem supplied via indirect
    # parametrization; static_files resolves/builds the matching .nc file.
    nc_dataset_path = static_files(request.param)
    nc = Dataset(nc_dataset_path, "r")
    yield nc
    # Teardown: close the handle once the test is finished with it.
    nc.close()
@pytest.fixture
def new_nc_file(tmpdir):
    """
    Make a new temporary netCDF file for the scope of the test
    """
    nc_file_path = os.path.join(tmpdir, "example.nc")
    # tmpdir is unique per test, so a pre-existing file indicates a bug.
    if os.path.exists(nc_file_path):
        raise OSError("File Exists: %s" % nc_file_path)
    nc = Dataset(nc_file_path, "w")
    # no need for cleanup, built-in tmpdir fixture will handle it
    return nc
@pytest.fixture
def tmp_txt_file(tmpdir):
    """Return the path of a not-yet-existing output.txt in the test tmpdir."""
    target = os.path.join(tmpdir, "output.txt")
    if os.path.exists(target):
        raise OSError("File Exists: %s" % target)
    return target
@pytest.fixture
def checksuite_setup():
    """For test_cli"""
    # Reset the global checker registry so the CLI tests start from a
    # clean, fully-populated state.
    CheckSuite.checkers.clear()
    CheckSuite.load_all_available_checkers()
| ioos/compliance-checker | compliance_checker/tests/conftest.py | conftest.py | py | 2,919 | python | en | code | 92 | github-code | 36 |
24797529159 | #coding=utf-8
"""
PGCNet batch data generator
two different type input :point cloud and multi-view image
__author__ = Cush shen
"""
import numpy as np
from tqdm import tqdm
import h5py
import time
import tensorflow as tf
image_color_gray = 158
image_color_white = 255
def getDataFiles(list_filename):
    """Read a list file and return its lines with trailing whitespace stripped.

    BUG FIX: the original left the file handle open; the with-statement
    guarantees it is closed.
    """
    with open(list_filename) as f:
        return [line.rstrip() for line in f]
def load_h5(h5_filename):
    """Load the 'data' and 'label' datasets from an HDF5 file.

    BUG FIX: opens the file explicitly read-only and closes it via a
    context manager; the original relied on the default mode and never
    closed the handle.
    """
    with h5py.File(h5_filename, 'r') as f:
        data = f['data'][:]
        label = f['label'][:]
    return data, label
def loadDataFile(filename):
    # Convenience alias for load_h5, kept for API compatibility.
    return load_h5(filename)
def get_model_learning_rate(
        learning_policy, base_learning_rate, learning_rate_decay_step,
        learning_rate_decay_factor, training_number_of_steps, learning_power,
        slow_start_step, slow_start_learning_rate):
    """Gets model's learning rate.
    Computes the model's learning rate for different learning policy.
    Right now, only "step" and "poly" are supported.
    (1) The learning policy for "step" is computed as follows:
    current_learning_rate = base_learning_rate *
    learning_rate_decay_factor ^ (global_step / learning_rate_decay_step)
    See tf.train.exponential_decay for details.
    (2) The learning policy for "poly" is computed as follows:
    current_learning_rate = base_learning_rate *
    (1 - global_step / training_number_of_steps) ^ learning_power
    Args:
        learning_policy: Learning rate policy for training.
        base_learning_rate: The base learning rate for model training.
        learning_rate_decay_step: Decay the base learning rate at a fixed step.
        learning_rate_decay_factor: The rate to decay the base learning rate.
        training_number_of_steps: Number of steps for training.
        learning_power: Power used for 'poly' learning policy.
        slow_start_step: Training model with small learning rate for the first
            few steps.
        slow_start_learning_rate: The learning rate employed during slow start.
    Returns:
        Learning rate for the specified learning policy.
    Raises:
        ValueError: If learning policy is not recognized.
    """
    # NOTE(review): uses the TF1.x graph-mode tf.train API; under TF2 these
    # live in tf.compat.v1.train.
    global_step = tf.train.get_or_create_global_step()
    if learning_policy == 'step':
        learning_rate = tf.train.exponential_decay(
            base_learning_rate,
            global_step,
            learning_rate_decay_step,
            learning_rate_decay_factor,
            staircase=True)
    elif learning_policy == 'poly':
        learning_rate = tf.train.polynomial_decay(
            base_learning_rate,
            global_step,
            training_number_of_steps,
            end_learning_rate=0,
            power=learning_power)
    else:
        raise ValueError('Unknown learning policy.')
    # Use the fixed slow-start rate for the first slow_start_step steps,
    # then switch to the scheduled rate.
    return tf.where(global_step < slow_start_step, slow_start_learning_rate,
                    learning_rate)
def _gather_loss(regularization_losses, scope):
    """
    Gather the loss.
    Args:
        regularization_losses: Possibly empty list of regularization_losses
            to add to the losses.
        scope: collection scope used to filter tf.GraphKeys.LOSSES.
    Returns:
        A tensor for the total loss. Can be None.
    """
    sum_loss = None
    # Individual components of the loss that will need summaries.
    loss = None
    regularization_loss = None
    # Compute and aggregate losses on the clone device.
    all_losses = []
    losses = tf.get_collection(tf.GraphKeys.LOSSES, scope)
    if losses:
        loss = tf.add_n(losses, name='losses')
        all_losses.append(loss)
    if regularization_losses:
        regularization_loss = tf.add_n(regularization_losses,
                                       name='regularization_loss')
        all_losses.append(regularization_loss)
    if all_losses:
        sum_loss = tf.add_n(all_losses)
    # Add the summaries out of the clone device block.
    if loss is not None:
        # filter(None, ...) drops empty strings, so this is just 'Losses/loss'.
        tf.summary.scalar('/'.join(filter(None, ['Losses', 'loss'])), loss)
    if regularization_loss is not None:
        tf.summary.scalar('Losses/regularization_loss', regularization_loss)
    return sum_loss
def _optimize(optimizer, regularization_losses, scope, **kwargs):
    """
    Compute losses and gradients.
    Args:
        optimizer: A tf.Optimizer object.
        regularization_losses: Possibly empty list of regularization_losses
            to add to the losses.
        scope: collection scope forwarded to _gather_loss.
        **kwargs: Dict of kwarg to pass to compute_gradients().
    Returns:
        A tuple (loss, grads_and_vars).
        - loss: A tensor for the total loss. Can be None.
        - grads_and_vars: List of (gradient, variable). Can be None when no
          loss was found in the collections.
    """
    sum_loss = _gather_loss(regularization_losses, scope)
    grad = None
    # Only compute gradients when there is actually a loss to minimise.
    if sum_loss is not None:
        grad = optimizer.compute_gradients(sum_loss, **kwargs)
    return sum_loss, grad
def _gradients(grad):
    """
    Calculate the sum gradient for each shared variable across all clones.
    This function assumes that the grad has been scaled appropriately by
    1 / num_clones.
    Args:
        grad: A List of List of tuples (gradient, variable)
    Returns:
        tuples of (gradient, variable)
    """
    sum_grads = []
    for grad_and_vars in zip(*grad):
        # Note that each grad_and_vars looks like the following:
        # ((grad_var0_clone0, var0), ... (grad_varN_cloneN, varN))
        grads = []
        var = grad_and_vars[0][1]
        for g, v in grad_and_vars:
            # All entries in one group must refer to the same variable.
            assert v == var
            if g is not None:
                grads.append(g)
        if grads:
            if len(grads) > 1:
                sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
            else:
                # Single clone: no need for an add_n op.
                sum_grad = grads[0]
            sum_grads.append((sum_grad, var))
    return sum_grads
def optimize(optimizer, scope=None, regularization_losses=None, **kwargs):
    """
    Compute losses and gradients
    # Note: The regularization_losses are added to losses.
    Args:
        optimizer: An `Optimizer` object.
        scope: optional collection scope forwarded to the loss gathering.
        regularization_losses: Optional list of regularization losses. If None it
            will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
            exclude them.
        **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.
    Returns:
        A tuple (total_loss, grads_and_vars).
        - total_loss: A Tensor containing the average of the losses including
            the regularization loss.
        - grads_and_vars: A List of tuples (gradient, variable) containing the sum
            of the gradients for each variable.
    """
    grads_and_vars = []
    losses = []
    if regularization_losses is None:
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES, scope)
    # with tf.name_scope(scope):
    loss, grad = _optimize(optimizer,
                           regularization_losses,
                           scope,
                           **kwargs)
    if loss is not None:
        losses.append(loss)
    # NOTE(review): grad is appended even when it is None (no loss found);
    # _gradients would then fail on zip(*...). Confirm callers always
    # register at least one loss in the collections.
    grads_and_vars.append(grad)
    # Compute the total_loss summing all the losses.
    total_loss = tf.add_n(losses, name='total_loss')
    # Sum the gradients across clones.
    grads_and_vars = _gradients(grads_and_vars)
    return total_loss, grads_and_vars
def rotate_around_point(angle, data, point):
    """Rotate points about the z-axis through ``point`` by ``angle`` radians.

    :param angle: rotation angle in radians
    :param data: (N, 3) array of xyz coordinates
    :param point: rotation centre (x, y, z)
    :return: (N, 3) array of rotated coordinates (z is unchanged)
    """
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    dx = data[:, 0] - point[0]
    dy = data[:, 1] - point[1]
    new_x = dx * cos_a - dy * sin_a + point[0]
    new_y = dx * sin_a + dy * cos_a + point[1]
    return np.c_[new_x, new_y, data[:, 2]]
def rotate_around_point_x(angle, data, point):
    """Rotate points about the x-axis through ``point`` by ``angle`` radians.

    :param angle: rotation angle in radians
    :param data: (N, 3) array of xyz coordinates
    :param point: rotation centre (x, y, z)
    :return: (N, 3) array of rotated coordinates (x is unchanged)
    """
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    dy = data[:, 1] - point[1]
    dz = data[:, 2] - point[2]
    new_y = dy * cos_a - dz * sin_a + point[1]
    new_z = dy * sin_a + dz * cos_a + point[2]
    return np.c_[data[:, 0], new_y, new_z]
def rotate_around_point_y(angle, data, point):
    """Rotate points about the y-axis through ``point`` by ``angle`` radians.

    :param angle: rotation angle in radians
    :param data: (N, 3) array of xyz coordinates
    :param point: rotation centre (x, y, z)
    :return: (N, 3) array of rotated coordinates (y is unchanged)
    """
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    dx = data[:, 0] - point[0]
    dz = data[:, 2] - point[2]
    new_x = dz * sin_a + dx * cos_a + point[0]
    new_z = dz * cos_a - dx * sin_a + point[2]
    return np.c_[new_x, data[:, 1], new_z]
def get_profile_data(input_data, grid_x, grid_z, number, char):
    """
    Rasterize the xz-projection of a point cloud onto a binary grid_z x grid_x
    occupancy grid and return it flattened as a (1, number*grid_x*grid_z) row
    vector.
    :param input_data: (N, 3) point cloud (columns x, y, z)
    :param grid_x: grid resolution along x
    :param grid_z: grid resolution along z
    :param number: number of views (only the first, i_1 == 0, is filled —
        TODO confirm the remaining views are meant to stay zero)
    :param char: label used in the tqdm progress-bar description
    :return: (1, number*grid_x*grid_z) binary occupancy vector
    """
    # rotate_nums = int(360 / angle)
    # angle_nD = 360 / number
    profile_vector = np.zeros((1, number*grid_x*grid_z))
    points_pixel_num_zx = []
    pts1 = 0
    # for i in range(rotate_nums):
    num_profile_vector = 0
    for i_1 in range(number):
        if i_1 == 0:
            # input_data1 = input_data
            pts1 += input_data.shape[0]
            # Bounding box of the projection; the larger extent defines the
            # (square) pixel size so aspect ratio is preserved.
            max_x = np.max(input_data[:, 0])
            min_x = np.min(input_data[:, 0])
            max_z = np.max(input_data[:, 2])
            min_z = np.min(input_data[:, 2])
            deta_x = max_x - min_x
            deta_z = max_z - min_z
            # Half the extent difference, used to centre the shorter axis.
            deta_deta_xz = np.abs(deta_x - deta_z)/2
            for j in range(pts1):
                point = input_data[j,:]
                if (deta_x > deta_z):
                    if (j == 0):
                        pedeta_x = deta_x/grid_x
                        pedeta_z = deta_x/grid_z
                        attachment_z = np.ceil(deta_deta_xz/pedeta_z)
                    x_num = np.ceil((point[0]-min_x)/pedeta_x)
                    z_num = (np.ceil((point[2] - min_z) / pedeta_z) + attachment_z)
                    if (x_num == 0):
                        x_num = 1
                    if (z_num == 0):
                        z_num = 1
                    # Flip z so row 1 corresponds to the top of the image.
                    z_num = (grid_z + 1) - z_num
                else:
                    if(j == 0):
                        pedeta_x = deta_z / grid_x
                        pedeta_z = deta_z / grid_z
                        attachment_x = np.ceil(deta_deta_xz / pedeta_x)
                    x_num = (np.ceil((point[0] - min_x) / pedeta_x) + attachment_x)
                    z_num = np.ceil((point[2] - min_z) / pedeta_z)
                    if (x_num == 0):
                        x_num = 1
                    if (z_num == 0):
                        z_num = 1
                    z_num = (grid_z + 1) - z_num
                points_pixel_num_zx.append([z_num, x_num])
    points_pixel_num_zx = np.array(points_pixel_num_zx)
    matrix_value_y = np.zeros((grid_z,grid_x))
    bar = tqdm(range(grid_z))
    for k in bar:
        bar.set_description("Processing %s" % char)
        for h in range(grid_x):
            # NOTE(review): these per-cell scans over all points make this
            # O(grid_z * grid_x * N); a single pass filling the grid from
            # points_pixel_num_zx would be much faster.
            n_z = [in_z for in_z,z_ in enumerate(points_pixel_num_zx[:, 0]) if z_ == (k+1)]
            n_x = [in_x for in_x,x_ in enumerate(points_pixel_num_zx[:, 1]) if x_ == (h+1)]
            grid_ij_points_num_zx = list(set(n_z).intersection(set(n_x)))
            if grid_ij_points_num_zx != []:
                matrix_value_y[k,h] = 1
            profile_vector[0,num_profile_vector] = matrix_value_y[k,h]
            num_profile_vector +=1
    return np.array(profile_vector)
def get_xoy_profile_data(index_1, index_2, input_data, grid_x, grid_y):
    """
    Rasterize the xy-projection of a point cloud onto a binary grid_y x grid_x
    occupancy grid and return it flattened as a (1, grid_x*grid_y) row vector.
    :param index_1: batch index (only used in the progress-bar description)
    :param index_2: view index (only used in the progress-bar description)
    :param input_data: (N, 3) point cloud (columns x, y, z)
    :param grid_x: grid resolution along x
    :param grid_y: grid resolution along y
    :return: (1, grid_x*grid_y) binary occupancy vector
    """
    # rotate_nums = int(360 / angle)
    # angle_nD = 360 / number
    number = 1
    profile_vector = np.zeros((1, number*grid_x*grid_y))
    points_pixel_num_yx = []
    pts1 = 0
    # for i in range(rotate_nums):
    num_profile_vector = 0
    for i_1 in range(number):
        if i_1 == 0:
            # input_data1 = input_data
            pts1 += input_data.shape[0]
            # Bounding box of the projection; the larger extent defines the
            # (square) pixel size, the shorter axis is centred.
            max_x = np.max(input_data[:, 0])
            min_x = np.min(input_data[:, 0])
            max_y = np.max(input_data[:, 1])
            min_y = np.min(input_data[:, 1])
            deta_x = max_x - min_x
            deta_y = max_y - min_y
            deta_deta_xy = np.abs(deta_x - deta_y)/2
            for j in range(pts1):
                point = input_data[j, :]
                if deta_x > deta_y:
                    if j == 0:
                        pedeta_x = deta_x/grid_x
                        pedeta_y = deta_x/grid_y
                        attachment_y = np.ceil(deta_deta_xy/pedeta_y)
                    x_num = np.ceil((point[0]-min_x)/pedeta_x)
                    y_num = (np.ceil((point[1] - min_y) / pedeta_y) + attachment_y)
                    if x_num == 0:
                        x_num = 1
                    if y_num == 0:
                        y_num = 1
                    # Flip y so row 1 corresponds to the top of the image.
                    y_num = (grid_y + 1) - y_num
                else:
                    if j == 0:
                        pedeta_x = deta_y / grid_x
                        pedeta_y = deta_y / grid_y
                        attachment_x = np.ceil(deta_deta_xy / pedeta_x)
                    x_num = (np.ceil((point[0] - min_x) / pedeta_x) + attachment_x)
                    y_num = np.ceil((point[1] - min_y) / pedeta_y)
                    if (x_num == 0):
                        x_num = 1
                    if (y_num == 0):
                        y_num = 1
                    y_num = (grid_y + 1) - y_num
                points_pixel_num_yx.append([y_num, x_num])
    points_pixel_num_yx = np.array(points_pixel_num_yx)
    matrix_value_y = np.zeros((grid_y,grid_x))
    bar = tqdm(range(grid_y))
    for k in bar:
        bar.set_description("Processing %d of current batch, index %d" % (index_1, index_2))
        for h in range(grid_x):
            # NOTE(review): per-cell scans over all points — O(grid^2 * N);
            # a direct fill from points_pixel_num_yx would be much faster.
            n_y = [in_y for in_y,y_ in enumerate(points_pixel_num_yx[:, 0]) if y_ == (k+1)]
            n_x = [in_x for in_x,x_ in enumerate(points_pixel_num_yx[:, 1]) if x_ == (h+1)]
            grid_ij_points_num_yx = list(set(n_y).intersection(set(n_x)))
            if grid_ij_points_num_yx:
                matrix_value_y[k, h] = 1
            profile_vector[0, num_profile_vector] = matrix_value_y[k, h]
            num_profile_vector += 1
    return np.array(profile_vector)
def pointcloud_multiview_generate(index_1, data_curr, grid_x, grid_z, angle):
    """Render a point cloud into 360/angle grayscale RGB view images.

    The cloud is rotated about the y-axis through its bounding-box centre in
    steps of `angle` degrees; each rotation's xy-projection is rasterized and
    converted to a (grid_x, grid_z, 3) image whose pixel values are normalized
    to [-0.5, 0.5] (occupied pixels = gray 158, background = white 255).

    :param index_1: batch index forwarded to the rasterizer's progress bar
    :param data_curr: (N, 3) point cloud
    :param grid_x: image width in pixels
    :param grid_z: image height in pixels
    :param angle: angular step in degrees between consecutive views
    :return: list of (grid_x, grid_z, 3) float arrays, one per view
    """
    angle_ = angle * (np.pi / 180)
    # Bounding-box centre used as the rotation pivot.
    local_ori = (np.max(data_curr, axis=0) - np.min(data_curr, axis=0)) / 2 + np.min(data_curr, axis=0)
    center_point = local_ori
    multi_view_array = []
    for i in range(int(360 / angle)):
        rotate_angle_ = i * angle_
        rotated_data = rotate_around_point_y(rotate_angle_, data_curr, center_point)
        profile_xoz1 = np.array(get_xoy_profile_data(index_1, i, rotated_data, grid_x, grid_z)).reshape((1, -1))
        Image_r = profile_xoz1.reshape(-1, grid_z)
        # Normalize the module-level 8-bit colors to [-0.5, 0.5].
        nor_image_color_gray = image_color_gray*(1. / 255) - 0.5
        nor_image_color_white = image_color_white*(1. / 255) - 0.5
        rgbArray = np.zeros((grid_x, grid_z, 3))
        # Occupied pixels (1) become gray; empty pixels (0) become white.
        rgbArray[..., 0] = Image_r * nor_image_color_gray
        index_0 = (rgbArray[..., 0] == 0)
        rgbArray[index_0, 0] = nor_image_color_white
        rgbArray[..., 1] = Image_r * nor_image_color_gray
        rgbArray[index_0, 1] = nor_image_color_white
        rgbArray[..., 2] = Image_r * nor_image_color_gray
        rgbArray[index_0, 2] = nor_image_color_white
        multi_view_array.append(rgbArray)
    return multi_view_array
def mini_batch_pointcloud_multiview_generate(batch_data, im_width, im_height, rotate_angle):
    """Render every point cloud in the mini-batch into its multi-view images.

    :param batch_data: (batch, N, 3) array of point clouds
    :param im_width: view image width in pixels
    :param im_height: view image height in pixels
    :param rotate_angle: angular step (degrees) between consecutive views
    :return: list (one entry per cloud) of lists of RGB view arrays
    """
    return [
        pointcloud_multiview_generate(idx, cloud, im_width, im_height, rotate_angle)
        for idx, cloud in enumerate(batch_data)
    ]
def fast_confusion(true, pred, label_values=None):
    """
    Fast confusion matrix (100x faster than Scikit learn). But only works if
    labels are integers.
    :param true: 1D (or squeezable) integer array of ground-truth labels
    :param pred: 1D (or squeezable) integer array of predicted labels
    :param label_values: optional array of label values; inferred from the
        data when omitted
    :return: (num_classes, num_classes) confusion matrix with rows indexed by
        truth and columns by prediction
    """
    true = np.squeeze(true)
    pred = np.squeeze(pred)
    if len(true.shape) != 1:
        raise ValueError('Truth values are stored in a {:d}D array instead of 1D array'. format(len(true.shape)))
    if len(pred.shape) != 1:
        raise ValueError('Prediction values are stored in a {:d}D array instead of 1D array'. format(len(pred.shape)))
    # BUG FIX: '{:s}' only accepts str and raised TypeError when formatting a
    # numpy dtype; '{}' produces the intended ValueError message instead.
    if true.dtype not in [np.int32, np.int64]:
        raise ValueError('Truth values are {} instead of int32 or int64'.format(true.dtype))
    if pred.dtype not in [np.int32, np.int64]:
        raise ValueError('Prediction values are {} instead of int32 or int64'.format(pred.dtype))
    true = true.astype(np.int32)
    pred = pred.astype(np.int32)
    if label_values is None:
        # Infer the label set from whatever appears in truth or prediction.
        label_values = np.unique(np.hstack((true, pred)))
    else:
        if label_values.dtype not in [np.int32, np.int64]:
            raise ValueError('label values are {} instead of int32 or int64'.format(label_values.dtype))
        if len(np.unique(label_values)) < len(label_values):
            raise ValueError('Given labels are not unique')
    label_values = np.sort(label_values)
    num_classes = len(label_values)
    if label_values[0] == 0 and label_values[-1] == num_classes - 1:
        # Labels are already 0..C-1: a single bincount yields the matrix.
        vec_conf = np.bincount(true * num_classes + pred)
        if vec_conf.shape[0] < num_classes ** 2:
            vec_conf = np.pad(vec_conf, (0, num_classes ** 2 - vec_conf.shape[0]), 'constant')
        return vec_conf.reshape((num_classes, num_classes))
    else:
        if label_values[0] < 0:
            raise ValueError('Unsupported negative classes')
        # Remap arbitrary non-negative labels onto 0..C-1 first.
        label_map = np.zeros((label_values[-1] + 1,), dtype=np.int32)
        for k, v in enumerate(label_values):
            label_map[v] = k
        pred = label_map[pred]
        true = label_map[true]
        vec_conf = np.bincount(true * num_classes + pred)
        # Add possible missing values due to classes not being in pred or true
        if vec_conf.shape[0] < num_classes ** 2:
            vec_conf = np.pad(vec_conf, (0, num_classes ** 2 - vec_conf.shape[0]), 'constant')
        # Reshape confusion in a matrix
        return vec_conf.reshape((num_classes, num_classes))
if __name__ == '__main__':
    # Smoke test: stream the training h5 files in mini-batches of 2 clouds
    # and render each batch into 299x299 multi-view images. With a 360-degree
    # step only a single view per cloud is produced.
    start = time.time()
    data_path = './data/train_files.txt'
    TRAIN_FILES = getDataFiles(data_path)
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    for fn in range(len(TRAIN_FILES)):
        current_data, current_label = loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        file_size = current_data.shape[0]
        # Hard-coded batch size of 2; any trailing odd cloud is dropped.
        num_batches = file_size // 2
        for batch_idx in range(num_batches):
            start_idx = batch_idx * 2
            end_idx = (batch_idx+1) * 2
            current_batch_train_data = current_data[start_idx:end_idx, :, :]
            current_batch_data_label = current_label[start_idx:end_idx]
            current_train_multi_views = mini_batch_pointcloud_multiview_generate(current_batch_train_data, 299, 299, 360)
            current_train_multi_views = np.array(current_train_multi_views)
            print(current_train_multi_views.shape)
    print("running time:{:.2f} s\n".format(time.time() - start))
35376158594 | # fit to time dependent function of chance of having activity of any length during a single labeling window
# infer k_on parameter based on single window for 4SU (though here it is the 2nd window)
# based on different window lengths
# window_lengths = [15, 30, 45, 60, 120, 180]
# fit based on (hidden) presence of active state, on real simulated counts and on sampled simulated counts
# TO DO
# three categories of k_syn:
# only change k_on with fixed (k_off, k_syn, k_d)
import os
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.optimize import curve_fit
from simulator.Experiment import *
from simulator.Transcription import *
import numpy as np
from utils.utils import round_sig
if os.name == 'nt':
dir_sep = "\\"
out_dir = r"D:\26 Battich Oudenaarden transcriptional bursts\runs"
else:
dir_sep = "/"
out_dir = "sc_runs"
plot_dir = out_dir + dir_sep + "infer_parameters_example.plots"
os.makedirs(plot_dir, exist_ok=True)
df_filename = "counts_infer_parameters_example.csv"
k_on = 0.01
k_off = 0.04
k_d = 0.02
k_syn = 0.2
k_eff = 0.1
# window_lengths = [r*15 for r in range(1, 24)]
window_lengths = [15, 30, 45, 60, 120, 180]
k_offs = [k * 0.005 for k in range(1, 6)] # for some examples in theoretical plots
def p_1(t, k_on, k_off):
    """Chance of observing at least one active state within time t.

    p_1(t) = p_on + p_off * (1 - exp(-k_on * t)), where p_on and p_off are
    the stationary probabilities of the on/off states of the two-state
    telegraph switch.
    """
    total_rate = k_on + k_off
    p_on = k_on / total_rate
    p_off = k_off / total_rate
    return p_on + p_off * (1.0 - np.exp(-k_on * t))
# simplified model
def p_1_model(t, k_on, p_on, p_off):
    """Simplified fit model with free p_on/p_off:
    p_1(t) = p_on + p_off * (1 - exp(-k_on * t))."""
    return p_on + p_off * (1.0 - np.exp(-k_on * t))
def nr_molecules_in_window_no_decay(t, k_on, k_off, k_syn, k_eff):
    """Expected number of detected molecules produced in a window of length t,
    ignoring decay: p_on * k_syn * k_eff * t."""
    fraction_on = k_on / (k_on + k_off)
    return fraction_on * k_syn * k_eff * t
def plot_theoretical_chance_of_active_state():
    """Plot p_1(t) for each k_off in the module-level k_offs list and save
    the figure; the labeling windows are marked with dashed vertical lines.

    Uses the module globals k_on, k_offs, window_lengths, plot_dir, dir_sep
    and k_syn.
    """
    t = np.linspace(0, 400, 100)
    for k_off in k_offs:
        sns.lineplot(x=t, y=p_1(t, k_on, k_off))
    plt.legend(k_offs)
    plt.ylim(0, 1)
    plt.title("k_on={k_on}".format(k_on=k_on))
    plt.ylabel("chance of some active state (any length)")
    plt.xlabel("minutes")
    plt.vlines(x=window_lengths, ymin=0, ymax=1, linestyles='dashed', colors='black')
    # NOTE(review): the loop variable k_off shadows the module constant here,
    # so the saved filename contains the LAST value of k_offs, not the module
    # k_off — confirm this is intended.
    plt.savefig(plot_dir + dir_sep + "theoretical_chance_active_{k_on}_{k_off}_{k_syn}.svg".format(
        k_on=k_on, k_off=k_off, k_syn=k_syn))
    plt.close(1)
def plot_production_of_mrna():
    """Plot the expected (no-decay) mRNA production over time for each k_off
    in the module-level k_offs list and save the figure.

    Uses the module globals k_on, k_offs, k_syn, k_eff, window_lengths,
    plot_dir and dir_sep.
    """
    t = np.linspace(0, 400, 100)
    for k_off in k_offs:
        y = nr_molecules_in_window_no_decay(t, k_on, k_off, k_syn, k_eff)
        sns.lineplot(x=t, y=y, label="k_off={k_off}".format(k_off=k_off))
    plt.legend()
    plt.title("k_on={k_on}".format(k_on=k_on))
    plt.ylabel("average nr of molecules produced")
    plt.xlabel("minutes")
    # NOTE: ymax uses 'y' from the last loop iteration (smallest p_on curve
    # last in k_offs ordering) — the vlines stop at that curve's maximum.
    plt.vlines(x=window_lengths, ymin=0, ymax=max(y), linestyles='dashed', colors='black')
    # NOTE(review): as in the sibling plot, the leaked loop variable k_off
    # (last element of k_offs) ends up in the filename.
    plt.savefig(plot_dir + dir_sep + "theoretical_production_mrna_{k_on}_{k_off}_{k_syn}.svg".format(
        k_on=k_on, k_off=k_off, k_syn=k_syn))
    plt.close(1)
def run_active_state_is_present_simulations(label, nr_runs):
    """For every window length, run nr_runs transcription simulations and
    count how many runs (a) contain an active state in the labeling window,
    (b) produce at least one labeled transcript, and (c) retain at least one
    transcript after sampling.

    :param label: label name of the window to evaluate (e.g. "4SU")
    :param nr_runs: number of simulation runs per window length
    :return: DataFrame with columns window/active/real/signal (also written
        to out_dir/df_filename as a ';'-separated CSV)
    """
    l_counts = []
    for w in window_lengths:
        nr_runs_active = 0
        nr_real_label = 0
        nr_signal_label = 0
        windows, fix_time = get_windows_and_fix_time(length_window=w, gap=0)
        params = TranscriptParams(k_on=k_on, k_off=k_off, nr_refractions=1,
                                  tm_id=np.nan,
                                  k_syn=k_syn, k_d=k_d,
                                  coord_group=0,
                                  name="test",
                                  tran_type="S")
        trans = Transcription(params)
        # set complete_trace=True to retrieve the complete trace of transcripts counts (for plotting)
        for run in range(0, nr_runs):
            df_dtmc, dtmc_list = trans.run_bursts(fix_time, windows, new_dtmc_trace=True, complete_trace=False)
            df_transcripts = trans.df_transcripts
            df_labeled_transcripts = df_transcripts[df_transcripts.label == label]
            if len(df_labeled_transcripts) > 0:
                nr_real_label = nr_real_label + 1
            # TODO: sampling should be done differently
            # here we are taking a fixed percentage
            len_sample = int(k_eff * len(df_labeled_transcripts))
            # NOTE(review): the sample is drawn from df_transcripts (all
            # transcripts) while the sample size comes from the labeled
            # subset — confirm whether df_labeled_transcripts was intended.
            df_sampled = df_transcripts.sample(len_sample, replace=False)
            if len(df_sampled) > 0:
                nr_signal_label = nr_signal_label + 1
            # example of calculating percentage active
            perc = Experiment.perc_active_state(windows, df_dtmc, label)
            # print("Percentage active state: {perc}".format(perc=perc))
            if perc > 0:
                nr_runs_active = nr_runs_active + 1
        print("{label} window contains {nr_runs_active} runs with active state(s) for k_off {k_off} and window {window}".
              format(label=label, k_off=k_off, window=w, nr_runs_active=nr_runs_active))
        l_counts.append([w, nr_runs_active, nr_real_label, nr_signal_label])
    df_counts = pd.DataFrame(l_counts, columns=["window", "active", "real", "signal"])
    df_counts.to_csv(out_dir + dir_sep + df_filename, sep=';', index=False)
    return df_counts
def plot_chance_of_switching_to_active_state(df_counts, nr_runs):
    """Plot simulated fractions (active state / real counts / detected
    counts) against window length together with the theoretical p_1 curve,
    and save the figure.

    :param df_counts: DataFrame with window/active/real/signal/theoretical
    :param nr_runs: number of runs used, for converting counts to fractions
    """
    # we want to convert to
    plt.plot(df_counts.window, df_counts.active/nr_runs, label='with active state')
    plt.plot(df_counts.window, df_counts.real/nr_runs, label='with real counts')
    plt.plot(df_counts.window, df_counts.signal/nr_runs, label='with detected counts')
    plt.plot(df_counts.window, df_counts.theoretical, color="red", label="theoretical")
    plt.xlim(0, max(window_lengths) + 15)
    # plt.ylim(0, 1)
    plt.xlabel("window size (minutes)")
    # NOTE(review): the y-values are fractions of runs, not raw counts.
    plt.ylabel("nr of runs")
    plt.legend()
    plt.savefig(plot_dir + dir_sep + "counts_{k_on}_{k_off}_{k_syn}.svg".format(
        k_on=k_on, k_off=k_off, k_syn=k_syn))
    plt.close(1)
def fit_to_model_p1(nr_runs):
    """Fit p_1_model to the three simulated curves (hidden active state,
    real counts, sampled counts) and print the inferred k_on with its
    relative error against the true module-level k_on.

    NOTE(review): reads the module-global df_counts rather than taking it
    as a parameter — consider passing it in explicitly.
    """
    # Initial guesses for (k_on, p_on, p_off).
    expected = (0.1, 0.5, 0.5)
    # divide by nr_runs for getting chance
    popt, pcov = curve_fit(p_1_model, df_counts.window, df_counts.active / nr_runs, expected)
    popt_active = popt
    error_k_on_active = abs(popt_active[0] / k_on - 1) * 100
    popt, pcov = curve_fit(p_1_model, df_counts.window, df_counts.real / nr_runs, expected)
    popt_real = popt
    error_k_on_real = abs(popt_real[0] / k_on - 1) * 100
    popt, pcov = curve_fit(p_1_model, df_counts.window, df_counts.signal / nr_runs, expected)
    popt_signal = popt
    error_k_on_signal = abs(popt_signal[0] / k_on - 1) * 100
    print("fitting to hidden state: k_on={k_on}; error={error}%".format(
        k_on=round_sig(popt_active[0], 4), error=round_sig(error_k_on_active, 3)))
    print("fitting to real counts: k_on={k_on}; error={error}%".format(
        k_on=round_sig(popt_real[0], 4), error=round_sig(error_k_on_real, 3)))
    # NOTE(review): unlike the two calls above, round_sig is called here
    # without the explicit 4-digit argument — confirm this inconsistency.
    print("fitting to sampled counts: k_on={k_on}; error={error}%".format(
        k_on=round_sig(popt_signal[0]), error=round_sig(error_k_on_signal, 3)))
# Either rerun the simulations or reuse the cached CSV from a previous run.
run_sim = False
nr_runs = 500
if run_sim:
    label = "4SU"
    df_counts = run_active_state_is_present_simulations(label, nr_runs)
else:
    df_counts = pd.read_csv(out_dir + dir_sep + df_filename, sep=';')
plot_theoretical_chance_of_active_state()
plot_production_of_mrna()
# Add the theoretical p_1 curve for comparison/fitting.
df_counts["theoretical"] = p_1(df_counts["window"], k_on, k_off)
plot_chance_of_switching_to_active_state(df_counts, nr_runs)
fit_to_model_p1(nr_runs)
| resharp/scBurstSim | analysis/infer_parameters_example.py | infer_parameters_example.py | py | 7,414 | python | en | code | 3 | github-code | 36 |
71873731623 | import pygame
from Helper.global_variables import *
from Helper.text_helper import drawTextcenter, drawText
pygame.init()
def update_display(win, height, color_height, numswaps, algorithm, number_of_elements, speed, time, running):
    """Redraw the whole visualizer frame: bars, separator lines, status text,
    all control buttons, and (while a sort is running) apply the speed delay.

    The BLACK/TURQUOISE/WIDTH constants and all button_* objects come from
    Helper.global_variables (imported with *).
    """
    win.fill(BLACK)
    # call show method to display the list items
    show(win, height, color_height, number_of_elements)
    # Draw the 15-pixel-thick header border and its three vertical dividers.
    for i in range(15):
        pygame.draw.line(win, TURQUOISE, (0, 165+i), (WIDTH, 165+i))
        pygame.draw.line(win, TURQUOISE, (1060+i,0), (1060+i,165))
        pygame.draw.line(win, TURQUOISE, (730+i,0), (730+i,165))
        pygame.draw.line(win, TURQUOISE, (230+i,0), (230+i,165))
    # Status readouts in the header panels.
    drawTextcenter("Number of swaps: " + str(numswaps), pygame.font.SysFont('Calibri', 20), win, 100, 25, WHITE)
    drawTextcenter("Time elapsed: " + str(format(time, ".1f")) + "s", pygame.font.SysFont('Calibri', 20), win, 100, 75, WHITE)
    drawTextcenter("Algorithm used: " + algorithm, pygame.font.SysFont('Calibri', 20), win, 475, 25, WHITE)
    drawTextcenter("Number of elements: " + str(number_of_elements), pygame.font.SysFont('Calibri', 20), win, 900, 25, WHITE)
    drawTextcenter("Algorithm speed: " + speed, pygame.font.SysFont('Calibri', 20), win, 1225, 25, WHITE)
    button_start.draw(win)
    button_reset.draw(win)
    button_bubble_sort.draw(win)
    button_insertion_sort.draw(win)
    button_selection_sort.draw(win)
    button_merge_sort.draw(win)
    button_heap_sort.draw(win)
    button_quick_sort.draw(win)
    button_radix_sort.draw(win)
    button_todo4.draw(win)
    button_20.draw(win)
    button_50.draw(win)
    button_75.draw(win)
    button_100.draw(win)
    button_slow.draw(win)
    button_medium.draw(win)
    button_fast.draw(win)
    button_instant.draw(win)
    # create a time delay
    if(running == True):
        delay = 0
        # NOTE(review): "Slow" pauses 5000 ms per frame — confirm this very
        # long pause is intended.
        if(speed == "Slow"):
            delay = 5000
            pygame.time.delay(delay)
        if(speed == "Medium"):
            delay = 50
            pygame.time.delay(delay)
        if(speed == "Fast"):
            delay = 25
            pygame.time.delay(delay)
        # "No delay" assigns delay but deliberately never sleeps.
        if(speed == "No delay"):
            delay = 0
    # update the display
    pygame.display.update()
# method to show the list of height
def show(win, height, color_height, number_of_elements):
    """Draw one bar per list element, scaled so the tallest bar fills 3/4 of
    the window height; each bar is rendered as a Button labeled with its
    value. A number_of_elements of -1 or an empty list draws nothing.
    """
    if(number_of_elements != -1 and len(height) != 0):
        maximum_value = max(height)
        # Horizontal space allotted to each bar.
        step = (WIDTH/len(height))
        for i in range(len(height)):
            # Negative width/height draw the bar leftwards/upwards from the
            # bottom-right anchor (step*(i+1), HEIGHT).
            x = Button(step * (i+1), HEIGHT, -(step), -(height[i]/maximum_value)*3*HEIGHT/4, BLACK, color_height[i], str(height[i]), int(round(step - 20)))
            x.draw(win)
# SWEA 1859 "Millionaire project": buy one unit each day and sell everything
# at the next global maximum price; repeat on the remaining days.
case_num = int(input())
for c_num in range(1, case_num+1):
    input_len = int(input())
    price_lst = list(map(int, input().split()))
    my_profit = 0
    while True:
        if len(price_lst) == 0:
            break
        # Index of the (first) maximum of the remaining prices.
        # NOTE(review): max + index rescans the list, so worst case is
        # O(n^2); a single right-to-left running-max pass is O(n).
        max_idx = price_lst.index(max(price_lst))
        p_left = price_lst[:max_idx+1]
        price_lst = price_lst[max_idx+1:] # right part; when its length is 0 the while loop breaks
        # Process the left part: everything bought before the peak is sold
        # at the peak price.
        # process_tst = [max_tmp - i for i in max_tst[:len(max_tst)-1]]
        # [1 3 5 7]
        max_tmp = p_left[-1]
        profit_list_left = [max_tmp - i for i in p_left[:len(p_left)-1]]
        my_profit += sum(profit_list_left)
    print(f'#{c_num} {my_profit}')
| devjunmo/PythonCodingTest | SWEA/D2/1859. 백만 장자 프로젝트.py | 1859. 백만 장자 프로젝트.py | py | 746 | python | en | code | 0 | github-code | 36 |
36375251491 | from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from moderation.moderator import GenericModerator
from moderation.tests.apps.test_app1.models import UserProfile,\
ModelWithModeratedFields
from moderation.tests.utils.testsettingsmanager import SettingsTestCase
from moderation.tests.utils import setup_moderation, teardown_moderation
class ExcludeAcceptanceTestCase(SettingsTestCase):
    '''
    As developer I want to have way to ignore/exclude model fields from
    moderation.
    Registers a moderator that excludes ``url`` and checks that the
    moderation admin diff view only shows changes to non-excluded fields.
    '''
    # Fixtures are expected to provide the 'admin'/'moderator'/'user1'
    # accounts and seed moderation data used below.
    fixtures = ['test_users.json', 'test_moderation.json']
    test_settings = 'moderation.tests.settings.generic'
    urls = 'moderation.tests.urls.default'
    def setUp(self):
        self.client.login(username='admin', password='aaaa')
        # Moderator that leaves the ``url`` field out of moderation.
        class UserProfileModerator(GenericModerator):
            fields_exclude = ['url']
        setup_moderation([(UserProfile, UserProfileModerator)])
    def tearDown(self):
        teardown_moderation()
    def test_excluded_field_should_not_be_moderated_when_obj_is_edited(self):
        '''
        Change field that is excluded from moderation,
        go to moderation admin: the url diff must not be listed.
        '''
        profile = UserProfile.objects.get(user__username='moderator')
        profile.url = 'http://dominno.pl'
        profile.save()
        url = reverse('admin:moderation_moderatedobject_change',
                      args=(profile.moderated_object.pk,))
        response = self.client.get(url, {})
        # Per-field (old, new) diffs rendered by the moderation admin.
        changes = [change.change for change in response.context['changes']]
        self.assertFalse((u'http://www.google.com',
                          u'http://dominno.pl') in changes)
    def test_non_excluded_field_should_be_moderated_when_obj_is_edited(self):
        '''
        Change field that is not excluded from moderation,
        go to moderation admin: the description diff must be listed.
        '''
        profile = UserProfile.objects.get(user__username='moderator')
        profile.description = 'New description'
        profile.save()
        url = reverse('admin:moderation_moderatedobject_change',
                      args=(profile.moderated_object.pk,))
        response = self.client.get(url, {})
        changes = [change.change for change in response.context['changes']]
        self.assertTrue(("Old description", 'New description') in changes)
    def test_excluded_field_should_not_be_moderated_when_obj_is_created(self):
        '''
        Create new object; only non-excluded fields are used
        by the moderation system, so no url diff should appear.
        '''
        profile = UserProfile(description='Profile for new user',
                              url='http://www.dominno.com',
                              user=User.objects.get(username='user1'))
        profile.save()
        url = reverse('admin:moderation_moderatedobject_change',
                      args=(profile.moderated_object.pk,))
        response = self.client.get(url, {})
        changes = [change.change for change in response.context['changes']]
        self.assertFalse((u'http://www.dominno.com',
                          u'http://www.dominno.com') in changes)
class ModeratedFieldsAcceptanceTestCase(SettingsTestCase):
    """Check that a model's ``moderated_fields`` declaration excludes every
    field that is *not* listed, and leaves the listed fields moderated."""
    test_settings = 'moderation.tests.settings.generic'
    urls = 'moderation.tests.urls.default'
    def setUp(self):
        # Register the test model with the moderation machinery.
        setup_moderation([ModelWithModeratedFields])
    def tearDown(self):
        teardown_moderation()
    def test_moderated_fields_not_added_to_excluded_fields_list(self):
        from moderation import moderation
        registered = moderation._registered_models[ModelWithModeratedFields]
        # Fields named in moderated_fields must stay under moderation.
        self.assertNotIn('moderated', registered.fields_exclude)
        self.assertNotIn('also_moderated', registered.fields_exclude)
    def test_unmoderated_fields_added_to_excluded_fields_list(self):
        from moderation import moderation
        registered = moderation._registered_models[ModelWithModeratedFields]
        # Any field not named in moderated_fields ends up excluded.
        self.assertIn('unmoderated', registered.fields_exclude)
| arowla/django-moderation | src/moderation/tests/acceptance/exclude.py | exclude.py | py | 4,091 | python | en | code | null | github-code | 36 |
483706012 | import hashlib
import json
import os
import struct
import sys
import textwrap
from fnmatch import fnmatch
from pathlib import Path
from typing import Dict, List, Union
import cryptography
from cryptography.fernet import Fernet
if sys.version_info < (3, 8):
TypedDict = dict
else:
from typing import TypedDict
__version__ = "0.1.0"
#
# Helpers
#
def md5_hash_for_file(filepath):
    """Return the hex MD5 digest of the file at *filepath*.

    The original read the whole file into memory and never closed the
    handle; this version hashes in fixed-size chunks inside a context
    manager, so large files are supported and the handle is always closed.

    :param filepath: path of the file to hash.
    :return: lowercase hex digest string.
    """
    digest = hashlib.md5()
    with open(filepath, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 16), b""):
            digest.update(chunk)
    return digest.hexdigest()
def encrypt(key: str, fin: Union[str, Path], fout: Union[str, Path], *, block=1 << 16):
    """
    Encrypt *fin* chunk by chunk into *fout* so arbitrarily large files
    never have to fit in memory.

    Each plaintext chunk becomes one Fernet token written as a
    little-endian uint32 length prefix followed by the token bytes —
    exactly the framing ``decrypt`` expects.

    :param key: The Fernet key to use for encryption
    :param fin: The plaintext file to encrypt
    :param fout: The encrypted file to write to
    """
    cipher = cryptography.fernet.Fernet(key)
    with open(fin, "rb") as src, open(fout, "wb") as dst:
        while True:
            plain = src.read(block)
            if not plain:
                break
            token = cipher.encrypt(plain)
            dst.write(struct.pack("<I", len(token)))
            dst.write(token)
            # A short read means end of file: no need for another read call.
            if len(plain) < block:
                break
def decrypt(key: str, fin: Union[str, Path], fout: Union[str, Path]):
    """
    Decrypt *fin* chunk by chunk into *fout*.

    Reads the length-prefixed Fernet tokens produced by ``encrypt``:
    a little-endian uint32 size, then that many token bytes.

    :param key: The Fernet key to use for decryption
    :param fin: The encrypted file to decrypt
    :param fout: The decrypted file to write to
    """
    cipher = cryptography.fernet.Fernet(key)
    with open(fin, "rb") as src, open(fout, "wb") as dst:
        while True:
            prefix = src.read(4)
            if not prefix:
                # Clean end of stream: no more tokens.
                break
            (length,) = struct.unpack("<I", prefix)
            dst.write(cipher.decrypt(src.read(length)))
class VaultManifest(TypedDict):
    """
    A VaultManifest is a dictionary of files and their hashes, persisted
    as JSON next to the encrypted files.
    """
    # Used as a notice to indicate the file is machine generated
    _: str
    # The version of the manifest, used for backwards compatibility
    version: str
    # Maps each relative file path in the vault to its content hash
    files: Dict[str, str]
class VaultChangeSet(TypedDict):
    """
    Summary of how the decrypted working files differ from the state
    recorded in the manifest at the last encryption.
    """
    # additions + deletions + updates (unchanged files are not counted)
    total: int
    # Files present on disk but not in the manifest
    additions: List[str]
    # Files in the manifest but no longer on disk
    deletions: List[str]
    # Files whose content hash differs from the manifest entry
    updates: List[str]
    # Files whose content hash matches the manifest entry
    unchanged: List[str]
#
# DataVault
#
class DataVault:
    """A directory of working (decrypted) files plus a hidden
    ``.encrypted`` sub-directory holding their encrypted counterparts and
    a JSON manifest mapping each relative file path to its MD5 hash.
    """

    VERSION = 1
    MANIFEST_FILENAME = "vault_manifest.json"
    ENCRYPTED_NAMESPACE = ".encrypted"

    @staticmethod
    def find_all(path: Union[str, Path]) -> List["DataVault"]:
        """
        Returns a list of all vaults found recursively under *path*,
        sorted by vault directory.
        """
        # A vault is identified by a valid manifest living at
        # <vault>/.encrypted/vault_manifest.json
        manifest_paths = [
            manifest
            for manifest in Path(path).rglob(
                f"{DataVault.ENCRYPTED_NAMESPACE}/{DataVault.MANIFEST_FILENAME}"
            )
            if DataVault._verify_manifest(manifest)
        ]
        vault_dirs = [Path(manifest).parent.parent for manifest in manifest_paths]
        return [DataVault(vault_dir) for vault_dir in sorted(vault_dirs)]

    @staticmethod
    def _verify_manifest(vault_manifest_path: Union[str, Path]) -> bool:
        """
        Verifies that the file at *vault_manifest_path* is a readable,
        well-formed manifest of the current VERSION.
        """
        try:
            with open(vault_manifest_path, "r") as f:
                manifest = json.load(f)
        except Exception:
            # Unreadable or not JSON: treat as "not a manifest".
            return False
        if not isinstance(manifest.get("_"), str):
            return False
        if not isinstance(manifest.get("files"), dict):
            return False
        return manifest.get("version") == DataVault.VERSION

    @staticmethod
    def generate_secret() -> str:
        """
        Generates a fresh vault key. Keep this some place safe! If you lose it
        you'll no longer be able to decrypt vaults; if anyone else gains
        access to it, they'll be able to decrypt all of your messages, and
        they'll also be able forge arbitrary messages that will be
        authenticated and decrypted.

        Uses Fernet to generate a key. See:
        https://cryptography.io/en/latest/fernet/
        """
        return Fernet.generate_key().decode("utf-8")

    def __init__(self, path: Union[str, Path]):
        # Layout: <root>/.encrypted/vault_manifest.json
        self.root_path = Path(path)
        self.encrypted_path = self.root_path / DataVault.ENCRYPTED_NAMESPACE
        self.vault_manifest_path = self.encrypted_path / DataVault.MANIFEST_FILENAME

    def create(self) -> None:
        """
        Creates the file paths for a new vault with an empty manifest.

        Raises FileExistsError if the vault's standard paths already exist.
        (The original annotated ``-> str`` but never returned a value.)
        """
        self.root_path.mkdir(exist_ok=False)
        self.encrypted_path.mkdir(exist_ok=False)
        self._create_gitignore()
        self._reset_manifest()
        self._verify_or_explode()

    def encrypt(self, secret_key: str) -> None:
        """
        Encrypts all decrypted files in the data vault that have changed
        since the last encryption, then persists the updated manifest.
        """
        self._create_gitignore()  # Just in case
        self._verify_or_explode()
        changes = self.changes()
        for f in changes["additions"]:
            encrypt(secret_key, self.root_path / f, self.encrypted_path / f)
        for f in changes["updates"]:
            os.remove(os.path.join(self.encrypted_path, f))
            encrypt(secret_key, self.root_path / f, self.encrypted_path / f)
        for f in changes["deletions"]:
            os.remove(os.path.join(self.encrypted_path, f))
        # Write the new manifest describing what is now encrypted.
        with open(self.vault_manifest_path, "w") as f:
            json.dump(self._next_manifest(), f, indent=2)

    def decrypt(self, secret_key: str) -> None:
        """
        Replaces all decrypted files with freshly decrypted copies of the
        encrypted ones.
        """
        self._create_gitignore()  # Just in case
        self._verify_or_explode()
        # Delete all decrypted files first so deletions propagate.
        for f in self.files():
            os.remove(os.path.join(self.root_path, f))
        for f in self.encrypted_files():
            decrypt(secret_key, self.encrypted_path / f, self.root_path / f)

    def verify(self) -> bool:
        """
        Returns True if a valid vault exists for the given path.
        """
        try:
            self._verify_or_explode()
            return True
        except Exception:  # narrowed from a bare except:
            return False

    def files(self) -> List[str]:
        """
        Returns all decrypted files in the vault, recursively, as paths
        relative to the vault root.  The ``.encrypted`` directory and the
        vault's own ``.gitignore`` are skipped; files matching a pattern
        from the user's or the current directory's ``.gitignore`` are
        filtered out.
        """
        files = []
        for entry in os.listdir(self.root_path):
            # Skip the encrypted namespace and the vault's own ignore file.
            if entry in (DataVault.ENCRYPTED_NAMESPACE, ".gitignore"):
                continue
            entry_path = self.root_path / entry
            if entry_path.is_dir():
                # BUGFIX: the original walked the *current working directory*
                # (os.walk(".")) and made paths relative to the encrypted
                # directory.  Walk the subdirectory itself and report paths
                # relative to the vault root.  Note: only files with an
                # extension are picked up here, mirroring encrypted_files().
                for dirpath, _dirnames, filenames in os.walk(entry_path):
                    for name in filenames:
                        if os.path.splitext(name)[1]:
                            files.append(
                                f"{(Path(dirpath) / name).relative_to(self.root_path)}"
                            )
            else:
                files.append(entry)
        # Collect .gitignore patterns, one per non-comment line.
        # BUGFIX: the original appended whole file *contents* as a single
        # fnmatch pattern, which could never match a file name.
        ignore_patterns = []
        for ignore_file in (Path.home() / ".gitignore", Path.cwd() / ".gitignore"):
            if ignore_file.exists():
                with open(ignore_file, "r") as fh:
                    for line in fh.read().splitlines():
                        line = line.strip()
                        if line and not line.startswith("#"):
                            ignore_patterns.append(line)
        return [
            name
            for name in files
            if not any(fnmatch(name, pattern) for pattern in ignore_patterns)
        ]

    def encrypted_files(self):
        """
        Returns all encrypted files in the vault as paths relative to the
        encrypted directory.  The manifest itself and files without an
        extension are skipped.
        """
        files = []
        for dirpath, _dirnames, filenames in os.walk(self.encrypted_path):
            for name in filenames:
                if name != DataVault.MANIFEST_FILENAME and os.path.splitext(name)[1]:
                    files.append(
                        f"{(Path(dirpath) / name).relative_to(self.encrypted_path)}"
                    )
        return files

    def is_empty(self) -> bool:
        """
        Returns True if the vault contains no decrypted files.
        """
        return len(self.files()) == 0

    def changes(self) -> "VaultChangeSet":
        """
        Returns a summary of the changes to the vault since the last
        encryption.
        """
        updates, additions, deletions = (
            self.updates(),
            self.additions(),
            self.deletions(),
        )
        return {
            "total": len(updates) + len(additions) + len(deletions),
            "additions": additions,
            "deletions": deletions,
            "updates": updates,
            "unchanged": [
                f for f in self.files() if f not in set(updates + additions + deletions)
            ],
        }

    def has_changes(self):
        """
        Returns True if there are changes to the data in the vault.
        """
        return self.changes()["total"] > 0

    def additions(self) -> List[str]:
        """
        Returns files that are on disk but not in the vault manifest.
        """
        manifest_files = set(self.manifest()["files"])
        return [f for f in self.files() if f not in manifest_files]

    def deletions(self) -> List[str]:
        """
        Returns files that are in the vault manifest but no longer on disk.
        """
        return [f for f in self.manifest()["files"] if f not in self.files()]

    def updates(self) -> List[str]:
        """
        Returns files whose on-disk hash differs from the hash recorded in
        the persisted manifest, i.e. files changed since the last
        encryption.
        """
        current_manifest = self.manifest()["files"]
        next_manifest = self._next_manifest()["files"]
        updates = []
        for file, digest in current_manifest.items():  # 'digest' avoids shadowing hash()
            if not next_manifest.get(file):
                continue
            if digest == next_manifest[file]:
                continue
            updates.append(file)
        return updates

    def manifest(self) -> "VaultManifest":
        """
        Reads the currently persisted vault manifest file.
        """
        with open(self.vault_manifest_path, "r") as f:
            return json.load(f)

    def no_encypted_files(self) -> bool:
        """
        Returns True if the encrypted directory holds no encrypted files.
        (Name kept as-is — typo and all — for backward compatibility.)
        """
        return len(self.encrypted_files()) == 0

    def clear(self) -> None:
        """
        Deletes all decrypted files from the data vault.
        """
        for f in self.files():
            os.remove(os.path.join(self.root_path, f))

    def clear_encrypted(self) -> None:
        """
        Deletes all encrypted files and resets the manifest.
        """
        for f in self.encrypted_files():
            os.remove(os.path.join(self.encrypted_path, f))
        # The manifest must be reset too, otherwise the vault would claim
        # files that no longer exist and become invalid.
        self._reset_manifest()

    def _verify_or_explode(self) -> None:
        """
        Verifies the vault has the correct structure and vault manifest,
        and that every file listed in the manifest is actually encrypted.
        Raises FileNotFoundError describing the first problem found.
        """
        if not self.root_path.exists():
            raise FileNotFoundError(
                f"Vault does not exist at given path: {self.root_path}"
            )
        if not self.encrypted_path.exists():
            raise FileNotFoundError(
                f"Vault encrypted directory does not exist at given path: {self.encrypted_path}"
            )
        if not DataVault._verify_manifest(self.vault_manifest_path):
            raise FileNotFoundError(
                f"Vault manifest is invalid at given path: {self.vault_manifest_path}"
            )
        if not (self.root_path / ".gitignore").exists():
            raise FileNotFoundError(
                f"Vault .gitignore file does not exist at given path: {self.root_path / '.gitignore'}"
            )
        # All files in the manifest must have an encrypted counterpart.
        missing_files = [
            f
            for f in self.manifest()["files"]
            if not os.path.exists(os.path.join(self.encrypted_path, f))
        ]
        if len(missing_files) > 0:
            # BUGFIX: the original called textwrap.deindent(), which does not
            # exist — it would have raised AttributeError and masked this
            # diagnostic entirely.
            raise FileNotFoundError(
                textwrap.dedent(
                    f"""
                    Vault manifest contains files that are not encrypted: {missing_files}
                    >>> THIS SHOULD NOT HAPPEN AND IS CONSIDERED A SERIOUS ISSUE. <<<
                    Check your vault directory {self.root_path} for the decrypted
                    version of these files. If you can't find them there, you may need
                    to search for an older version of the vault in version control. Otherwise,
                    these files have likely been entirely lost.
                    Once the files have been found, there are several ways to recover the vault:
                    1. Recreate the vault from scratch.
                    2. Remove the files from the autogenerated vault manifest ({self.vault_manifest_path})
                    and rerun the vault encryption.
                    If you do not need these files, you can simply delete them from the manifest.
                    """
                )
            )

    #
    # Private helpers
    #

    def _create_gitignore(self):
        """
        Creates a .gitignore in the vault root that hides everything except
        the encrypted namespace from version control.
        """
        with open(os.path.join(self.root_path, ".gitignore"), "w") as f:
            f.write("/*\n")
            f.write(f"!/{DataVault.ENCRYPTED_NAMESPACE}\n")

    def _reset_manifest(self):
        """
        Persists an empty vault manifest.
        """
        with open(self.vault_manifest_path, "w") as f:
            json.dump(self._empty_vault_manifest(), f, indent=2)

    def _empty_vault_manifest(self) -> "VaultManifest":
        """
        Returns an empty vault manifest as a dict.
        """
        return {
            "_": "DO NOT EDIT THIS FILE. IT IS AUTOMATICALLY GENERATED.",
            "version": self.VERSION,
            "files": {},
        }

    def _next_manifest(self) -> "VaultManifest":
        """
        Returns the manifest that should be persisted after the next
        encryption: one MD5 entry per decrypted file currently on disk.
        """
        return {
            "_": "DO NOT EDIT THIS FILE. IT IS AUTOMATICALLY GENERATED.",
            "version": self.VERSION,
            "files": {f: md5_hash_for_file(self.root_path / f) for f in self.files()},
        }
| dihi/datavault | dihi_datavault/__init__.py | __init__.py | py | 14,958 | python | en | code | 0 | github-code | 36 |
28356972055 |
import logging
import sys
from kodi_interface import KodiObj
LOGGING = logging.getLogger(__name__)
def get_input(prompt: str = "> ", choices: list = None, required: bool = False) -> str:
    """Prompt on stdin until an acceptable value is entered.

    :param prompt: text displayed before reading input.
    :param choices: optional list of valid answers; the user is re-prompted
        until the entry matches one of them.
    :param required: when True (and no choices are given) the user is
        re-prompted until a non-empty value is entered.
    :return: the accepted input string.
    """
    # Avoid the shared-mutable-default pitfall of the original
    # (``choices: list = []``): normalise None to a fresh list per call.
    if choices is None:
        choices = []
    ret_val = input(prompt)
    if choices:
        while ret_val not in choices:
            print(f'Invalid selection. Valid entries: {"/".join(choices)}')
            ret_val = input(prompt)
    elif required:
        while not ret_val:
            print('You MUST enter a value.')
            ret_val = input(prompt)
    return ret_val
def setup_logging(log_level = logging.ERROR):
    """Configure root logging once with a terse ``[LEVEL] message`` format.

    :param log_level: initial threshold for the root logger.
    """
    message_format = '[%(levelname)-5s] %(message)s'
    logging.basicConfig(format=message_format, level=log_level)
def set_loglevel(log_level: str):
    """Set the root logger level from a one-letter code.

    "E" selects ERROR, "I" selects INFO and anything else falls back to
    DEBUG — identical mapping to the original if/elif chain.
    """
    level_map = {"E": logging.ERROR, "I": logging.INFO}
    logging.getLogger().setLevel(level_map.get(log_level, logging.DEBUG))
def dump_methods(kodi: KodiObj):
    """Interactively walk every Kodi namespace and print help for its methods.

    For each namespace the user chooses whether to display it (y/n) or quit
    (q).  Inside a namespace, each method prompt additionally accepts E/I/D
    to switch the log level on the fly, 'n' to skip the rest of the
    namespace, 'q' to exit the program, or Enter to just show the help.

    :param kodi: KodiObj used to enumerate namespaces and methods.
    """
    namespaces = kodi.get_namespace_list()
    for ns in namespaces:
        resp = get_input(f"Display: {ns} (y|n|q)> ",['y','n','Y','N','Q','q']).lower()
        if resp == "q":
            break
        elif resp == 'y':
            ns_methods = kodi.get_namespace_method_list(ns)
            for method in ns_methods:
                resp = get_input(f'{ns}.{method} (E,I,D,n,q)> ',['E','I','D','y','n','q',''])
                if resp in ['E','I','D']:
                    # Adjust verbosity, then fall through and show the help.
                    set_loglevel(resp)
                elif resp == 'q':
                    sys.exit()
                elif resp == 'n':
                    # Skip the remaining methods of this namespace.
                    break
                cmd = f'{ns}.{method}'
                print(cmd)
                kodi.help(cmd)
                print()
        # Visual separator between namespaces.
        print('\n=========================================================================')
def main():
    """Entry point: configure logging at ERROR level, create a KodiObj and
    interactively dump help for its namespaces/methods."""
    setup_logging()
    log_level = "E"  # default verbosity: errors only
    set_loglevel(log_level)
    kodi = KodiObj()
    # kodi.help("")
    # pause()
    # kodi.help("Application")
    # pause()
    # kodi.help('AudioLibrary.GetArtists')
    dump_methods(kodi)
if __name__ == "__main__":
    main()
| JavaWiz1/kodi-cli | kodi_help_tester.py | kodi_help_tester.py | py | 2,077 | python | en | code | 6 | github-code | 36 |
19033902872 | """Module contains functionality for parsing HTML page of a particular vulnerability."""
import re
import urllib.request
from lxml import etree
from cve_connector.vendor_cve.implementation.parsers.general_and_format_parsers\
.html_parser import HtmlParser
from cve_connector.vendor_cve.implementation.parsers.vendor_parsers.cisco_parsers\
.cisco_cvrf import CiscoXmlParser
from cve_connector.vendor_cve.implementation.vendors_storage_structures.cisco import Cisco
from cve_connector.vendor_cve.implementation.vulnerability_metrics.cvss_v3_metrics import CvssV3
from cve_connector.vendor_cve.implementation.utilities.check_correctness \
import is_correct_cve_id, is_correct_cwe, is_correct_score, \
is_correct_vector_v3
from cve_connector.vendor_cve.implementation.utilities.utility_functions \
import normalize_string, concat_strings, get_current_date, \
string_to_date, get_number_from_string
class CiscoVulnerabilityParser(HtmlParser):
    """
    Contains functionality for parsing the HTML advisory page of a
    specific Cisco CVE into ``Cisco`` vulnerability entities.
    """
    def __init__(self, url, logger, from_date=None, to_date=None):
        super().__init__(url, from_date, to_date)
        self.date_format = '%Y %B %d'  # e.g. "2018 January 4"
        self.load_content()
        # Parsed state accumulated across the parse_* helpers:
        self.cve_details_dict = {}      # CVE id -> details text
        self.parsed_cve_ids = []
        self.parsed_summary = ''
        self.parsed_advisory_id = ''
        self.parsed_cwes = []
        self.parsed_cvss_base = ''
        self.parsed_cvss_temporal = ''
        self.parsed_attack_vector = ''
        self.parsed_severity = ''
        self.parsed_analysis = ''
        self.parsed_date = get_current_date()
        self.patched = False
        self.logger = logger
    def get_content_from_ulr(self):
        """
        Gets and returns content from URL.
        (Name kept as-is — typo and all — for backward compatibility.)
        :return: content
        :raises ConnectionError: when the server does not answer with 200
        """
        response = urllib.request.urlopen(self.url)
        try:
            # BUGFIX: the original leaked the response on the non-200 path.
            if response.getcode() != 200:
                self.logger.info("Cisco - get_content_from_url()")
                raise ConnectionError('Unable to load ', self.url)
            return response.read()
        finally:
            response.close()
    def parse(self):
        """
        Provides parsing functionality: header metadata first, then the
        CVRF XML if the advisory links one, otherwise the HTML body.
        :return: False when the page lacks the expected containers,
            None otherwise
        """
        content_list = self.data.xpath(
            './/div[@id="advisorycontentcontainer"]//div[@class="mainContent"]')
        if not content_list:
            return False
        content = content_list[0]
        advisory_header_list = content.xpath('.//div[@id="advisorycontentheader"]')
        if not advisory_header_list:
            return False
        advisory_header = advisory_header_list[0]
        self.parse_header_items(advisory_header)
        link_to_xml_content = self.get_xml_link(advisory_header)
        correct_parsed_xml = False
        if link_to_xml_content != '':
            correct_parsed_xml = self.parse_xml(link_to_xml_content)
        if link_to_xml_content == '' or not correct_parsed_xml:
            # Fall back to parsing the HTML body directly.
            advisory_content_body = content.xpath('.//div[@id="advisorycontentbody"]')[0]
            self.parse_header_items(advisory_header)
            self.parsed_summary = self.parse_summary(advisory_content_body)
            # BUGFIX: the original discarded the return value, so
            # parsed_analysis always stayed ''.
            self.parsed_analysis = self.parse_analysis(advisory_content_body)
            self.check_patched(advisory_content_body)
            if len(self.parsed_cve_ids) == 1:
                i = self.parsed_cve_ids[0]
                self.cve_details_dict[i] = self.parse_details_one_cve(content)
            else:
                details_dict = self.parse_details_more_cves(content)
                self.complete_cve_dictionary(details_dict)
        if correct_parsed_xml:
            self.complete_xml_parsing()
        self.complete_entities()
    def complete_xml_parsing(self):
        """
        Copies the HTML-header metadata onto the entities produced by the
        XML (CVRF) parser.
        :return: None
        """
        for item in self.entities:
            item.severity = self.parsed_severity
            item.cwes.extend(self.parsed_cwes)
            item.advisory_id = self.parsed_advisory_id
            item.attack_vector = self.parsed_attack_vector
            if self.parsed_cvss_base != '' and is_correct_score(self.parsed_cvss_base):
                cvss_v3 = CvssV3(base_sc=self.parsed_cvss_base)
                if self.parsed_cvss_temporal != '' \
                        and is_correct_score(self.parsed_cvss_temporal):
                    cvss_v3.temporal_sc = self.parsed_cvss_temporal
                item.cvss_v3 = cvss_v3
            item.cvss_base_sc_v3 = self.parsed_cvss_base
            item.cvss_temporal_score_v3 = self.parsed_cvss_temporal
            item.published = self.parsed_date
    def complete_entities(self):
        """
        Builds one Cisco entity per parsed CVE and appends the valid ones
        to ``self.entities``.
        :return: None
        """
        for item in self.cve_details_dict:
            cisco = Cisco(cve=item)
            cisco.details = self.cve_details_dict[item]
            cisco.summary = self.parsed_summary
            cisco.advisory_id = self.parsed_advisory_id
            cisco.attack_vector = self.parsed_attack_vector
            cisco.cvss_temporal_score_v3 = self.parsed_cvss_temporal
            cisco.cvss_base_sc_v3 = self.parsed_cvss_base
            if self.parsed_cvss_base != '' and is_correct_score(self.parsed_cvss_base):
                cvss_v3 = CvssV3(base_sc=self.parsed_cvss_base)
                if self.parsed_cvss_temporal != '' and is_correct_score(self.parsed_cvss_temporal):
                    cvss_v3.temporal_sc = self.parsed_cvss_temporal
                cisco.cvss_v3 = cvss_v3
            cisco.severity = self.parsed_severity
            cisco.analysis = self.parsed_analysis
            cisco.description = self.parsed_summary + ' ' \
                + self.parsed_analysis + ' ' + self.cve_details_dict[item]
            cisco.published = self.parsed_date
            cisco.patch_available = self.patched
            for cwe in self.parsed_cwes:
                if is_correct_cwe(cwe):
                    cisco.cwes.append(cwe)
            if cisco.is_valid_entity():
                self.entities.append(cisco)
    def complete_cve_dictionary(self, dct):
        """
        Fills ``cve_details_dict`` for every parsed CVE id, using '' for
        ids that have no entry in *dct*.
        :param dct: properties of CVEs to be set (dictionary)
        :return: None
        """
        for cve in self.parsed_cve_ids:
            dict_value = ''
            if cve in dct:
                dict_value = dct[cve]
            self.cve_details_dict[cve] = dict_value
    def get_xml_link(self, content):
        """
        Extracts the CVRF XML download link from the content.
        :param content: downloaded content
        :return: XML link or empty string
        """
        xml_link_list = content.xpath('.//a[contains(text(), "Download CVRF")]/@href')
        return xml_link_list[0] if xml_link_list else ''
    def parse_xml(self, link):
        """
        Parses the CVRF XML downloaded from *link* and extends
        ``self.entities`` with its result.
        :param link: download link
        :return: True if successful
        """
        parser = CiscoXmlParser(link)
        try:
            parser.load_content()
        except ConnectionError as conn_err:
            # BUGFIX: logger.error was called with a positional argument but
            # no %s placeholder, which fails at record-formatting time.
            self.logger.error('Cisco Parser - Error: %s', conn_err)
            return False
        except etree.ParseError as parse_err:
            self.logger.error('Cisco Parser - Error: %s', parse_err)
            return False
        parser.parse()
        entities = parser.entities
        self.entities.extend(entities)
        self.patched = True
        return True
    def parse_details_one_cve(self, content):
        """
        Parses the whole details section for an advisory with exactly one CVE.
        :param content: downloaded content
        :return: string containing details of CVE
        """
        details_list = content.xpath('.//div[@id="detailfield"]/span//text()')
        return concat_strings(details_list, ' ')
    def parse_details_more_cves(self, content):
        """
        Splits the details section into per-CVE chunks: text between two
        vulnerability headers is accumulated until a sentence naming the
        CVE id is found, then stored under that id.
        :param content: downloaded content
        :return: dictionary mapping CVE id -> details text
        """
        result = {}
        detail = ''
        header_appeared = False
        vuln_headers = content.xpath('.//*[self::strong or self::h3]/text()')
        details_list = content.xpath('.//div[@id="detailfield"]/span//text()')
        for item in details_list:
            item = normalize_string(item)
            if item == '':
                continue
            if item in vuln_headers:
                header_appeared = True
                detail = ''
            elif header_appeared:
                cve_match = self.cve_match(item)
                if cve_match == '':
                    detail += item
                else:
                    result[cve_match] = detail
                    detail = ''
        return result
    def cve_match(self, string):
        """
        Extracts a CVE id from the string.
        :param string: raw string that might contain a CVE id
        :return: cve or empty string
        """
        pattern_list = [r'assigned the following CVE ID: (CVE-\d+-\d+)',
                        r'ID for this vulnerability is: (CVE-\d+-\d+)']
        for pattern in pattern_list:
            match = re.search(pattern, string)
            if match:
                cve = match.group(1)
                if is_correct_cve_id(cve):
                    return cve
        return ''
    def parse_analysis(self, content):
        """
        Extracts and returns the analysis text from the content.
        :param content: downloaded content
        :return: analysis
        """
        analysis_list = content.xpath('.//div[@id="analysisfield"]//text()')
        analysis = ''
        for text in analysis_list:
            analysis += normalize_string(text)
        return str(analysis)
    def parse_summary(self, content):
        """
        Extracts and returns the summary text from the content.
        :param content: downloaded content
        :return: summary
        """
        summary_list = content.xpath('.//div[@id="summaryfield"]//text()')
        summary = ''
        for text in summary_list:
            summary += normalize_string(text)
        return summary
    def parse_severity(self, content):
        """
        Extracts and returns the severity label from the content.
        :param content: downloaded content
        :return: severity
        :raises ValueError: when the page does not hold exactly one severity
        """
        severity_list = content.xpath('.//div[@id="severitycirclecontent"]/text()')
        if len(severity_list) != 1:
            raise ValueError("Wrong parsed severity")
        return str(severity_list[0])
    def parse_header_items(self, header):
        """
        Parses severity, date, advisory id, CVE/CWE lists and CVSS scores
        out of the advisory header.
        :param header: header of table
        :return: None
        :raises ValueError: when the advisory id cannot be determined
        """
        self.parsed_severity = self.parse_severity(header)
        self.parsed_date = self.get_published_date(header)
        advisory_id_list = header.xpath('.//div[@id="ud-advisory-identifier"]'
                                        '/div[@class="divLabelContent"]/text()')
        if len(advisory_id_list) != 1:
            raise ValueError("Wrong parsed advisory id")
        self.parsed_advisory_id = str(advisory_id_list[0])
        cve_list = header.xpath(
            './/div[@class="cve-cwe-containerlarge"]//div[@class="CVEList"]/div/text()')
        self.parsed_cve_ids.extend(i for i in cve_list if is_correct_cve_id(i))
        cwe_list = header.xpath(
            './/div[@class="cve-cwe-containerlarge"]//div[@class="CWEList"]//text()')
        self.parsed_cwes.extend(c for c in cwe_list if is_correct_cwe(c))
        score_list = header.xpath('.//div[contains(@class, "ud-CVSSScore")]//input/@value')
        if score_list:
            base = re.search(r'Base (\d{1,2}\.\d)', score_list[0])
            if base:
                self.parsed_cvss_base = get_number_from_string(base.group(1))
            # BUGFIX: the dot was unescaped (\d.\d) and matched any character.
            temporal = re.search(r'Temporal (\d\.\d)', score_list[0])
            if temporal:
                self.parsed_cvss_temporal = get_number_from_string(temporal.group(1))
            cvss_vector = re.search(
                r'CVSS:3\.0/AV:\S+/AC:\S+/PR:\S+/UI:\S+/S:\S+/C:\S+/I:\S+/A:\S+/E:\S+/RL:\S+'
                r'/RC:\S+', score_list[0])
            if cvss_vector and is_correct_vector_v3(cvss_vector.group(0)):
                self.parsed_attack_vector = str(cvss_vector.group(0))
    def get_published_date(self, content):
        """
        Extracts and returns the published date from the content; falls
        back to the current date when none can be parsed.
        :param content: downloaded content
        :return: date
        """
        date_list = content.xpath(
            './/div[@id="ud-published"]//div[@class="divLabelContent"]/text()')
        if not date_list:
            return get_current_date()
        date_string_list = re.findall(r'\d{4}\xa0\w+\xa0\d+', str(date_list[0]))
        if not date_string_list:
            return get_current_date()
        # The site separates the date parts with non-breaking spaces.
        date_string = date_string_list[0].replace('\xa0', ' ')
        date = string_to_date(date_string, self.date_format)
        return date
    def check_patched(self, content):
        """
        Sets ``self.patched`` according to the vendor announcement and
        fixed-software sections of the advisory.
        :param content: downloaded content
        :return: None
        """
        vendor_ann_text = concat_strings(content.xpath(
            './/div[@id="vendorannouncefield"]//text()'))
        fixed_sw_text = concat_strings(content.xpath('.//div[@id="fixedsoftfield"]//text()'))
        if 'has released' in vendor_ann_text:
            self.patched = True
            return
        if 'has released' not in fixed_sw_text or 'not released' in fixed_sw_text:
            self.patched = False
        else:
            self.patched = True
| CSIRT-MU/CRUSOE | crusoe_observe/cve-connector/cve_connector/vendor_cve/implementation/parsers/vendor_parsers/cisco_parsers/cisco_vulnerability_parser.py | cisco_vulnerability_parser.py | py | 13,807 | python | en | code | 9 | github-code | 36 |
42583588575 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 1 20:41:25 2019
@author: hp
"""
import aiml
# Create the kernel and learn AIML files
kernel = aiml.Kernel()
kernel.learn("custom.aiml")
# Press CTRL-C to break this loop
while True:
userinput = input("Enter your message >> ")
output = kernel.respond(userinput)
print(output) | syeda-mahrukh-wajid/assignment | chatbot1.py | chatbot1.py | py | 357 | python | en | code | 0 | github-code | 36 |
28613178416 | #!/usr/bin/env python
"""PySide port of the network/http example from Qt v4.x"""
import sys
from PySide import QtCore, QtGui, QtNetwork
class HttpWindow(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QDialog.__init__(self, parent)
self.urlLineEdit = QtGui.QLineEdit("http://www.ietf.org/iesg/1rfc_index.txt")
self.urlLabel = QtGui.QLabel(self.tr("&URL:"))
self.urlLabel.setBuddy(self.urlLineEdit)
self.statusLabel = QtGui.QLabel(self.tr("Please enter the URL of a file "
"you want to download."))
self.quitButton = QtGui.QPushButton(self.tr("Quit"))
self.downloadButton = QtGui.QPushButton(self.tr("Download"))
self.downloadButton.setDefault(True)
self.progressDialog = QtGui.QProgressDialog(self)
self.http = QtNetwork.QHttp(self)
self.outFile = None
self.httpGetId = 0
self.httpRequestAborted = False
self.connect(self.urlLineEdit, QtCore.SIGNAL("textChanged(QString &)"),
self.enableDownloadButton)
self.connect(self.http, QtCore.SIGNAL("requestFinished(int, bool)"),
self.httpRequestFinished)
self.connect(self.http, QtCore.SIGNAL("dataReadProgress(int, int)"),
self.updateDataReadProgress)
self.connect(self.http, QtCore.SIGNAL("responseHeaderReceived(QHttpResponseHeader &)"),
self.readResponseHeader)
self.connect(self.progressDialog, QtCore.SIGNAL("canceled()"),
self.cancelDownload)
self.connect(self.downloadButton, QtCore.SIGNAL("clicked()"),
self.downloadFile)
self.connect(self.quitButton, QtCore.SIGNAL("clicked()"),
self, QtCore.SLOT("close()"))
topLayout = QtGui.QHBoxLayout()
topLayout.addWidget(self.urlLabel)
topLayout.addWidget(self.urlLineEdit)
buttonLayout = QtGui.QHBoxLayout()
buttonLayout.addStretch(1)
buttonLayout.addWidget(self.downloadButton)
buttonLayout.addWidget(self.quitButton)
mainLayout = QtGui.QVBoxLayout()
mainLayout.addLayout(topLayout)
mainLayout.addWidget(self.statusLabel)
mainLayout.addLayout(buttonLayout)
self.setLayout(mainLayout)
self.setWindowTitle(self.tr("HTTP"))
self.urlLineEdit.setFocus()
def downloadFile(self):
url = QtCore.QUrl(self.urlLineEdit.text())
fileInfo = QtCore.QFileInfo(url.path())
fileName = fileInfo.fileName()
if QtCore.QFile.exists(fileName):
QtGui.QMessageBox.information(self, self.tr("HTTP"), self.tr(
"There already exists a file called %s "
"in the current directory.") % (fileName))
return
self.outFile = QtCore.QFile(fileName)
if not self.outFile.open(QtCore.QIODevice.WriteOnly):
QtGui.QMessageBox.information(self, self.tr("HTTP"),
self.tr("Unable to save the file %(name)s: %(error)s.")
% {'name': fileName,
'error': self.outFile.errorString()})
self.outFile = None
return
if url.port() != -1:
self.http.setHost(url.host(), url.port())
else:
self.http.setHost(url.host(), 80)
if url.userName():
self.http.setUser(url.userName(), url.password())
self.httpRequestAborted = False
self.httpGetId = self.http.get(url.path(), self.outFile)
self.progressDialog.setWindowTitle(self.tr("HTTP"))
self.progressDialog.setLabelText(self.tr("Downloading %s.") % (fileName))
self.downloadButton.setEnabled(False)
def cancelDownload(self):
self.statusLabel.setText(self.tr("Download canceled."))
self.httpRequestAborted = True
self.http.abort()
self.downloadButton.setEnabled(True)
def httpRequestFinished(self, requestId, error):
if self.httpRequestAborted:
if self.outFile is not None:
self.outFile.close()
self.outFile.remove()
self.outFile = None
self.progressDialog.hide()
return
if requestId != self.httpGetId:
return
self.progressDialog.hide()
self.outFile.close()
if error:
self.outFile.remove()
QtGui.QMessageBox.information(self, self.tr("HTTP"),
self.tr("Download failed: %s.")
% (self.http.errorString()))
else:
fileName = QtCore.QFileInfo(QtCore.QUrl(self.urlLineEdit.text()).path()).fileName()
self.statusLabel.setText(self.tr("Downloaded %s to current directory.") % (fileName))
self.downloadButton.setEnabled(True)
self.outFile = None
    def readResponseHeader(self, responseHeader):
        """Abort the download on any non-200 HTTP status.

        NOTE(review): this also rejects redirects (3xx) — confirm that is
        the intended behaviour.
        """
        if responseHeader.statusCode() != 200:
            QtGui.QMessageBox.information(self, self.tr("HTTP"),
                self.tr("Download failed: %s.")
                % (responseHeader.reasonPhrase()))
            self.httpRequestAborted = True
            self.progressDialog.hide()
            self.http.abort()
            return
    def updateDataReadProgress(self, bytesRead, totalBytes):
        """Mirror download progress into the progress dialog."""
        if self.httpRequestAborted:
            return
        self.progressDialog.setMaximum(totalBytes)
        self.progressDialog.setValue(bytesRead)
def enableDownloadButton(self):
self.downloadButton.setEnabled(not self.urlLineEdit.text())
if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    httpWin = HttpWindow()
    # Run the dialog's own modal event loop; exec_() returns its exit code.
    sys.exit(httpWin.exec_())
| pyside/Examples | examples/network/http.py | http.py | py | 5,973 | python | en | code | 357 | github-code | 36 |
43867560541 | n = int(input())
job = [list(map(int, input().split())) for _ in range(n)]
job.sort(key=lambda x: x[1])
ans = True
time = 0
for i, j in job:
time += i
if time > j:
ans = False
break
print("Yes") if ans else print("No")
| cocoinit23/atcoder | abc/abc131/D - Megalomania.py | D - Megalomania.py | py | 245 | python | en | code | 0 | github-code | 36 |
class Audit(object):
    """Collects periodic audit snapshots of a running simulation model."""

    def __init__(self):
        """
        Constructor method for audit.

        Attributes
        ==========
        global_audit (list of dict):
            One snapshot of high level tracker metrics per audit interval
        audit_unit_occupancy* (lists):
            Per-interval unit occupancy, displaced and waiting snapshots
        """
        # Initialise global audits
        self.global_audit_index_count = 0
        self.global_audit = []
        self.audit_unit_occupancy = []
        self.audit_unit_occupancy_percent = []
        self.audit_unit_occupancy_displaced_preferred = []
        self.audit_unit_occupancy_displaced_destination = []
        self.audit_unit_occupancy_waiting_preferred = []

    def perform_global_audit(self, _model):
        """
        SimPy process: once per simulated time unit, after the warm-up
        period, record high level tracker metrics and per-unit occupancy.
        Runs until the simulation environment terminates it.
        """
        while True:
            if _model.env.now >= _model.params.sim_warmup:
                # Global tracker audit (snapshot of the model's counters)
                self.global_audit_index_count += 1
                item = {
                    'index': self.global_audit_index_count,
                    'time': _model.env.now,
                    'total_patients': _model.tracker['total_patients'],
                    'total_patients_asu': _model.tracker['total_patients_asu'],
                    'total_patients_waited': _model.tracker['total_patients_waited'],
                    'total_patients_displaced': _model.tracker['total_patients_displaced'],
                    'current_patients': _model.tracker['current_patients'],
                    'asu_patients_all': _model.tracker['current_asu_patients_all'],
                    'asu_patients_allocated': _model.tracker['current_asu_patients_allocated'],
                    'asu_patients_unallocated': _model.tracker['current_asu_patients_unallocated'],
                    'asu_patients_displaced': _model.tracker['current_asu_patients_displaced'],
                }
                self.global_audit.append(item)
                # Occupancy, displaced and waiting patients
                self.audit_unit_occupancy.append(_model.unit_occupancy)
                self.audit_unit_occupancy_percent.append(
                    (_model.unit_occupancy / _model.data.units_capacity) * 100)
                self.audit_unit_occupancy_displaced_preferred.append(
                    _model.unit_occupancy_displaced_preferred)
                self.audit_unit_occupancy_displaced_destination.append(
                    _model.unit_occupancy_displaced_destination)
                self.audit_unit_occupancy_waiting_preferred.append(
                    _model.unit_occupancy_waiting_preferred)
            # Wait for next audit interval
            yield _model.env.timeout(1)
| MichaelAllen1966/2105_london_acute_stroke_unit | sim_utils/audit.py | audit.py | py | 2,714 | python | en | code | 0 | github-code | 36 |
42211647592 | import tensorflow as tf
# TF1 graph-mode example: build a linear value function and evaluate it once.
session = tf.Session()
# Placeholder for a batch of 3-feature state vectors (batch size open).
state = tf.placeholder("float", [None, 3])
# 3x2 weight matrix initialised to all zeros.
weights = tf.Variable(tf.constant(0., shape=[3, 2]))
value_function = tf.matmul(state, weights)
# NOTE(review): initialize_all_variables is the deprecated pre-1.0 name of
# global_variables_initializer.
session.run(tf.initialize_all_variables())
ans = session.run(value_function, feed_dict={state: [[1., 0., 0.]]})
print(ans) | RhysJMartin/reinforcement_learning | break_out/temp.py | temp.py | py | 314 | python | en | code | 0 | github-code | 36 |
class Solution:
    def maxDistance(self, nums1: List[int], nums2: List[int]) -> int:
        """Return the maximum j - i with i <= j and nums1[i] <= nums2[j].

        Assumes both arrays are non-increasing (as guaranteed by LeetCode
        1855), so one two-pointer sweep suffices:
        O(len(nums1) + len(nums2)) time, O(1) space.
        """
        max_dist = 0
        i = 0  # index into nums1
        j = 0  # index into nums2
        while i < len(nums1) and j < len(nums2):
            if nums2[j] < nums1[i]:
                # (i, j) is invalid; only advancing i can make it valid.
                i += 1
            else:
                # Valid pair: record its width and try to widen with a larger j.
                max_dist = max(max_dist, j - i)
                j += 1
        return max_dist
| bandiatindra/DataStructures-and-Algorithms | Additional Algorithms/LC 1855. Max Distance Between Pair of Values.py | LC 1855. Max Distance Between Pair of Values.py | py | 394 | python | en | code | 3 | github-code | 36 |
32559830813 | import jwt
from functools import wraps
from app import request, jsonify, app
from app.use_db.tools import quarry
def token_required(f):
    """Decorator: require a valid ``Authorization: <scheme> <JWT>`` header.

    Decodes the token with the app secret (HS256), verifies that the ``sub``
    email exists in the ``person`` table, and calls *f* with the matching
    person id prepended to its arguments.  Responds 401 on any failure.
    """
    @wraps(f)
    def _verify(*args, **kwargs):
        auth_headers = request.headers.get('Authorization', '').split()
        invalid_msg = {
            'message': 'Invalid token. Registeration and / or authentication required',
            'authenticated': False
        }
        expired_msg = {
            'message': 'Expired token. Reauthentication required.',
            'authenticated': False
        }
        # Expect exactly two parts: the scheme and the token itself.
        if len(auth_headers) != 2:
            return jsonify(invalid_msg), 401
        try:
            token = auth_headers[1]
            data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
            email = data['sub']
            email_exist = quarry.call('select exists '
                                      '(select * from person where email_per = %s)', [email], commit=False, fetchall=False)
            if email_exist[0] == 0:
                raise RuntimeError('User not found')
            id_per = quarry.call('select id_per from person where email_per = %s', [email], commit=False, fetchall=False)
            return f(id_per[0], *args, **kwargs)
        except jwt.ExpiredSignatureError:
            return jsonify(expired_msg), 401  # 401 is Unauthorized HTTP status code
        except (jwt.InvalidTokenError, Exception) as e:
            # NOTE(review): catching Exception here swallows unrelated errors
            # (e.g. database outages) and reports them as "invalid token".
            print(e)
            return jsonify(invalid_msg), 401
    return _verify
| Baral-Chief-of-Compliance/ice_tracing_software | prototype/v1/backend/authorization/decorator_for_authorization.py | decorator_for_authorization.py | py | 1,506 | python | en | code | 0 | github-code | 36 |
11490438190 | import base64
import io
from PIL import Image
from pyzbar.pyzbar import decode
from requests_ntlm import HttpNtlmAuth
import requests
def get_js(sc, shop):
    """Query the retail price service for barcode *sc* in shop *shop*.

    Returns the parsed JSON response.
    NOTE(review): NTLM credentials are hard coded in source and certificate
    verification is disabled (``verify=False``) — move the credentials to
    configuration and restore TLS verification.
    """
    username = r'WebService'
    password = 'web2018'
    auth = HttpNtlmAuth(username, password)
    strParam = shop + '/' + sc
    list_url = r"https://ts.offprice.eu/service_retail/hs/wms_api/getpriceQR/" + strParam
    headers = {'Accept': 'application/json;odata=verbose'}
    responce = requests.get(list_url, verify=False, auth=auth, headers=headers)
    response_json = responce.json()
    return response_json
def decode_barcode(my_image):
    """Return the payload of the first barcode decoded from *my_image*, or 0.

    Despite the original comment, only the FIRST decoded object is used —
    the function returns from inside the loop on the first iteration.
    """
    # decodes all barcodes from an my_image
    # bar_class = barcode.ean.EAN13.name
    decoded_objects = decode(Image.open(my_image))
    # print(decoded_objects)
    for obj in decoded_objects:
        # draw the barcode
        # if obj.type == bar_class.replace("-", ""):
        # my_image = draw_barcode(obj, my_image)
        # print barcode type & data
        # print("Type:", obj.type)
        # print("Data:", obj.data.decode("utf-8"))
        return obj.data.decode("utf-8")
    # No barcode found in the image.
    return 0
def use_barcode(my_image):
    """Decode *my_image* and return the first barcode payload (0 if none)."""
    return decode_barcode(my_image)


def use_barcode_ajax(my_image):
    """AJAX-path alias of use_barcode: identical decoding and return value."""
    return decode_barcode(my_image)
def get_my_code(image_base64, shop):
    """Decode a base64 image, scan it for a barcode, and look up its price.

    Returns:
        0   if no barcode could be decoded from the image,
        1   if a barcode was decoded but the price service had no data,
        str otherwise: the service payload with single quotes replaced by
            double quotes (a naive dict-to-JSON conversion).
    """
    imgdata = base64.b64decode(str(image_base64))
    tempimg = io.BytesIO(imgdata)
    datasacan = use_barcode(tempimg)
    if datasacan == 0:
        return 0
    textbar = datasacan
    textjson = get_js(textbar, shop)
    # Should return the barcode itself when no data could be fetched for it
    # NOTE(review): get_js returns parsed JSON (dict/list), so this string
    # comparison likely never matches — confirm the intended sentinel.
    if textjson == '[] []':
        return 1
    # get string with all double quotes
    single_quoted_dict_in_string = textjson
    desired_double_quoted_dict = str(single_quoted_dict_in_string)
    desired_double_quoted_dict = desired_double_quoted_dict.replace("'", "\"")
    return desired_double_quoted_dict
| otitarenko/djangoqr | qrapp/decoder.py | decoder.py | py | 2,047 | python | en | code | 0 | github-code | 36 |
# Write code to extract the number at the end of the line below.
# Convert the extracted value to a floating point number and print it out.
def extract_trailing_number(line):
    """Return the whitespace-separated token at the end of *line* as a float.

    Replaces the original brittle ``find`` + hard-coded-offset slicing with
    a split on the last whitespace run.
    """
    return float(line.rsplit(None, 1)[-1])


text = "Lorem ipsum dolor sit amet elit, consectetur adipiscing elit 20.65434"
float_text = extract_trailing_number(text)
print(float_text)
74062234985 | import pytest
from fauxcaml.semantics.check import Checker
from fauxcaml.semantics.typ import *
from fauxcaml.semantics.unifier_set import UnificationError
def test_concrete_atom_unification():
    """Unifying two identical atomic types must succeed (no exception)."""
    checker = Checker()
    checker.unify(Int, Int)
def test_concrete_poly_unification():
    """Unifying two structurally identical compound types must succeed."""
    checker = Checker()
    checker.unify(Tuple(Int, Bool), Tuple(Int, Bool))
def test_var_unification():
    """Fresh vars start in distinct sets; unify merges them, and a later
    concrete binding propagates to every member of the merged set."""
    checker = Checker()
    T = checker.fresh_var()
    U = checker.fresh_var()
    assert not checker.unifiers.same_set(T, U)
    checker.unify(T, U)
    assert checker.unifiers.same_set(T, U)
    checker.unify(T, Bool)
    assert checker.unifiers.same_set(T, Bool)
    assert checker.unifiers.same_set(U, Bool)
def test_var_more_unification():
    """Unifying compound types unifies their components position-wise."""
    checker = Checker()
    T = checker.fresh_var()
    U = checker.fresh_var()
    checker.unify(Tuple(T, Bool), Tuple(Int, U))
    assert checker.unifiers.same_set(T, Int)
    assert checker.unifiers.same_set(U, Bool)
def test_unification_error():
    """Occurs-check conflicts, arity and constructor mismatches all raise."""
    checker = Checker()
    T = checker.fresh_var()
    with pytest.raises(UnificationError):
        checker.unify(Tuple(Bool, Int), Tuple(T, T))
    with pytest.raises(UnificationError):
        checker.unify(Tuple(Bool, Int), Tuple(Bool))
    with pytest.raises(UnificationError):
        checker.unify(Tuple(Bool, Int), Fn(Bool, Int))
def test_basic_generic_non_generic_unification():
    """Unifying a generic var with a non-generic one makes it non-generic."""
    checker = Checker()
    generic = checker.fresh_var()
    non_generic = checker.fresh_var(non_generic=True)
    checker.unify(generic, non_generic)
    assert checker.is_non_generic(generic)
def test_basic_generic_non_generic_unification_reversed():
    """Non-genericity propagation is symmetric in argument order."""
    checker = Checker()
    generic = checker.fresh_var()
    non_generic = checker.fresh_var(non_generic=True)
    checker.unify(non_generic, generic)
    assert checker.is_non_generic(generic)
def test_complex_generic_non_generic_unification():
    """Non-genericity propagates into the components of a compound type."""
    checker = Checker()
    generic = checker.fresh_var()
    non_generic = checker.fresh_var(non_generic=True)
    t = Tuple(generic)
    checker.unify(non_generic, t)
    assert checker.is_non_generic(generic)
def test_concretize():
    """concretize substitutes every var with its resolved concrete type."""
    checker = Checker()
    T = checker.fresh_var()
    U = checker.fresh_var()
    tup = Tuple(T, Fn(U, Int))
    checker.unify(T, List(Bool))
    checker.unify(U, T)
    concrete = checker.concretize(tup)
    assert concrete == Tuple(List(Bool), Fn(List(Bool), Int))
| eignnx/fauxcaml | fauxcaml/tests/test_unification.py | test_unification.py | py | 2,419 | python | en | code | 2 | github-code | 36 |
def _is_prime(num):
    """Trial-division primality test with a sqrt(num) bound.

    (The original scanned divisors up to num // 2 — the sqrt bound gives
    the same answers far faster.)
    """
    if num < 2:
        return False
    if num % 2 == 0:
        return num == 2
    for divisor in range(3, int(num ** 0.5) + 1, 2):
        if num % divisor == 0:
            return False
    return True


def nth_prime(k):
    """Return the k-th prime, 1-indexed: nth_prime(1) == 2 (Project Euler 7)."""
    primes = [2, 3, 5, 7]
    if k <= len(primes):
        return primes[k - 1]
    # Beyond 10, only numbers ending in 1, 3, 7 or 9 can be prime.
    prefix = 1
    while True:
        for last_digit in (1, 3, 7, 9):
            candidate = prefix * 10 + last_digit
            if _is_prime(candidate):
                primes.append(candidate)
                if len(primes) == k:
                    return primes[-1]
        prefix += 1


if __name__ == "__main__":
    # Same output as the original script: the 10001st prime.
    print(nth_prime(10001))
25607915801 | class Solution:
def rightSideView(self, root: Optional[TreeNode]) -> List[int]:
if not root:
return None
queue = deque()
queue.append(root)
result = []
while queue:
result.append(queue[-1].val)
for _ in range(len(queue)):
curr = queue.popleft()
if curr.left:
queue.append(curr.left)
if curr.right:
queue.append(curr.right)
return result
| Nirmalkumarvs/programs | Trees/Binary Tree Right Side View.py | Binary Tree Right Side View.py | py | 589 | python | en | code | 0 | github-code | 36 |
36322979415 | #! /usr/bin/env python
import sys
import pygame
import os
import argparse
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from subprocess import Popen
from pygame.locals import *
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')
last_image = None
new_image = False
startimg = None
flashimg = None
gphoto_command = ['gphoto2', '--capture-image-and-download', '--filename', '%Y%m%d%H%M%S.jpg']
photo_event = pygame.USEREVENT + 1
class Button:
    """A simple button: a filled rectangle with a centred caption."""

    def __init__(self, rect=None, color=None, caption='Button'):
        # Bug fix: the defaults were ``pygame.Rect(0, 0, 0, 0)`` and
        # ``pygame.Color('WHITE')`` evaluated once at definition time —
        # mutable objects shared by every Button created with defaults.
        # Use None sentinels and build fresh objects per instance instead.
        self.rect = pygame.Rect(0, 0, 0, 0) if rect is None else rect
        self.color = pygame.Color('WHITE') if color is None else color
        self.caption = caption
        self.fsize = 36  # caption font size in points

    def draw(self, surface):
        """Fill the button's rectangle and render its caption centred on it."""
        surface.fill(self.color, rect=self.rect)
        if (pygame.font):
            font = pygame.font.Font('fkfont.ttf', self.fsize)
            text = font.render(self.caption, 0, pygame.Color('BLACK'))
            textpos = text.get_rect(center=self.rect.center)
            surface.blit(text, textpos)
class MyHandler(PatternMatchingEventHandler):
    """Watchdog handler: when a .jpg appears (or changes) in the watched
    directory, load it scaled to the screen and flag it for display."""
    patterns = ["*.jpg", "*.JPG"]
    def process(self, event):
        """
        event.event_type
            'modified' | 'created' | 'moved' | 'deleted'
        event.is_directory
            True | False
        event.src_path
            path/to/observed/file
        """
        logging.debug ("got something")
        logging.debug ((event.src_path, event.event_type))
        # Publish the fresh photo to the main loop via module globals
        # (x and y are the screen dimensions set in __main__).
        global last_image
        global new_image
        logging.debug ("loading image")
        last_image = aspect_scale(get_image(event.src_path), (x, y)).convert()
        new_image = True
        logging.debug ("done loading")
    def on_created(self, event):
        # Creation and modification of a photo both trigger a reload.
        self.process(event)
    def on_modified(self, event):
        self.process(event)
def load_resources():
    """Load start/background/flash images (scaled to screen size) and the
    countdown font into module-level globals."""
    logging.debug ("loading ressources")
    global startimg
    global flashimg
    global bgimg
    global cntfont
    base_path = './gfx/'
    startimg = aspect_scale(pygame.image.load(base_path + 'start.png'), (x, y))
    bgimg = aspect_scale(pygame.image.load(base_path + 'BG.png'), (x, y))
    flashimg = aspect_scale(pygame.image.load(base_path + 'flash.png'), (x, y))
    # NOTE(review): pygame.font.Font requires an int size; ``y / 2`` is a
    # float on Python 3 — this script appears to target Python 2. Confirm.
    cntfont = pygame.font.Font('fkfont.ttf', y / 2)
    logging.debug ("done loading")
def draw_buttons(surface, sw, sh):
    """Draw the Start and Print buttons along the bottom edge of *surface*
    (sw/sh are the surface's width and height)."""
    color = pygame.Color('#ee4000')
    btnwidth = 250
    btnheight = 50
    # Equal spacing layout: margin | button | margin | button | margin
    margin = (sw - (2 * btnwidth)) / 3
    btnleft = Button(pygame.Rect(margin, sh - btnheight, btnwidth, btnheight), color, 'Start')
    btnright = Button(btnleft.rect.move(btnwidth + margin, 0), color, 'Print')
    btnleft.draw(surface)
    btnright.draw(surface)
def get_image(path):
    """Load an image from *path*, normalising path separators for the OS."""
    canonicalized_path = path.replace('/', os.sep).replace('\\', os.sep)
    image = pygame.image.load(canonicalized_path)
    return image
def aspect_scale(img, size):
    """ Scales 'img' to fit into box bx/by.
    This method will retain the original image's aspect ratio.

    First fits to the image's longer axis; if the other axis then overflows
    the box, refits to that axis instead.  Results are truncated to ints
    before scaling.
    """
    bx, by = size
    ix, iy = img.get_size()
    if ix > iy:
        # fit to width
        scale_factor = bx / float(ix)
        sy = scale_factor * iy
        if sy > by:
            # height overflows the box: fit to height instead
            scale_factor = by / float(iy)
            sx = scale_factor * ix
            sy = by
        else:
            sx = bx
    else:
        # fit to height
        scale_factor = by / float(iy)
        sx = scale_factor * ix
        if sx > bx:
            # width overflows the box: fit to width instead
            scale_factor = bx / float(ix)
            sx = bx
            sy = scale_factor * iy
        else:
            sy = by
    sx = int(sx)
    sy = int(sy)
    return pygame.transform.scale(img, (sx, sy))
def end_script():
    """Stop the watchdog observer and ask the main loop to exit."""
    logging.debug ("exit")
    global done
    done = True
    observer.stop()
    observer.join()
def display_count():
    """Draw the current countdown digit over the background, then decrement."""
    global cnt
    global screen
    screen.blit(bgimg, (0, 0))
    text = cntfont.render(str(cnt), 0, pygame.Color('WHITE'))
    textpos = text.get_rect(center=screen.get_rect().center)
    screen.blit(text, textpos)
    cnt = cnt - 1
if __name__ == '__main__':
    args = sys.argv[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument("--width", type=int, help="screen width", default=1024)
    parser.add_argument("--height", type=int, help="screen height", default=600)
    parser.add_argument("--path", help="path to observe", default=".")
    parser.add_argument("--fullscreen", "-f", action='store_true', help="run in fullscreen")
    parser.add_argument("--delay", "-d", type=int, help="delay before picture is taken", default=5)
    args = parser.parse_args()
    # These become module globals read by the handler/drawing functions.
    x = args.width
    y = args.height
    path = args.path
    fullscreen = args.fullscreen
    delay = args.delay
    # Watch the photo directory so freshly captured images get displayed.
    observer = Observer()
    observer.schedule(MyHandler(), path)
    observer.start()
    pygame.init()
    load_resources()
    if(fullscreen):
        screen = pygame.display.set_mode((x, y), FULLSCREEN)
    else:
        screen = pygame.display.set_mode((x, y))
    pygame.mouse.set_visible(False)
    done = False
    clock = pygame.time.Clock()
    first_run = True
    cnt = 5  # countdown seconds before the photo is taken
    while not done:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                end_script()
            if event.type == KEYDOWN and event.key == K_ESCAPE:
                end_script()
            if event.type == KEYDOWN and event.key == K_SPACE:
                # SPACE starts the countdown: tick once per second.
                display_count()
                pygame.time.set_timer(photo_event, 1000)
                pygame.display.flip()
                #sub = Popen(['gphoto2','--capture-image-and-download'])
            if event.type == photo_event:
                if (cnt <= 0):
                    # Countdown finished: show "CHEESE!!", stop the timer and
                    # trigger the camera via gphoto2 (async subprocess).
                    screen.blit(bgimg, (0, 0))
                    text = cntfont.render('CHEESE!!', 0, pygame.Color('WHITE'))
                    textpos = text.get_rect(center=screen.get_rect().center)
                    screen.blit(text, textpos)
                    cnt = 5
                    pygame.time.set_timer(photo_event, 0)
                    sub = Popen(gphoto_command)
                else:
                    display_count()
                pygame.display.flip()
        # A new photo arrived from the watchdog handler: show it centred.
        if(last_image and new_image):
            logging.debug ("blitting image")
            left = (screen.get_width() - last_image.get_width()) / 2
            top = (screen.get_height() - last_image.get_height()) / 2
            screen.blit(last_image, (left, top))
            new_image = False
            logging.debug ("done blitting")
            draw_buttons(screen, x, y)
            pygame.display.flip()
        # Nothing captured yet: show the start screen once.
        if(not last_image and first_run):
            screen.blit(startimg, (0, 0))
            first_run = False
            draw_buttons(screen, x, y)
            pygame.display.flip()
        clock.tick(60)
| hreck/PyBooth | pyBooth.py | pyBooth.py | py | 7,007 | python | en | code | 0 | github-code | 36 |
73708087464 | """Covariance-free Partial Least Squares"""
# Author: Artur Jordao <arturjlcorreia[at]gmail.com>
# Artur Jordao
import numpy as np
from scipy import linalg
from sklearn.utils import check_array
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.base import BaseEstimator
from sklearn.preprocessing import normalize
import copy
class CIPLS(BaseEstimator):
    """Covariance-free Partial Least Squares (CIPLS).
    Parameters
    ----------
    n_components : int or None, (default=None)
        Number of components to keep. If ``n_components `` is ``None``,
        then ``n_components`` is set to ``min(n_samples, n_features)``.
    copy : bool, (default=True)
        If False, X will be overwritten. ``copy=False`` can be used to
        save memory but is unsafe for general use.
    References
    Covariance-free Partial Least Squares: An Incremental Dimensionality Reduction Method
    """
    def __init__(self, n_components=10, copy=True):
        self.__name__ = 'Covariance-free Partial Least Squares'
        self.n_components = n_components
        self.n = 0  # number of samples seen so far (accumulates across fit calls)
        self.copy = copy
        self.sum_x = None  # running sum of feature vectors
        self.sum_y = None  # running sum of targets
        self.n_features = None
        self.x_rotations = None  # projection directions, one row per component
        self.x_loadings = None
        self.y_loadings = None
        self.eign_values = None
        self.x_mean = None
        self.p = []
    def normalize(self, x):
        # Unit-norm a 1-D vector (sklearn's normalize expects 2-D input).
        return normalize(x[:, np.newaxis], axis=0).ravel()
    def fit(self, X, Y):
        """Incrementally update the PLS components from a batch of samples.

        Can be called repeatedly with successive batches; running sums and
        rotations are carried over between calls.
        """
        X = check_array(X, dtype=FLOAT_DTYPES, copy=self.copy)
        Y = check_array(Y, dtype=FLOAT_DTYPES, copy=self.copy, ensure_2d=False)
        if Y.ndim == 1:
            Y = Y.reshape(-1, 1)
        # Binary labels {0, 1} are recoded to {-1, +1}.
        if np.unique(Y).shape[0] == 2:
            Y[np.where(Y == 0)[0]] = -1
        n_samples, n_features = X.shape
        # Lazy allocation on the first batch, once dimensions are known.
        if self.n == 0:
            self.x_rotations = np.zeros((self.n_components, n_features))
            self.x_loadings = np.zeros((n_features, self.n_components))
            self.y_loadings = np.zeros((Y.shape[1], self.n_components))
            self.n_features = n_features
            self.eign_values = np.zeros((self.n_components))
            self.p = [0] * self.n_components
        for j in range(0, n_samples):
            self.n = self.n + 1
            u = X[j]
            l = Y[j]
            if self.n == 1:
                self.sum_x = u
                self.sum_y = l
            else:
                # Incremental mean update; correct the first rotation for
                # the shift of the running mean (covariance-free update).
                old_mean = 1 / (self.n - 1) * self.sum_x
                self.sum_x = self.sum_x + u
                mean_x = 1 / self.n * self.sum_x
                u = u - mean_x
                delta_x = mean_x - old_mean
                self.x_rotations[0] = self.x_rotations[0] - delta_x * self.sum_y
            self.x_rotations[0] = self.x_rotations[0] + (u * l)
            # NOTE(review): for the very first sample, sum_y is set to l above
            # and then incremented by l again here — confirm against the
            # reference CIPLS formulation.
            self.sum_y = self.sum_y + l
            t = np.dot(u, self.normalize(self.x_rotations[0].T))
            self.x_loadings[:, 0] = self.x_loadings[:, 0] + (u * t)
            self.y_loadings[:, 0] = self.y_loadings[:, 0] + (l * t)
            # Deflate and update the remaining components in sequence.
            for c in range(1, self.n_components):
                u -= np.dot(t, self.x_loadings[:, c - 1])
                l -= np.dot(t, self.y_loadings[:, c - 1])
                self.x_rotations[c] = self.x_rotations[c] + (u * l)
                self.x_loadings[:, c] = self.x_loadings[:, c] + (u * t)
                self.y_loadings[:, c] = self.y_loadings[:, c] + (l * t)
                t = np.dot(u, self.normalize(self.x_rotations[c].T))
        return self
    def transform(self, X, Y=None, copy=True):
        """Apply the dimension reduction learned on the train data."""
        X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
        # Center with the running mean accumulated during fit.
        mean = 1 / self.n * self.sum_x
        X -= mean
        w_rotation = np.zeros(self.x_rotations.shape)
        for c in range(0, self.n_components):
            w_rotation[c] = self.normalize(self.x_rotations[c])
        return np.dot(X, w_rotation.T)
| arturjordao/IncrementalDimensionalityReduction | Code/CIPLS.py | CIPLS.py | py | 4,113 | python | en | code | 6 | github-code | 36 |
35251633118 | #prob.8
from timeit import default_timer as dt
#repetitive calling of the isprime improves the performance
#use isPrime in Prob.7
#=========================prob.7=================================
primenumbers: list[int] = [2]  # prime number cache, extended across calls


def isPrime(num: int) -> bool:
    """Check whether *num* is prime, using and extending the prime cache.

    NOTE: the cached fast path assumes isPrime has been called for every
    integer from 2 upward in ascending order (as numberOfPrime does).
    Calling it with arbitrary out-of-order arguments can leave gaps in the
    cache and corrupt later answers.

    Args:
        num (int): the number to check

    Returns:
        bool: True if the number is prime, False otherwise.
    """
    # Bug fix: 0, 1 and negatives are not prime.  The original reported
    # them as prime AND appended them to the cache, corrupting every
    # subsequent result.
    if num < 2:
        return False
    # 2 is the exception (only even prime, already cached)
    if num == 2:
        return True
    pn = 2
    # 1st check: divide by cached primes up to sqrt(num)
    for pn in primenumbers:
        if pn > (num ** 0.5):
            break  # all cached primes up to sqrt(num) have been checked
        if (num % pn) == 0:
            return False
    # 2nd check: divide by the candidates between the largest cached prime
    # and sqrt(num) (empty when the cache already covers that range)
    for nn in range(pn, int(num ** 0.5) + 1):
        if (num % nn) == 0:
            return False
    # num is prime, so remember it for later calls
    primenumbers.append(num)
    return True


def numberOfPrime(num: int) -> int:
    """Count the primes p with 2 <= p <= num.

    Args:
        num (int): upper bound (inclusive)

    Returns:
        int: how many primes are less than or equal to num
    """
    count = 0
    # Ascending iteration keeps the prime cache complete and sorted.
    for currentno in range(2, num + 1):
        if isPrime(currentno):
            count = count + 1
    return count
#test code
# Because the prime cache persists across calls, each successive range
# reuses all primes discovered by the previous ones.
startt = dt() #initialize Timer
print("prime numbers in 0-10 : %d"%numberOfPrime(10))
print("eleapsed time : %.2fms"%((dt()-startt)*1000))
startt = dt() #initialize Timer
print("prime numbers in 0-100 : %d"%numberOfPrime(100))
print("eleapsed time : %.2fms"%((dt()-startt)*1000))
startt = dt() #initialize Timer
print("prime numbers in 0-1000 : %d"%numberOfPrime(1000))
print("eleapsed time : %.2fms"%((dt()-startt)*1000))
startt = dt() #initialize Timer
print("prime numbers in 0-10000 : %d"%numberOfPrime(10000))
print("eleapsed time : %.2fms"%((dt()-startt)*1000))
startt = dt() #initialize Timer
print("prime numbers in 0-100000 : %d"%numberOfPrime(100000))
print("eleapsed time : %.2fms"%((dt()-startt)*1000))
'''
prime numbers in 0-10 : 4
eleapsed time : 7.96ms
prime numbers in 0-100 : 25
eleapsed time : 1.42ms
prime numbers in 0-1000 : 168
eleapsed time : 5.03ms
prime numbers in 0-10000 : 1229
eleapsed time : 54.48ms
prime numbers in 0-100000 : 9592
eleapsed time : 753.35ms
cache algorithm -> 훨씬 짧은 처리시간!
캐시가 함수가 끝나도 계속 유지되므로 0-10에서 사용한 캐시를 0-100에서 다시 사용, 오히려 검색시간이 감소함!
''' | lila-lalab/SDDataExpertProgram2021 | 이재호/day3/test_8_cache_for.py | test_8_cache_for.py | py | 2,858 | python | en | code | 0 | github-code | 36 |
37055431378 | import asyncio
import ciberedev
# creating our client instance
client = ciberedev.Client()
async def main():
    """Take a screenshot of google.com, print its URL, and save it locally."""
    # The context manager starts the client and guarantees shutdown.
    async with client:
        shot = await client.take_screenshot("www.google.com")
        print(shot.url)
        await shot.save("test.png")


if __name__ == "__main__":
    # Entry point: run the async main under a fresh event loop.
    asyncio.run(main())
| cibere/ciberedev.py | examples/take_screenshot.py | take_screenshot.py | py | 572 | python | en | code | 1 | github-code | 36 |
718080167 | from re import S
import re
from django.db.models.signals import pre_init
from django.shortcuts import render
from .models import *
from .serializers import *
from django.shortcuts import render
from rest_framework import viewsets, mixins, generics
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework.response import Response
import datetime
import time
from rest_framework.parsers import JSONParser
from django.utils import timezone
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import permission_classes
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404, get_list_or_404, reverse
from django.http import (HttpResponse, HttpResponseNotFound, Http404,
HttpResponseRedirect, HttpResponsePermanentRedirect)
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from django.contrib import auth
import requests
from django.core.mail import send_mail
from rest_framework import status
from django.contrib.auth import authenticate, login
from datetime import datetime
from django.contrib.auth.models import User
from django.contrib import messages
from datetime import datetime, date
from django.core.mail import send_mail
import json
from django.core.serializers.json import DjangoJSONEncoder
import os
from django.views.decorators.cache import cache_control
from django.db.models import Sum
import collections
import json
from datetime import date
from django.contrib.auth.models import User
from django.db.models import Count, Sum
import datetime
from datetime import datetime, timedelta
from django.db.models.functions import TruncMonth, TruncYear
import requests
import json
import random
from django.db.models import Q
import requests
import json
import uuid
def getFoodImageURL(foodName):
    """Return a Pexels image URL for *foodName*, or a stock food image.

    NOTE(review): the Pexels API key is hard coded in source; move it to
    settings / environment configuration.
    """
    headers = {
        "Authorization": "563492ad6f917000010000013784e527f0764d279ff0e8157222e0d2",
        "Content-Type": "application/json"
    }
    r = requests.get(
        'https://api.pexels.com/v1/search?query={}&per_page=1'.format(foodName), headers=headers)
    data = r.json()
    try:
        return (random.choice(data["photos"])['src']['original']+"?auto=compress")
    except (KeyError, IndexError):
        # No results for this query: fall back to a generic food photo.
        # (The original bare ``except`` also hid programming errors.)
        return "https://images.pexels.com/photos/1640777/pexels-photo-1640777.jpeg?auto=compress"
class CustomerProfileView(APIView):
    """Return the authenticated user's customer profile."""
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None, **kwargs):
        """GET: serialize and return the caller's CustomerProfile.

        Bug fix: the original swallowed the lookup failure with
        ``except: pass`` and then crashed with UnboundLocalError on the
        unbound ``user`` — now responds 404 instead.
        """
        try:
            user = CustomerProfile.objects.get(user=request.user)
        except CustomerProfile.DoesNotExist:
            return Response({'Error': 'Customer profile not found'},
                            status=status.HTTP_404_NOT_FOUND)
        serializer = CustomerProfileSerializer(user)
        return Response(serializer.data)
class DeliveryProfileView(APIView):
    """Return the authenticated user's delivery-boy profile."""
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None, **kwargs):
        """GET: serialize and return the caller's DeliveryProfile.

        Bug fix: the original swallowed the lookup failure with
        ``except: pass`` and then crashed with UnboundLocalError on the
        unbound ``user`` — now responds 404 instead.
        """
        try:
            user = DeliveryProfile.objects.get(user=request.user)
        except DeliveryProfile.DoesNotExist:
            return Response({'Error': 'Delivery profile not found'},
                            status=status.HTTP_404_NOT_FOUND)
        serializer = DeliveryProfileSerializer(user)
        return Response(serializer.data)
@api_view(('GET',))
@permission_classes([IsAuthenticated])
def WhoAmI(request):
    """Classify the authenticated user as vendor / customer / deliveryboy / admin.

    Returns ``{"iam": <role>}`` as JSON.

    Bug fix: when the user matched no role the original fell off the end of
    the function (implicit ``None`` return, which Django rejects); now it
    responds 404 with an error payload.  ``exists()`` replaces the original
    ``len(queryset) > 0`` checks to avoid fetching whole result sets.
    """
    data = {}
    if Shop.objects.filter(vendor=request.user).exists():
        data['iam'] = "vendor"
    elif CustomerProfile.objects.filter(user=request.user).exists():
        data['iam'] = "customer"
    elif DeliveryProfile.objects.filter(user=request.user).exists():
        data['iam'] = "deliveryboy"
    elif request.user.is_staff:
        data['iam'] = "admin"
    else:
        return HttpResponse(json.dumps({'Error': 'Unknown role'}),
                            status=status.HTTP_404_NOT_FOUND)
    return HttpResponse(json.dumps(data), status=status.HTTP_200_OK)
@ api_view(('POST',))
def RegisterNewUserCustomer(request):
    """Create a User plus CustomerProfile from the posted registration form.

    Expects: username, full_name, email, password, phoneNo.
    Responds 201 with the new profile, or 400 on duplicates / failure.
    """
    temp = request.data.copy()
    if User.objects.filter(email=temp['email']).exists():
        return Response({'Error': 'Already Registered with this email'}, status=status.HTTP_400_BAD_REQUEST)
    if User.objects.filter(username=temp['username']).exists():
        return Response({'Error': 'This username already exist'}, status=status.HTTP_400_BAD_REQUEST)
    tempUser = None
    try:
        tempUser = User(
            username=temp['username'],
            first_name=temp['full_name'],
            email=temp['email'],
        )
        tempUser.set_password(temp['password'])
        tempUser.save()
        tempCustomerProfile = CustomerProfile(
            user=tempUser,
            phoneNo=temp['phoneNo']
        )
        tempCustomerProfile.save()
    except Exception:
        # Roll back the half-created account so the user can retry, and do
        # NOT echo the submitted form (it contains the raw password) back
        # to the client as the original code did.
        if tempUser is not None and tempUser.pk is not None:
            tempUser.delete()
        return Response({'Error': 'Registration failed'}, status=status.HTTP_400_BAD_REQUEST)
    return Response(CustomerProfileSerializer(tempCustomerProfile).data, status=status.HTTP_201_CREATED)
@ api_view(('POST',))
def RegisterNewUserDeliveryBoy(request):
    """Create a User plus DeliveryProfile from the posted registration form.

    Expects: username, full_name, email, password, phoneNo.
    Responds 201 with the new profile, or 400 on duplicates / failure.
    NOTE(review): this mirrors RegisterNewUserCustomer; the bare except
    below echoes the submitted data (including the raw password) back to
    the client and leaves a half-created User if the profile save fails.
    """
    temp = request.data.copy()
    if len(User.objects.filter(email=temp['email'])) > 0:
        return Response({'Error': 'Already Registered with this email'}, status=status.HTTP_400_BAD_REQUEST)
    if len(User.objects.filter(username=temp['username'])) > 0:
        return Response({'Error': 'This username already exist'}, status=status.HTTP_400_BAD_REQUEST)
    # if len(CustomerProfile.objects.filter(aadharNo=temp['aadharNo'])) > 0:
    # return Response({'Error': 'Already Registered with this aadhar'}, status=status.HTTP_406_NOT_ACCEPTABLE)
    try:
        tempUser = User(
            username=temp['username'],
            first_name=temp['full_name'],
            email=temp['email'],
        )
        tempUser.set_password(temp['password'])
        tempUser.save()
        tempDeliveryProfile = DeliveryProfile(
            user=tempUser,
            phoneNo=temp['phoneNo']
        )
        tempDeliveryProfile.save()
    except:
        return Response(temp, status=status.HTTP_400_BAD_REQUEST)
    return Response(DeliveryProfileSerializer(tempDeliveryProfile).data, status=status.HTTP_201_CREATED)
@ api_view(('GET',))
@ permission_classes([IsAuthenticated])
def LoggedInCustomerOrders(request):
    """List the caller's pending / in-progress orders, newest first."""
    temp = CustomerOrder.objects.filter(
        orderFor=request.user).filter(Q(status="pending") | Q(status="inorder")).order_by(*['-date', '-time'])
    return Response(CustomerOrderSerializer(temp, many=True).data, status=status.HTTP_200_OK)
@ api_view(('GET',))
@ permission_classes([IsAuthenticated])
def CustomerPendingOrders(request):
    """List the caller's orders still awaiting pickup by a delivery boy."""
    temp = CustomerOrder.objects.filter(
        orderFor=request.user).filter(status="pending")
    return Response(CustomerOrderSerializer(temp, many=True).data, status=status.HTTP_200_OK)
@ api_view(('GET',))
@ permission_classes([IsAuthenticated])
def ListAllShops(request):
    """List every registered shop."""
    temp = Shop.objects.all()
    return Response(ShopSerializer(temp, many=True).data, status=status.HTTP_200_OK)
@ api_view(('GET',))
@ permission_classes([IsAuthenticated])
def ListAllProducts(request):
    """List every product across all shops."""
    temp = Product.objects.all()
    return Response(ProductSerializer(temp, many=True).data, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def CustomerBuyProduct(request):
    """Create a CustomerOrder for the caller from the posted cart.

    Expects latitude/longitude/status/addressinwords/typeOfPayment/shopID/
    orderPrice/payment_status plus comma-separated productId (and optional
    productQuan) strings.
    NOTE(review): orderPrice is taken from the client as-is — it should be
    recomputed server side from the product prices.
    """
    data = request.data.copy()
    tempProductList = []
    temp = CustomerOrder(
        orderFor=request.user,
        orderImg=getFoodImageURL("food"),
        latitude=data['latitude'],
        longitude=data['longitude'],
        status=data['status'],
        addressinwords=data["addressinwords"],
        typeOfPayment=PaymentCategory.objects.filter(
            name=data["typeOfPayment"]).first(),
        shop=Shop.objects.filter(id=int(data["shopID"])).first(),
        locality=Shop.objects.filter(id=int(data["shopID"])).first().locality,
        orderPrice=float(data["orderPrice"]),
        payment_status=data["payment_status"]
    )
    temp.save()
    productIDS = data['productId'].split(',')
    # Quantities are optional; missing productQuan falls back to an empty list.
    try:
        quan = data['productQuan'].split(',')
    except:
        quan = []
    for idx, i in enumerate(productIDS):
        # NOTE(review): the bare except silently skips invalid product ids,
        # and when quan is empty the IndexError on quan[idx] is swallowed
        # AFTER the product was already added to temp.product — confirm.
        try:
            pro = Product.objects.get(id=int(i))
            temp.product.add(pro)
            new = ProductQuanities(
                product=pro,
                quantity=int(quan[idx]),
                orderID=temp
            )
            new.save()
        except:
            pass
    temp.save()
    return Response(CustomerOrderSerializer(temp).data, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def CustomerCancelProduct(request):
    """Cancel (delete) one of the requesting user's own orders.

    Expects ``productId`` (the order id).  Returns the serialized order(s)
    that were removed.

    Fixes: the original let ANY authenticated user delete ANY order (no
    ownership filter), and serialized the queryset after deletion (and
    without ``many=True``), so the response was broken.
    """
    data = request.data.copy()
    # Restrict to the caller's own orders to prevent cross-user deletion.
    temp = CustomerOrder.objects.filter(id=data['productId'], orderFor=request.user)
    # Capture the payload before the rows disappear.
    payload = CustomerOrderSerializer(temp, many=True).data
    temp.delete()
    return Response(payload, status=status.HTTP_200_OK)
@ api_view(('GET', 'POST'))
@ permission_classes([IsAuthenticated])
def DeliveryPendingOrders(request):
if request.method == "GET":
temp = CustomerOrder.objects.filter(status="pending")
return Response(CustomerOrderSerializer(temp, many=True).data, status=status.HTTP_200_OK)
else:
data = request.data.copy()
temp = CustomerOrder.objects.get(id=data['orderID'])
temp.deliveryboy = DeliveryProfile.objects.get(user=request.user)
temp.status = data['status']
temp.save()
return Response(CustomerOrderSerializer(temp).data, status=status.HTTP_200_OK)
@ api_view(('GET', 'POST'))
@ permission_classes([IsAuthenticated])
def DeliveryinorderOrders(request):
    """List the authenticated delivery boy's orders currently in transit.

    NOTE: POST is accepted by the decorator but not handled, matching the
    original behaviour (the POST branch was commented out).
    """
    if request.method == "GET":
        me = DeliveryProfile.objects.get(user=request.user)
        in_transit = CustomerOrder.objects.filter(deliveryboy=me).filter(status="inorder")
        return Response(CustomerOrderSerializer(in_transit, many=True).data, status=status.HTTP_200_OK)
# Vendor
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def AddProduct(request):
    """Create a Product from POSTed name/price/shopID/category/image."""
    data = request.data.copy()
    # Also persist the uploaded image as a StoreImage record
    # (presumably so the file itself is stored — TODO confirm).
    food = StoreImage(
        image=request.data["image"]
    )
    food.save()
    # Removed the unused `siteLink` local the original computed and dropped.
    temp = Product(
        name=data['name'],
        price=float(data['price']),
        shop=Shop.objects.get(id=int(data["shopID"])),
        category=ProductCategory.objects.get(id=int(data["category"])),
        productImage=data['image'],
    )
    temp.save()
    return Response(ProductSerializer(temp).data, status=status.HTTP_200_OK)
@ api_view(('GET',))
@ permission_classes([IsAuthenticated])
def ListAllProductCategories(request):
    """Return every product category."""
    categories = ProductCategory.objects.all()
    return Response(ProductCategorySerializer(categories, many=True).data,
                    status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def UpdateOrderStatus(request):
    """Set the status of the order named by POST 'orderID'."""
    order = CustomerOrder.objects.filter(
        id=int(request.data["orderID"])).first()
    order.status = request.data["status"]
    order.save()
    return Response(CustomerOrderSerializer(order).data, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def AddShop(request):
    """Register a new Shop owned by the authenticated vendor."""
    payload = request.data
    shop = Shop(
        vendor=request.user,
        name=payload["name"],
        currentOffer=float(payload["currentOffer"]),
        ShopImg=getFoodImageURL('restaurent'),
        locality=ShopLocality.objects.filter(id=int(payload["locality"])).first(),
        latitude=float(payload["latitude"]),
        longitude=float(payload["longitude"]),
        addressinwords=payload["addressinwords"],
        phoneNo=payload["phoneNo"],
        email=payload["email"],
    )
    shop.save()
    return Response(ShopSerializer(shop).data, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def AllProductsOfShop(request):
    """List the products belonging to the shop named by POST 'shopID'."""
    shop = Shop.objects.filter(id=request.data["shopID"]).first()
    products = Product.objects.filter(shop=shop)
    return Response(ProductSerializer(products, many=True).data, status=status.HTTP_200_OK)
@ api_view(('POST', 'GET'))
@ permission_classes([IsAuthenticated])
def FirebaseTokenView(request):
    """GET: dump every stored Firebase token.  POST: upsert the caller's token."""
    if request.method == "GET":
        tokens = FireabaseToken.objects.all()
        return Response(FireabaseTokenSerializer(tokens, many=True).data, status=status.HTTP_200_OK)
    record = FireabaseToken.objects.filter(user=request.user).first()
    if record is None:
        record = FireabaseToken(
            user=request.user,
            token=request.data["token"]
        )
    else:
        record.token = request.data["token"]
    record.save()
    return Response(FireabaseTokenSerializer(record).data, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def ShopAnalysis(request):
    """Aggregate order counts and revenue for a shop.

    Returns, for the shop named by POST 'shopID':
    last_week -- per-day [label, order count, revenue] since last Sunday
    months    -- per-month totals for the current year
    year      -- totals for the three most recent years
    Orders the shop has not accepted (shoppending / shoprejected) are
    excluded everywhere.
    """
    shopID = int(request.data['shopID'])
    shop = Shop.objects.filter(id=shopID).first()
    # One base queryset instead of rebuilding the same filter chain everywhere.
    completed = CustomerOrder.objects.filter(shop=shop).exclude(
        status="shoppending").exclude(status="shoprejected")
    # --- weekly ---
    today = datetime.today().weekday()
    day = datetime.today() - timedelta(days=today+1)  # most recent Sunday
    last_week = [["Sun", 0, 0], ["Mon", 0, 0], ["Tue", 0, 0], [
        "Wed", 0, 0], ["Thu", 0, 0], ["Fri", 0, 0], ["Sat", 0, 0]]
    # min() guards the Sunday case where today+2 == 8 would index past the
    # 7-entry list (the original relied on a bare except to swallow that).
    for i in range(min(today+2, len(last_week))):
        temp = completed.filter(date=day).values("date").annotate(
            price=Sum('orderPrice')).annotate(c=Count('id'))
        try:
            last_week[i] = [last_week[i][0], temp[0]["c"], temp[0]["price"]]
        except IndexError:
            pass  # no orders on that day
        day += timedelta(days=1)
    # --- monthly ---
    name_months = [("Jan", 0, 0), ("Feb", 0, 0), ("March", 0, 0), ("April", 0, 0), ("May", 0, 0), ("June", 0, 0),
                   ("July", 0, 0), ("August", 0, 0), ("Sept", 0, 0), ("Oct", 0, 0), ("Nov", 0, 0), ("Dec", 0, 0)]
    month = completed.annotate(
        month=TruncMonth('date')).values('month').annotate(price=Sum('orderPrice')).annotate(c=Count('id'))
    for i in month:
        if(date.today().year == i['month'].year):
            # .month is 1-based; the original indexed name_months[month],
            # shifting every month one slot forward and raising IndexError
            # for December.
            name_months[i['month'].month - 1] = (
                name_months[i['month'].month - 1][0], i["c"], i["price"])
    # --- yearly ---
    name_year = [[i, 0, 0]
                 for i in range(date.today().year, date.today().year-3, -1)]
    years = completed.annotate(
        year=TruncYear('date')).values('year').annotate(price=Sum('orderPrice')).annotate(c=Count('id'))[:3]
    for j, i in enumerate(years):
        name_year[j] = [name_year[j][0], i["c"], i["price"]]
    return Response({"last_week": last_week, "months": name_months, "year": name_year}, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def UpdateShopDetails(request):
    """Change a shop's current offer percentage."""
    payload = request.data
    shop = Shop.objects.filter(id=int(payload["shopID"])).first()
    shop.currentOffer = float(payload["currentOffer"])
    shop.save()
    return Response(ShopSerializer(shop).data, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def DeleteProduct(request):
    """Remove the product named by POST 'prodID'."""
    target = Product.objects.filter(id=int(request.data["prodID"])).first()
    target.delete()
    return Response({}, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def UpdateProduct(request):
    """Rename / re-price the product named by POST 'prodID'."""
    payload = request.data
    target = Product.objects.filter(id=int(payload["prodID"])).first()
    target.name = payload["name"]
    target.price = payload["price"]
    target.save()
    return Response(ProductSerializer(target).data, status=status.HTTP_200_OK)
@ api_view(('GET',))
@ permission_classes([IsAuthenticated])
def LoggedInVendorShop(request):
    """Return the shop owned by the authenticated vendor."""
    # Dropped the unused `data = request.data` read; this is a GET endpoint.
    shop = Shop.objects.filter(vendor=request.user).first()
    return Response(ShopSerializer(shop).data, status=status.HTTP_200_OK)
@ api_view(('GET',))
@ permission_classes([IsAuthenticated])
def VendorsShopOrders(request):
    """List the authenticated vendor's shop orders, newest first."""
    # Dropped the unused `data = request.data` read and the pointless
    # list-unpacking in order_by(*[...]).
    shop = Shop.objects.filter(vendor=request.user).first()
    orders = CustomerOrder.objects.filter(
        shop=shop).order_by('-date', '-time')
    return Response(CustomerOrderSerializer(orders, many=True).data, status=status.HTTP_200_OK)
@ api_view(('GET',))
@ permission_classes([IsAuthenticated])
def SingleShopDetails(request):
    """Return the authenticated vendor's shop."""
    vendor_shop = Shop.objects.filter(vendor=request.user).first()
    return Response(ShopSerializer(vendor_shop).data, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def SingleShopAllProducts(request):
    """List every product of the shop named by POST 'shopID'."""
    shop = Shop.objects.filter(id=int(request.data["shopID"])).first()
    shop_products = Product.objects.filter(shop=shop)
    return Response(ProductSerializer(shop_products, many=True).data,
                    status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def UpdateUserDetails(request):
    """Update the customer profile's phone number and first name."""
    data = request.data
    customer = CustomerProfile.objects.filter(user=request.user).first()
    customer.phoneNo = data["phoneNo"]
    customer.user.first_name = data["first_name"]
    # The original never called save(), so neither change was persisted.
    customer.user.save()
    customer.save()
    return Response(CustomerProfileSerializer(customer).data, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def StoreImageView(request, *args, **kwargs):
    """Persist an uploaded image and return its URL."""
    # Removed the debug print and the unused `siteLink` local; the
    # "{}".format(""+url) wrapper was a no-op since .url is already a str.
    temp = StoreImage(
        image=request.FILES['image']
    )
    temp.save()
    return Response({"url": temp.image.url}, status=status.HTTP_200_OK)
def GeneratetOrderIDPayment(name, email, phoneNo, amount):
    """Create an Instamojo (test-environment) gateway order and return its payment order id.

    Flow: (1) obtain an OAuth token, (2) create a gateway order for the
    buyer, (3) create a payment request for that order.

    SECURITY NOTE(review): the client id/secret are hard-coded test
    credentials committed to source; move them to configuration before
    shipping.  No timeouts or HTTP-error handling — any network failure or
    non-2xx response raises (KeyError/ConnectionError) to the caller.
    """
    # Step 1: exchange client credentials for a bearer token.
    data1 = {
        "client_id": "test_UnAu7a0tHRsdeequ20AEKVCNR2NHOUpBydi",
        "client_secret": "test_dzbvZFl6Cl5anSSEwV8wDcgNtAwygXGzi7aPUMgDk2g14lz9U4uiebOB4ZNsqcJhAET3KaN6nhB9Rbj9NDP3ORc6FQRSEF4wYB1jcMidH4miO1HhYsOIx3rI7dN",
        "grant_type": "client_credentials"
    }
    res1 = requests.post(
        "https://test.instamojo.com/oauth2/token/", data=data1)
    res1 = res1.json()
    header2 = {
        "Authorization": "Bearer {}".format(res1["access_token"]),
        "Content-Type": "application/x-www-form-urlencoded",
        "client_id": "test_UnAu7a0tHRsdeequ20AEKVCNR2NHOUpBydi",
        "client_secret": "test_dzbvZFl6Cl5anSSEwV8wDcgNtAwygXGzi7aPUMgDk2g14lz9U4uiebOB4ZNsqcJhAET3KaN6nhB9Rbj9NDP3ORc6FQRSEF4wYB1jcMidH4miO1HhYsOIx3rI7dN",
        "grant_type": "client_credentials"
    }
    # Step 2: create the gateway order; transaction_id must be unique per order.
    data2 = {
        "name": str(name),
        "email": str(email),
        "phone": str(phoneNo),
        "amount": str(amount),
        "transaction_id": uuid.uuid4(),
        "currency": "INR",
        "redirect_url": "https://test.instamojo.com/integrations/android/redirect/"
    }
    # print(data2)
    res2 = requests.post(
        "https://test.instamojo.com/v2/gateway/orders/", data=data2, headers=header2)
    res2 = res2.json()
    # print(res2)
    # Step 3: attach a payment request to the order just created.
    data3 = {
        "id": str(res2["order"]["id"])
    }
    res3 = requests.post(
        "https://test.instamojo.com/v2/gateway/orders/payment-request/", data=data3, headers=header2)
    res3 = res3.json()
    return(res3["order_id"])
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def GetOrderID(request):
    """Create a payment-gateway order for the caller and return its id."""
    user = request.user
    profile = CustomerProfile.objects.filter(user=user).first()
    order_id = GeneratetOrderIDPayment(
        user.first_name, user.email, str(profile.phoneNo), str(request.data["amount"]))
    return Response({"order_id": order_id}, status=status.HTTP_200_OK)
@ api_view(('GET',))
@ permission_classes([IsAuthenticated])
def GetDeliveredOrders(request):
    """List the authenticated customer's delivered orders."""
    user = request.user
    # CustomerBuyProduct stores orderFor=request.user (a User object), so
    # the original filter(orderFor=customer_profile) could never match;
    # filter by the user instead.
    orders = CustomerOrder.objects.filter(
        orderFor=user).filter(status="delivered")
    return Response(CustomerOrderSerializer(orders, many=True).data, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def UpdateDeliveryBoyDetails(request):
    """Update the delivery boy profile's phone number and first name."""
    data = request.data
    profile = DeliveryProfile.objects.filter(user=request.user).first()
    profile.phoneNo = data["phoneNo"]
    profile.user.first_name = data["first_name"]
    # The original never called save(), so neither change was persisted.
    profile.user.save()
    profile.save()
    # NOTE(review): returns CustomerProfileSerializer for a DeliveryProfile,
    # kept as-is to preserve the response shape clients already consume.
    return Response(CustomerProfileSerializer(profile).data, status=status.HTTP_200_OK)
| haydencordeiro/FoodDeliveryDjango | food/views.py | views.py | py | 20,986 | python | en | code | 1 | github-code | 36 |
39924477846 | from rest_framework import serializers
from core.models import Match
class MatchSerializer(serializers.ModelSerializer):
    """
    The `season` field is read only for the external API, because we force it to
    use the currently active season inside the MatchViewSet.perform_create()
    method.
    This means that you can ONLY record matches for the currently active
    season, as this is the poolbot centric use case to record match results
    after they have just finished via a client (slack, NFC etc.)
    """
    class Meta:
        model = Match
        # Fields exposed through the API, in serialization order.
        fields = (
            'date',
            'season',
            'winner',
            'loser',
            'channel',
            'granny',
        )
        # `date` and `season` are server-assigned; clients cannot set them.
        read_only_fields = (
            'date',
            'season',
        )
| dannymilsom/poolbot-server | src/api/serializers/match.py | match.py | py | 805 | python | en | code | 4 | github-code | 36 |
34495102899 | import adijif
import pprint
# Configure the AD9545 clock-chip solver.
clk = adijif.ad9545(solver="gekko")
clk.avoid_min_max_PLL_rates = True
clk.minimize_input_dividers = True

# (input index, reference frequency in Hz) and (output index, frequency in Hz).
input_refs = [(0, 1), (1, 10e6)]
output_clocks = [(0, 30720000)]

# The solver expects plain ints, so coerce every index/frequency pair.
input_refs = [(int(idx), int(freq)) for idx, freq in input_refs]
output_clocks = [(int(idx), int(freq)) for idx, freq in output_clocks]

clk.set_requested_clocks(input_refs, output_clocks)
clk.solve()

pprint.pprint(clk.get_config())
| analogdevicesinc/pyadi-jif | examples/ad9545_example.py | ad9545_example.py | py | 493 | python | en | code | 6 | github-code | 36 |
70295398824 | import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from models.convnet import ConvNet
from utils.data_loader import load_cifar10, create_dataloaders
from utils.train import train
# Use the GPU when available and log everything to TensorBoard.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
writer = SummaryWriter('runs/exercise-2_1')

# CIFAR-10 splits and loaders shared by every run.
train_data, val_data, test_data = load_cifar10()
train_dataloader, val_dataloader, test_dataloader = create_dataloaders(
    train_data, val_data, test_data, batch_size=32)

# Repeat the experiment: for each run train a plain ConvNet and then its
# residual variant with identical hyper-parameters.
n_runs = 10
n_epochs = 20
for run_id in range(n_runs):
    for is_res_net, model_name in ((False, 'ConvNet34'), (True, 'ResNet34')):
        model = ConvNet(is_res_net=True) if is_res_net else ConvNet()
        loss_fn = nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
        train(epochs=n_epochs, train_dataloader=train_dataloader,
              val_dataloader=val_dataloader, model=model, loss_fn=loss_fn,
              optimizer=optimizer, device=device, model_name=model_name,
              writer=writer, save_gradients=True, run_id=run_id)
| simogiovannini/DLA-lab1 | 2_1.py | 2_1.py | py | 1,300 | python | en | code | 0 | github-code | 36 |
44210134673 | # -*- coding: utf-8 -*-
#!/usr/bin/env python3
#(pandas)求出每個檔案中,一組值的總和與平均值
"""
Created on Fri Sep 22 11:23:54 2017
@author: vizance
"""
import pandas as pd
import sys
import glob
import os
input_path = sys.argv[1]
output_file = sys.argv[2]
all_files = glob.glob(os.path.join(input_path, 'sales_*'))
all_data_frames =[]
for input_file in all_files:
data_frame = pd.read_csv(input_file, index_col=None)
total_sales = pd.DataFrame([float(str(value).strip('$').replace(',','')) \
for value in data_frame.loc[:,'Sale Amount']]).sum()
average_sales = pd.DataFrame([float(str(value).strip('$').replace(',',''))\
for value in data_frame.loc[:,'Sale Amount']]).mean()
data = {'file_name':os.path.basename(input_file), 'total_sales':total_sales\
,'average_sales':average_sales} #建立一個dict叫做data
all_data_frames.append(pd.DataFrame(data, columns=['file_name','total_sales','average_sales']))
#創建名為data的dataframe,並將其append到all_data_frames的list中
print(all_data_frames)
data_frame_concat = pd.concat(all_data_frames,axis=0,ignore_index=True)#ignore_index=True的目的為,重新排序df的index
print (data_frame_concat)
data_frame_concat.to_csv(output_file, index=False)
| vizance/Python_Data_Analysis | 第二章_CSV檔案處理/pandas_sum_average_from_multiple_files.py | pandas_sum_average_from_multiple_files.py | py | 1,354 | python | en | code | 0 | github-code | 36 |
35599138078 | from pandas import Series
from matplotlib import pyplot
from statsmodels.tsa.ar_model import AR
from sklearn.metrics import mean_squared_error
# Load the daily minimum temperature series (first column is the index).
series = Series.from_csv('daily-minimum-temperatures.csv', header=0)
values = series.values
# Hold out the final week for evaluation.
train, test = values[1:len(values)-7], values[len(values)-7:]
# Fit an autoregressive model on the training portion.
model_fit = AR(train).fit()
# Chosen lag length
print('Lag: %s' % model_fit.k_ar)
# Fitted coefficients
print('Coefficients: %s' % model_fit.params)
# Forecast the held-out week.
predictions = model_fit.predict(start=len(train), end=len(train)+len(test)-1, dynamic=False)
for predicted, expected in zip(predictions, test):
    print('predicted=%f, expected=%f' % (predicted, expected))
error = mean_squared_error(test, predictions)
print('Test MSE: %.3f' % error)
# Plot the forecasts against the actuals.
pyplot.plot(test)
pyplot.plot(predictions, color='red')
pyplot.show()
9355826258 | import requests
import re
def check_link(url_parent, url_child):
    """Print "Yes" if any page linked from *url_parent* links to *url_child*.

    Fetches the parent page, extracts every href, fetches each linked page
    and looks for *url_child* among its hrefs.  Prints "No" otherwise.
    """
    # Non-greedy so several hrefs on one line are captured separately; the
    # original greedy ".*" merged everything between the first and last
    # quote into one bogus match.
    pattern = r"href=\"(.*?)\""
    res = requests.get(url_parent)
    if res.status_code != 200:
        print("No")
        return
    for link in re.findall(pattern, res.text):
        try:
            child_res = requests.get(link)
        except requests.RequestException:
            # Relative or malformed URLs cannot be fetched directly; the
            # original crashed here instead of continuing.
            continue
        if child_res.status_code == 200:
            if url_child in re.findall(pattern, child_res.text):
                print("Yes")
                return
    print("No")
    return


if __name__ == "__main__":
    check_link(input(), input())
17062759580 | import Account
class SavingAccount(Account.BaseAccount):
    """A savings account that enforces a minimum balance on withdrawals."""

    def __init__(self, accNum, accHolderName):
        super().__init__(accNum, accHolderName)
        self._minimumBalance = 5000   # balance may never drop below this
        self._rateOfInterest = 10     # interest rate, percent

    def withdraw(self, withdrawMoney):
        """Withdraw *withdrawMoney* if the minimum balance is preserved.

        Returns True on success, False when the withdrawal is refused.
        """
        # The original only checked the balance *before* withdrawing, so any
        # amount could be taken once the balance exceeded the minimum —
        # even driving the account negative.  Check the post-withdrawal
        # balance instead.
        if self._currentBalance - withdrawMoney >= self._minimumBalance:
            self._currentBalance = self._currentBalance - withdrawMoney
            return True
        return False
| AmenTauhid/Bank-Management-System | SavingAccount.py | SavingAccount.py | py | 466 | python | en | code | 0 | github-code | 36 |
28725034067 | import os
from tkinter import *
from tkinter import filedialog
def openfile():
    """Show a file-open dialog and print the selected file names."""
    chosen = filedialog.askopenfilenames(
        parent=root,
        initialdir="C:\\Users\\Tri Nguyen\\Documents",
        title="Select File")
    print(chosen)


root = Tk()
root.geometry("300x300")

# Build a File menu with a single Open entry.
menubar = Menu(root)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Open", command=openfile)
menubar.add_cascade(label="File", menu=filemenu)
root.config(menu=menubar)

root.mainloop()
44310786559 | import serial, time, syslog, string
def scoredisp(score):
    """Send *score* over the Arduino serial link as ASCII text.

    Opens /dev/ttyACM0 at 9600 baud, writes str(score), and always closes
    the port again.
    """
    port = '/dev/ttyACM0'
    ard = serial.Serial(port, 9600)
    try:
        ard.write(str(score).encode('ascii'))
    finally:
        # The original never closed the port, leaking the file descriptor
        # (and potentially blocking the next open) on every call.
        ard.close()
18321986402 | import numpy
def MoveToChange(move):
    """Convert algebraic coordinates ("e2e4" or "e7e8=Q") to board indices.

    Returns (row_from, col_from, row_to, col_to, promotion): rows/columns
    index the 8x8 board with row 0 = rank 8; promotion is the piece letter
    for a 6-character move such as "e7e8=Q", else None.
    """
    # The original pre-initialized r1=r2=c1=c2=0, which was dead code.
    c1, c2 = ord(move[0]) - 97, ord(move[2]) - 97
    r1, r2 = 8 - int(move[1]), 8 - int(move[3])
    if len(move) == 6:
        return r1, c1, r2, c2, move[5]
    return r1, c1, r2, c2, None
def ChangeToMove(r1, c1, r2, c2):
    """Convert board indices back to algebraic coordinates such as "e2e4"."""
    return f"{chr(c1 + 97)}{8 - r1}{chr(c2 + 97)}{8 - r2}"
class GameState():
    """Full chess game state: 8x8 board, move log, turn and castling flags.

    Board squares hold two-char codes: colour ('w'/'b') + piece letter, or
    '--' for empty.  self.Kings tracks [white, black] king positions.
    self.un / self.v are internal flags: `un` suppresses move logging while
    Undo() replays the log, `v` skips legality validation during searches.
    """
    def __init__(self):
        self.un = self.v = False
        # castle[0] = white may still castle, castle[1] = black.
        self.castle = [True,True]
        self.board = [
            ["bR","bN","bB","bQ","bK","bB","bN","bR"],
            ["bP","bP","bP","bP","bP","bP","bP","bP"],
            ["--","--","--","--","--","--","--","--"],
            ["--","--","--","--","--","--","--","--"],
            ["--","--","--","--","--","--","--","--"],
            ["--","--","--","--","--","--","--","--"],
            ["wP","wP","wP","wP","wP","wP","wP","wP"],
            ["wR","wN","wB","wQ","wK","wB","wN","wR"]]
        self.Kings = [[7,4],[0,4]]
        self.whiteToMove = True
        self.movelog = []
        # Dispatch table: piece letter -> move generator.
        self.moveFunctions = {'P' : self.GetPawnMoves, 'R' : self.GetRookMoves, 'B' : self.GetBishopMoves, 'N' : self.GetKnightMoves, 'Q' : self.GetQueenMoves, 'K' : self.GetKingMoves, }
    def Move(self,r1,c1,r2,c2,piece):
        """Execute a move; `piece` is the promotion letter or None.

        Unless self.v is set, the move is first checked against
        GetValidMoves().  Handles en-passant capture, castling rook
        movement and king-position bookkeeping.
        """
        if r1 == r2 and c1 == c2:
            return
        if not self.v:
            moves = self.GetValidMoves()
            if ChangeToMove(r1,c1,r2,c2) not in moves:
                # A promotion move is stored with an '=Q' style suffix.
                if ChangeToMove(r1,c1,r2,c2) + '=Q' not in moves:
                    return
        if piece != None:
            # Promotion: replace the pawn with the chosen piece.
            self.board[r2][c2] = self.board[r1][c1][0] + piece
            self.board[r1][c1] = '--'
            if self.un == False:
                self.movelog.append(ChangeToMove(r1,c1,r2,c2) + '=' + piece)
            if [r1,c1] in self.Kings:
                self.Kings[self.Kings.index([r1,c1])] = [r2,c2]
            self.whiteToMove = not self.whiteToMove
        else:
            if self.board[r1][c1][1] == 'P' and self.board[r2][c2] == '--':
                # Pawn moving to an empty square: also clears [r1][c2] to
                # remove an en-passant-captured pawn (a no-op for straight
                # pushes, where c1 == c2).
                self.board[r2][c2] = self.board[r1][c1]
                self.board[r1][c2] = self.board[r1][c1] = '--'
            else:
                self.board[r2][c2] = self.board[r1][c1]
                self.board[r1][c1] = '--'
            if self.board[r2][c2][1] == 'K' and abs(c2-c1) > 1:
                # Castling: move the rook next to the king and clear its
                # corner (column 0 or 7 via 3.5*(1+sign)).
                if self.whiteToMove:
                    self.board[r1][c2-numpy.sign(c2-c1)] = 'wR'
                    self.board[r1][int(3.5*(1+numpy.sign(c2-c1)))] = '--'
                    self.castle[0] = False
                else:
                    self.board[r1][c2-numpy.sign(c2-c1)] = 'bR'
                    self.board[r1][int(3.5*(1+numpy.sign(c2-c1)))] = '--'
                    self.castle[1] = False
            if self.un == False:
                self.movelog.append(ChangeToMove(r1,c1,r2,c2))
            if [r1,c1] in self.Kings:
                self.Kings[self.Kings.index([r1,c1])] = [r2,c2]
            self.whiteToMove = not self.whiteToMove
        return
    def Undo(self):
        """Undo the last move by resetting the board and replaying the log."""
        if len(self.movelog) == 0:
            return
        # un/v disable logging and validation during the replay below.
        self.un = self.v = True
        del(self.movelog[-1])
        self.board = [
            ["bR","bN","bB","bQ","bK","bB","bN","bR"],
            ["bP","bP","bP","bP","bP","bP","bP","bP"],
            ["--","--","--","--","--","--","--","--"],
            ["--","--","--","--","--","--","--","--"],
            ["--","--","--","--","--","--","--","--"],
            ["--","--","--","--","--","--","--","--"],
            ["wP","wP","wP","wP","wP","wP","wP","wP"],
            ["wR","wN","wB","wQ","wK","wB","wN","wR"]]
        self.Kings = [[7,4],[0,4]]
        self.whiteToMove = True
        self.castle = [True,True]
        for moves in self.movelog:
            if len(moves) == 4:
                self.Move(MoveToChange(moves)[0],MoveToChange(moves)[1],MoveToChange(moves)[2],MoveToChange(moves)[3],None)
            elif len(moves) == 6:
                self.Move(MoveToChange(moves)[0],MoveToChange(moves)[1],MoveToChange(moves)[2],MoveToChange(moves)[3],moves[5])
        self.un = self.v = False
        return
    def GetPossibleMoves(self):
        """Generate every pseudo-legal move for the side to move."""
        moves = []
        for r in range(len(self.board)):
            for c in range(len(self.board[r])):
                turn = self.board[r][c][0]
                if (self.whiteToMove == True and turn == 'w') or (self.whiteToMove == False and turn == 'b'):
                    piece = self.board[r][c][1]
                    self.moveFunctions[piece](r,c,moves)
        return moves
    def GetValidMoves(self):
        """Filter pseudo-legal moves down to those that leave the king safe.

        Each candidate is played (with validation disabled via self.v),
        checked with IsCheck(), then undone.  A castling king must pass
        through every intermediate square without being in check.
        """
        moves = self.GetPossibleMoves()
        validMoves = []
        for m in moves:
            self.v = True
            r1,c1,r2,c2,piece = MoveToChange(m)
            if self.board[r1][c1][1] == 'K' and abs(c2-c1) > 1:
                # Castling: test every square the king crosses.
                for i in range(c1+1,c2+numpy.sign(c2-c1),numpy.sign(c2-c1)):
                    self.v = True
                    self.Move(r1,c1,r2,i,piece)
                    self.whiteToMove = not self.whiteToMove
                    if self.IsCheck():
                        self.Undo()
                        break
                    self.Undo()
                    if i == c2: validMoves.append(m)
            else:
                self.Move(r1,c1,r2,c2,piece)
                self.whiteToMove = not self.whiteToMove
                if not self.IsCheck():
                    validMoves.append(m)
                self.Undo()
        self.v = False
        return validMoves
    def IsCheck(self):
        """Return True if the side to move is in check.

        "Super-piece" trick: generate moves of every piece type from the
        king's own square; if such a move lands on an enemy piece of the
        same type, that piece attacks the king.
        """
        moves = []
        [[r,c],enemyColour] = [self.Kings[0],'b'] if self.whiteToMove else [self.Kings[1],'w']
        for p in self.moveFunctions:
            self.moveFunctions[p](r,c,moves)
            for m in moves:
                _,_,r1,c1,piece = MoveToChange(m)
                if piece == None:
                    if self.board[r1][c1] == enemyColour+p:
                        return True
                elif self.board[r1][c1] == enemyColour+piece:
                    return True
            moves = []
        return False
    def IsCheckMate(self):
        """True when the side to move is in check and has no legal moves."""
        moves = self.GetValidMoves()
        if self.IsCheck() and moves == []:
            return True
        return False
    def IsStaleMate(self):
        """True when the side to move is NOT in check but has no legal moves."""
        moves = self.GetValidMoves()
        if not self.IsCheck() and moves == []:
            return True
        return False
    def GetPawnMoves(self,r,c,moves):
        """Append pawn pushes, captures, en passant and promotions from (r,c)."""
        if self.whiteToMove == True:
            if self.board[r-1][c] == '--':
                moves.append(ChangeToMove(r,c,r-1,c))
                if r == 6 and self.board[r-2][c] == '--':
                    moves.append(ChangeToMove(r,c,r-2,c))
                if r == 1:
                    # Promotion: replace the plain push with the four
                    # promotion variants.
                    moves.extend([moves[-1]+'=Q',moves[-1]+'=R',moves[-1]+'=N',moves[-1]+'=B',])
                    del moves[-5]
            if c-1 >= 0:
                if self.board[r-1][c-1][0] == 'b':
                    moves.append(ChangeToMove(r,c,r-1,c-1))
                    if r == 1:
                        moves.extend([moves[-1]+'=Q',moves[-1]+'=R',moves[-1]+'=N',moves[-1]+'=B',])
                        del moves[-5]
                elif r == 3 and self.board[r][c-1] == 'bP' and self.movelog[-1] == ChangeToMove(r-2,c-1,r,c-1):
                    # En passant: last move was the adjacent pawn's double push.
                    moves.append(ChangeToMove(r,c,r-1,c-1))
            if c+1 <= 7:
                if self.board[r-1][c+1][0] == 'b':
                    moves.append(ChangeToMove(r,c,r-1,c+1))
                    if r == 1:
                        moves.extend([moves[-1]+'=Q',moves[-1]+'=R',moves[-1]+'=N',moves[-1]+'=B',])
                        del moves[-5]
                elif r == 3 and self.board[r][c+1] == 'bP' and self.movelog[-1] == ChangeToMove(r-2,c+1,r,c+1):
                    moves.append(ChangeToMove(r,c,r-1,c+1))
        else:
            if self.board[r+1][c] == '--':
                moves.append(ChangeToMove(r,c,r+1,c))
                if r == 1 and self.board[r+2][c] == '--':
                    moves.append(ChangeToMove(r,c,r+2,c))
                if r == 6:
                    moves.extend([moves[-1]+'=Q',moves[-1]+'=R',moves[-1]+'=N',moves[-1]+'=B',])
                    del moves[-5]
            if c-1 >= 0:
                if self.board[r+1][c-1][0] == 'w':
                    moves.append(ChangeToMove(r,c,r+1,c-1))
                    if r == 6:
                        moves.extend([moves[-1]+'=Q',moves[-1]+'=R',moves[-1]+'=N',moves[-1]+'=B',])
                        del moves[-5]
                elif r == 4 and self.board[r][c-1] == 'wP' and self.movelog[-1] == ChangeToMove(r+2,c-1,r,c-1):
                    moves.append(ChangeToMove(r,c,r+1,c-1))
            if c+1 <= 7:
                if self.board[r+1][c+1][0] == 'w':
                    moves.append(ChangeToMove(r,c,r+1,c+1))
                    if r == 6:
                        moves.extend([moves[-1]+'=Q',moves[-1]+'=R',moves[-1]+'=N',moves[-1]+'=B',])
                        del moves[-5]
                elif r == 4 and self.board[r][c+1] == 'wP' and self.movelog[-1] == ChangeToMove(r+2,c+1,r,c+1):
                    moves.append(ChangeToMove(r,c,r+1,c+1))
    def GetRookMoves(self,r,c,moves):
        """Append rook moves from (r,c): slide along ranks/files until blocked."""
        directions = ((-1,0),(1,0),(0,-1),(0,1))
        enemyColour = 'b' if self.whiteToMove else 'w'
        for d in directions:
            for i in range(1,8):
                endrow = r + d[0]*i
                endcol = c + d[1]*i
                if 0 <= endrow < 8 and 0 <= endcol < 8:
                    if self.board[endrow][endcol] == '--':
                        moves.append(ChangeToMove(r,c,endrow,endcol))
                    elif self.board[endrow][endcol][0] == enemyColour:
                        # Capture ends the slide in this direction.
                        moves.append(ChangeToMove(r,c,endrow,endcol))
                        break
                    else:
                        break
                else:
                    break
    def GetBishopMoves(self,r,c,moves):
        """Append bishop moves from (r,c): slide along diagonals until blocked."""
        directions = ((-1,-1),(-1,1),(1,-1),(1,1))
        enemyColour = 'b' if self.whiteToMove else 'w'
        for d in directions:
            for i in range(1,8):
                endrow = r + d[0]*i
                endcol = c + d[1]*i
                if 0 <= endrow < 8 and 0 <= endcol < 8:
                    if self.board[endrow][endcol] == '--':
                        moves.append(ChangeToMove(r,c,endrow,endcol))
                    elif self.board[endrow][endcol][0] == enemyColour:
                        moves.append(ChangeToMove(r,c,endrow,endcol))
                        break
                    else:
                        break
                else:
                    break
    def GetKnightMoves(self,r,c,moves):
        """Append knight jumps from (r,c) to any square not holding an ally."""
        directions = ((1,2),(2,1),(-1,2),(-2,1),(1,-2),(2,-1),(-1,-2),(-2,-1))
        allyColour = 'w' if self.whiteToMove else 'b'
        for d in directions:
            endrow = r + d[0]
            endcol = c + d[1]
            if 0 <= endrow < 8 and 0 <= endcol < 8:
                if self.board[endrow][endcol][0] != allyColour:
                    moves.append(ChangeToMove(r,c,endrow,endcol))
    def GetQueenMoves(self,r,c,moves):
        """Queen = rook + bishop movement."""
        self.GetBishopMoves(r,c,moves)
        self.GetRookMoves(r,c,moves)
    def GetKingMoves(self,r,c,moves):
        """Append one-square king steps plus castling candidates from (r,c).

        NOTE(review): castling rights are only revoked when castling is
        actually performed, never when the king or rook first moves, and
        when white's flag is False the elif below can add black's castling
        moves ('e8c8'/'e8g8') on white's turn — verify intended behaviour.
        """
        directions = ((0,1),(1,0),(1,1),(0,-1),(-1,0),(-1,-1),(1,-1),(-1,1))
        allyColour = 'w' if self.whiteToMove else 'b'
        for d in directions:
            endrow = r + d[0]
            endcol = c + d[1]
            if 0 <= endrow < 8 and 0 <= endcol < 8:
                if self.board[endrow][endcol][0] != allyColour:
                    moves.append(ChangeToMove(r,c,endrow,endcol))
        if self.whiteToMove and self.castle[0] == True:
            if self.board[7][1] == '--' and self.board[7][2] == '--' and self.board[7][3] == '--':
                moves.append('e1c1')
            if self.board[7][5] == '--' and self.board[7][6] == '--':
                moves.append('e1g1')
        elif self.castle[1] == True:
            if self.board[0][1] == '--' and self.board[0][2] == '--' and self.board[0][3] == '--':
                moves.append('e8c8')
            if self.board[0][5] == '--' and self.board[0][6] == '--':
                moves.append('e8g8')
| hwright01/General | Python/Chess/ChessEngine.py | ChessEngine.py | py | 12,133 | python | en | code | 0 | github-code | 36 |
13870439802 | # 10~99 사이의 난수 n개 생성하기(13이 나오면 중단)
import random
# Print up to n random two-digit numbers; stop early when 13 appears.
print('10~99 사이의 난수 n개 생성하기(13이 나오면 중단)')
count = int(input('난수의 개수를 입력하세요.: '))
for _ in range(count):
    value = random.randint(10, 99)
    print(value, '', end='')
    if value == 13:
        print('\n프로그램을 중단합니다.')
        break
else:
    # The for-else branch runs only when the loop finished without a break.
    print('\n난수 생성을 중단합니다.')
17078297023 | from django.shortcuts import render
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.conf import settings
from .forms import Contact_us_form, SupportForm
import urllib
import json
def contact_us(request):
    """Render and process the contact-us form (reCAPTCHA protected).

    On a valid POST whose captcha verifies, emails the message to the site
    owner, stores it, and shows the success page; otherwise re-renders the
    form (bound on POST, empty on GET).
    """
    form = Contact_us_form(request.POST) if request.method == 'POST' else Contact_us_form()
    if request.method == 'POST' and form.is_valid():
        message = form.save(commit=False)
        # Verify the reCAPTCHA response with Google before accepting anything.
        recaptcha_response = request.POST.get('g-recaptcha-response')
        url = 'https://www.google.com/recaptcha/api/siteverify'
        values = {
            'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,
            'response': recaptcha_response
        }
        data = urllib.parse.urlencode(values).encode()
        req = urllib.request.Request(url, data=data)
        response = urllib.request.urlopen(req)
        result = json.loads(response.read().decode())
        if result['success']:
            # Logged-in users are identified from the session; anonymous
            # visitors supply their name ('text') and email in the form.
            if request.user.is_authenticated:
                message.email = request.user.email
                message.user = request.user.username
                logined = True
            else:
                message.user = request.POST['text']
                message.email = request.POST['email']
                logined = False
            send_mail(
                'Contact Us from "{}" (Logined: {})'.format(message.email, logined),
                message.body,
                message.email,
                [settings.GMAIL_MAIL],
                fail_silently=False,
            )
            message.save()
            return render(request, 'get_in_touch/contact_us_success.html')
    return render(request, 'get_in_touch/contact_us.html', {'form': form})
def support(request):
    """Render and process the support-request form (reCAPTCHA protected).

    Mirrors contact_us(): on a valid, captcha-verified POST the ticket is
    emailed and stored and the success page is shown; otherwise the form
    is re-rendered (bound on POST, empty on GET).
    """
    form = SupportForm(request.POST) if request.method == 'POST' else SupportForm()
    if request.method == 'POST' and form.is_valid():
        ticket = form.save(commit=False)
        # Verify the reCAPTCHA response with Google before accepting anything.
        recaptcha_response = request.POST.get('g-recaptcha-response')
        url = 'https://www.google.com/recaptcha/api/siteverify'
        values = {
            'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,
            'response': recaptcha_response
        }
        data = urllib.parse.urlencode(values).encode()
        req = urllib.request.Request(url, data=data)
        response = urllib.request.urlopen(req)
        result = json.loads(response.read().decode())
        if result['success']:
            if request.user.is_authenticated:
                ticket.email = request.user.email
                ticket.user = request.user.username
                logined = True
            else:
                ticket.user = request.POST['text']
                ticket.email = request.POST['email']
                logined = False
            send_mail(
                'Support ({}) from "{}" (Logined: {})'.format(
                    ticket.get_problem_display(), ticket.email, logined),
                ticket.body,
                ticket.email,
                [settings.GMAIL_MAIL],
                fail_silently=False,
            )
            ticket.save()
            return render(request, 'get_in_touch/support_success.html')
    return render(request, 'get_in_touch/support.html', {'form': form})
35305572933 | from flask import Flask, jsonify, request
from flask_cors import CORS
import database
# Flask application for the article-search service.
app = Flask(__name__)
app.config["ERROR_404_HELP"] = False
# allow all for simplicity (CORS is open to every origin)
CORS(app)
@app.route("/")
def landing():
return """
Hello, this is the News Article Searcher of Koen Douterloigne! <br>
Please enter any keyword to search for articles containing that keyword<br>
<form action="search" method="post">
<input type="text" name="search" />
</form>
"""
@app.route("/search", methods=['GET', 'POST'])
def search():
data = request.values
query = data['search']
db = database.Database()
results = db.search(query)
if not results:
return f"No results found for search query '{query}' :("
else:
return jsonify(results)
if __name__ == "__main__":
app.run()
| tobneok/isentia_test | server/app.py | app.py | py | 851 | python | en | code | 0 | github-code | 36 |
12834488522 | '''
1. 최대 수익을 저장하는 변수를 만들고 0을 저장합니다.
2. 지금까지의 최저 주가를 저장하는 변수를 만들고 첫째 날의 주가를 기록합니다.
3. 둘째 날의 주가부터 마지막 날의 주가까지 반복합니다.
4. 반복하는 동안 그날의 주가에서 최저 주가를 뺀 값이 현재 최대 수익보다 크면 최대 수익 값을 그 값으로 고칩니다.
5. 그날의 주가가 최저 주가보다 낮으면 최저 주가 값을 그날의 주가로 고칩니다.
6. 처리할 날이 남았으면 4번 과정으로 돌아가 반복하고, 다 마쳤으면 최대 수익에 저장된 값을 결괏값으로 돌려주고 종료합니다.
'''
# n = int(input())
# prices = []
#
# for i in range(5):
# price = int(input())
# prices.append(price)
# print(prices)
# def stockloss(price):
# n = len(price)
# maxloss = 0
# highprice = 0
# for i in range(n):
# if price[i] > highprice:
# highprice = price[i]
# if price[i] - highprice < maxloss:
# maxloss = price[i] - highprice
# return maxloss
# print(stockloss(prices))
def max_loss(prices):
    """Return the largest peak-to-trough drop in *prices* (non-positive).

    Tracks the running maximum and the worst drawdown below it, exactly
    like the original inline loop (running high starts at 0).
    """
    maxloss = 0
    highprice = 0
    for v in prices:
        if v > highprice:
            highprice = v
        if v - highprice < maxloss:
            maxloss = v - highprice
    return maxloss


def main():
    """Read n (kept for the input protocol), then the prices; print the loss."""
    n = int(input())
    prices = [int(tok) for tok in input().split()]
    print(max_loss(prices))


if __name__ == '__main__':
    # Guarded so the module can be imported (e.g. for testing) without
    # blocking on stdin.
    main()
# n = int(input())
# prices = list(map(int, input().split()))
#
# loss = 0
# high = prices[0]
#
# for p in prices:
# high = max(high, p)
# loss = min(loss, p - high)
#
# print(loss)
| ohjooyeong/codingame | stock exchange losses.py | stock exchange losses.py | py | 1,551 | python | ko | code | 0 | github-code | 36 |
5163641580 | import yaml
import sys
import xarray as xr
import time
import glob
def subset_vars(argv):
    """Subset a wrfout NetCDF file to the variables listed in a YAML file.

    argv: [prog, in path, in file, out path, out file, varlist path,
    varlist file].  Paths and file names are concatenated as given (the
    path arguments are expected to carry their trailing separator).
    """
    if len(argv) != 7:
        print("USAGE: wrf-subset-vars.py <in nc path> <in nc file> <out nc path> <out nc file> <var list path> <var list file>\n")
        sys.exit(1)

    innc_name = argv[1] + argv[2]
    outnc_name = argv[3] + argv[4]
    yaml_varkeep_name = argv[5] + argv[6]

    # Names of the variables to keep, read from the YAML description.
    with open(yaml_varkeep_name, 'r') as file_keep:
        var_keep_dict = yaml.full_load(file_keep)
    var_keep_list = [entry['var_name'] for entry in var_keep_dict]

    # Lazily open the raw wrfout file, select the subset, and carry the
    # global attributes over to the new dataset.
    ds_wrf = xr.open_dataset(innc_name)
    ds_wrf_subset = ds_wrf[var_keep_list]
    ds_wrf_subset.attrs = ds_wrf.attrs

    ds_wrf_subset.to_netcdf(path=outnc_name)
    return
| LEAF-BoiseState/py-wrf-postproc | wrf-subset-vars.py | wrf-subset-vars.py | py | 1,265 | python | en | code | 3 | github-code | 36 |
16583267084 | from datetime import datetime, date
from email.mime.text import MIMEText
from flask import Flask
import os
import schedule
import smtplib
import time
# import threading
from mailjet_rest import Client
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from config import *
# Recorded once at import time; reported when the scheduler starts.
startupTs = datetime.now()
global env_  # NOTE(review): `global` is a no-op at module level — env_ is already a module global
env_ = env('_ENV')  # selects the config[...] entry; env() presumably comes from `from config import *` — verify
# env_ = 'prod'
app = Flask(__name__)
# This works on google app engine but appends a different email suffix that makes it look like spam
def mailjet():
    """Send the standby-list request email for user1 via the Mailjet v3.1 API.

    Per the module comment above, this path works on Google App Engine but
    Mailjet appends a different sender suffix that makes the mail look like
    spam — which is why send_emails() (plain SMTP) is used instead.
    """
    today = date.today()
    # Textual month, day and year
    today_ = today.strftime("%B %d, %Y")
    api_key = config[env_].MAILJET_KEY
    api_secret = config[env_].MAILJET_SECRET
    mailjet = Client(auth=(api_key, api_secret), version='v3.1')
    # Mailjet's batched message payload; here a single self-addressed message
    # (From and To are both user1).
    data = {
        'Messages': [
            {
                "From": {
                    "Email": f"{config[env_].user1}",
                    "Name": f"{config[env_].name1.split(' ')[0]}"
                },
                "To": [
                    {
                        "Email": f"{config[env_].user1}",
                        "Name": f"{config[env_].name1.split(' ')[0]}"
                    }
                ],
                "Subject": f'MNPD COVID-19 Vaccine Standby List: {config[env_].name1}, {today_}',
                "TextPart": "My first Mailjet email",
                "HTMLPart": f'''
                Hello,
                Reaching out to be entered into the Metro Nashville Public Health Department COVID-19 Vaccine Standby List!
                Contact Info:
                Name: {config[env_].name1}
                Phone: {config[env_].ph1}
                Thank you,
                -{config[env_].name1.split(' ')[0]}
                ''',
                "CustomID": ""
            }
        ]
    }
    result = mailjet.send.create(data=data)
    print(result.status_code)
    print(result.json())
    return
# this does not seem to work on google app engine, but does work locally and does not look like spam. will get this going in cron
def send_emails():
    """Send the MNPD COVID-19 vaccine standby-list request for both
    configured users (config[env_].user1/user2) via Gmail SMTP over SSL.

    Refactor: the original duplicated the whole build-connect-send sequence
    per user; that is now factored into the `deliver` helper. Bug fix: the
    original leaked the first SMTP connection (only the second was ever
    quit) — every connection is now closed in a finally block.
    """
    today = date.today()
    # Textual month, day and year
    today_ = today.strftime("%B %d, %Y")
    # connect with Google's servers
    smtp_ssl_host = 'smtp.gmail.com'
    smtp_ssl_port = 465

    def deliver(username, password, name, body):
        # Build the MIMEText message and send it from `username`'s account
        # to the configured standby-list address.
        from_addr = username
        to_addrs = config[env_].to_addr
        message = MIMEText(body)
        message['subject'] = f'MNPD COVID-19 Vaccine Standby List: {name}, {today_}'
        message['from'] = from_addr
        message['to'] = ', '.join([to_addrs])
        # we'll connect using SSL
        server = smtplib.SMTP_SSL(smtp_ssl_host, smtp_ssl_port)
        # to interact with the server, first we log in
        # and then we send the message
        server.login(username, password)
        try:
            server.sendmail(from_addr, to_addrs, message.as_string())
            print(f'''Successfully sent email from {name.split(' ')[0]} at {datetime.now()}''')
        except Exception as e:
            print(e)
        finally:
            # Always close the connection (the original only quit the last one).
            server.quit()

    ############################################# USER1 ############################################
    name = config[env_].name1
    ph = config[env_].ph1
    deliver(config[env_].user1, config[env_].pw1, name, f'''
    Hi,
    Reaching out to be added to the Metro Nashville Public Health Department COVID-19 Vaccine standby list.
    Contact Info:
    Name: {name}
    Ph: {ph}
    Thank you!
    -{name.split(' ')[0]}
    ''')
    ############################################# USER2 ############################################
    name = config[env_].name2
    ph = config[env_].ph2
    deliver(config[env_].user2, config[env_].pw2, name, f'''
    Hello,
    Reaching out to be entered into the Metro Nashville Public Health Department COVID-19 Vaccine Standby List!
    Contact Info:
    Name: {name}
    Phone: {ph}
    Thank you,
    -{name.split(' ')[0]}
    ''')
    return
# sendgrid's setup was a pain so i abandoned this
# def sendgrid():
# message = Mail(
# from_email=config[env_].user1,
# to_emails=config[env_].to_addr,
# subject='Sending with Twilio SendGrid is Fun',
# html_content='<strong>and easy to do anywhere, even with Python</strong>')
# try:
# sg = SendGridAPIClient(config[env_].SENDGRID_API_KEY)
# response = sg.send(message)
# print(response.status_code)
# print(response.body)
# print(response.headers)
# except Exception as e:
# print(e.message)
# return
# Scheduling Part of Script
# def background_thread():
# schedule_thread = threading.Thread(
# target=schedules)
# schedule_thread.start()
# return '{}'
def schedules():
    """Send the emails once at startup, then keep re-sending them on the
    cadence configured in config[env_].refresh["frequency"] (minutes).
    Blocks forever; the trailing return is unreachable."""
    print(f'Starting service at {startupTs} in Env: {env_}')
    send_emails()
    schedule.every(config[env_].refresh["frequency"]).minutes.do(send_emails)
    while True:
        schedule.run_pending()
        # NOTE(review): pending jobs are only polled once per hour, so a
        # refresh frequency shorter than 60 minutes will never fire more
        # often than hourly — confirm this is intended.
        time.sleep(3600) # checks if any pending jobs every 3600 seconds -> 1 hour
    return
# End of scheduling part
def test():
    """Entry helper: run the blocking scheduler (the mailjet path is
    disabled — see the module comment about it looking like spam)."""
    schedules()
    # mailjet()
    return
if __name__ == '__main__':
    try:
        # NOTE(review): schedules() blocks forever inside test(), so test()
        # never returns and app.run() is effectively unreachable — confirm
        # whether the Flask server is actually needed here.
        app.run(test())
    except Exception as e:
        print('app kickoff error: ', e)
| wjewell3/email | main.py | main.py | py | 6,000 | python | en | code | 0 | github-code | 36 |
10663976037 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 15:43:55 2015
Plot coodinate time series for radio sources.
@author: Neo
"""
import numpy as np
import matplotlib.pyplot as plt
from fun import ADepoA, ADepoS
cos = np.cos  # short alias used in the R.A. scaling below
dat_dir = '../data/opa/'         # per-source time-series input files (<source>.dat)
res_dir = '../plot/timeseries/'  # output directory for the EPS plots
t0 = 2000.0  # reference epoch used as x0 for the linear-drift lines
def tsplot(soun, pmra, pmdec, ra0, dec0):
    """Plot the R.A./Dec. coordinate time series of radio source `soun`.

    Offsets are plotted relative to (ra0, dec0) in milliarcseconds; when
    ra0 == 0.0 the last data point is used as the reference. If pmra != 0.0
    a linear-drift line (pmra/pmdec, presumably in uas/yr — confirm against
    the .apm file) is overplotted. The figure is saved as <soun>.eps.
    """
    # columns: epoch, R.A., Dec., and their formal errors
    epo, ra, dec, era, edec = np.loadtxt(dat_dir+soun +'.dat', usecols=list(range(5)), unpack=True)
    # ADepoA/ADepoS come from the project `fun` module; the scalar variant is
    # needed because np.loadtxt returns a 0-d array for a single-line file.
    if epo.size>1:
        epo = ADepoA(epo)
    else:
        epo = ADepoS(epo)
    if ra0 == 0.0:
        ra0 = ra[-1]
        dec0= dec[-1]
    # Degrees -> mas offsets; R.A. offset is scaled by cos(Dec).
    x, y1, err1, y2, err2 = epo, (ra-ra0)*3.6e6*cos(np.deg2rad(dec)), era, (dec-dec0)*3.6e6, edec
    x0 = t0
    x1 = np.arange(1979.0, 2017.0, 0.1)
    ## time series plot: R.A. on top, Dec. below, shared time axis
    fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
    ax0.errorbar(x, y1, yerr=err1, fmt='bo', markersize=3)
    ax1.errorbar(x, y2, yerr=err2, fmt='bo', markersize=3)
    ### for data points >=9: overplot the fitted linear drift
    if pmra != 0.0:
        y3 = pmra*(x1-x0)/1.0e3
        y4 = pmdec*(x1-x0)/1.0e3
        ax0.plot(x1, y3, 'r')
        ax1.plot(x1, y4, 'r')
    ## some details.
    ax0.set_ylabel('R.A.(mas)')
    ax0.set_ylim([-50, 50])
    ax0.set_xlim([1979,2017])
    ax0.set_title(soun)
    ax1.set_ylabel('Dec(mas)')
    ax1.set_ylim([-50, 50])
    # plt.show()
    plt.savefig(res_dir+soun+'.eps', dpi=100)
    plt.close()
#tsplot('0434-188')
## read catalog file to get name of sources.
# Source-name catalog: one source name per line.
cat = '../list/opa.list'
soun = np.loadtxt(cat, dtype=str)
## linear drift data: proper-motion fit and reference position per source,
## in the same row order as the catalog.
apm = '../results/opa_all.apm'
pmRA, pmDE, RA0, DE0 = np.loadtxt(apm, usecols=(2,3,7,8), unpack=True)
for i in range(len(soun)):
    sou_name = soun[i]
    pmra, pmdec, ra0, dec0 = pmRA[i], pmDE[i], RA0[i], DE0[i]
    ## plot one EPS figure per source
    tsplot(sou_name, pmra, pmdec, ra0, dec0)
print('Done!')
8209022862 | # from gen_captcha import gen_captcha_text_and_image
# from gen_captcha import number
# from gen_captcha import alphabet
# from gen_captcha import ALPHABET
from custom import gen_captcha_text_and_image
from custom import number
from custom import alphabet
from custom import ALPHABET
import time
import numpy as np
import tensorflow as tf
# Generate one sample up-front to learn the label length and image shape.
text, image = gen_captcha_text_and_image()
print("verification code iamge channel:", image.shape)  # (60, 160, 3)
# Captcha image dimensions
IMAGE_HEIGHT = 60
IMAGE_WIDTH = 160
MAX_CAPTCHA = len(text)
print("Max number of label:", MAX_CAPTCHA)  # longest captcha is 4 chars; shorter codes would be padded with '_'
# 把彩色图像转为灰度图像(色彩对识别验证码没有什么用)
def convert2gray(img):
    """Collapse a color captcha image to a single grayscale channel.

    Color carries no information for captcha recognition. For an array with
    a channel axis the channel mean is used — a fast stand-in for the
    standard luminosity formula 0.2989*R + 0.5870*G + 0.1140*B. Arrays that
    are already 2-D are returned unchanged.
    """
    if len(img.shape) <= 2:
        return img
    return np.mean(img, -1)
"""
cnn在图像大小是2的倍数时性能最高, 如果你用的图像大小不是2的倍数,可以在图像边缘补无用像素。
np.pad(image,((2,3),(2,2)), 'constant', constant_values=(255,)) # 在图像上补2行,下补3行,左补2行,右补2行
"""
# Character alphabet used for the one-hot encoding below.
char_set = number + alphabet + ALPHABET + ['_'] # '_' pads captchas shorter than 4 chars
CHAR_SET_LEN = len(char_set)
def text2vec(text):
    """Encode `text` as a flat one-hot vector of length MAX_CAPTCHA*CHAR_SET_LEN.

    Each character occupies one CHAR_SET_LEN-wide slot; raises ValueError
    when the text is longer than MAX_CAPTCHA.
    """
    text_len = len(text)
    if text_len > MAX_CAPTCHA:
        raise ValueError('验证码最长4个字符')
    vector = np.zeros(MAX_CAPTCHA * CHAR_SET_LEN)
    def char2pos(c):
        # Index mapping: '0'-'9' -> 0-9, 'A'-'Z' -> 10-35, 'a'-'z' -> 36-61,
        # '_' (padding) -> 62. Anything else raises.
        if c == '_':
            k = 62
            return k
        k = ord(c) - 48
        if k > 9:
            k = ord(c) - 55
        if k > 35:
            k = ord(c) - 61
        if k > 61:
            raise ValueError('No Map')
        return k
    for i, c in enumerate(text):
        idx = i * CHAR_SET_LEN + char2pos(c)
        vector[idx] = 1
    return vector
# Inverse of text2vec: decode a one-hot vector back to its text.
def vec2text(vec):
    """Decode a one-hot vector (as produced by text2vec) into a string.

    Reads the positions of the non-zero entries and inverts char2pos's
    mapping: 0-9 -> digits, 10-35 -> 'A'-'Z', 36-61 -> 'a'-'z', 62 -> '_'.
    """
    char_pos = vec.nonzero()[0]
    text = []
    for i, c in enumerate(char_pos):
        char_at_pos = i # c/63
        char_idx = c % CHAR_SET_LEN
        if char_idx < 10:
            char_code = char_idx + ord('0')
        elif char_idx < 36:
            char_code = char_idx - 10 + ord('A')
        elif char_idx < 62:
            char_code = char_idx - 36 + ord('a')
        elif char_idx == 62:
            char_code = ord('_')
        else:
            raise ValueError('error')
        text.append(chr(char_code))
    return "".join(text)
"""
#向量(大小MAX_CAPTCHA*CHAR_SET_LEN)用0,1编码 每63个编码一个字符,这样顺利有,字符也有
vec = text2vec("F5Sd")
text = vec2text(vec)
print(text) # F5Sd
vec = text2vec("SFd5")
text = vec2text(vec)
print(text) # SFd5
"""
# Generate one training batch of flattened grayscale images + one-hot labels.
def get_next_batch(batch_size=128):
    """Return (batch_x, batch_y): batch_x is batch_size flattened grayscale
    images scaled to [0, 1]; batch_y holds the matching one-hot labels."""
    batch_x = np.zeros([batch_size, IMAGE_HEIGHT * IMAGE_WIDTH])
    batch_y = np.zeros([batch_size, MAX_CAPTCHA * CHAR_SET_LEN])
    # The generator occasionally yields images of a different size;
    # retry until the shape is the expected (60, 160, 3).
    def wrap_gen_captcha_text_and_image():
        while True:
            text, image = gen_captcha_text_and_image()
            if image.shape == (60, 160, 3):
                return text, image
    for i in range(batch_size):
        text, image = wrap_gen_captcha_text_and_image()
        image = convert2gray(image)
        batch_x[i, :] = image.flatten() / 255 # (image.flatten()-128)/128 would center the data at 0
        batch_y[i, :] = text2vec(text)
    return batch_x, batch_y
####################################################################
# Graph inputs: X holds flattened grayscale images, Y the one-hot labels
# (MAX_CAPTCHA * CHAR_SET_LEN wide).
with tf.name_scope('input'):
    X = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT * IMAGE_WIDTH])
    Y = tf.placeholder(tf.float32, [None, MAX_CAPTCHA * CHAR_SET_LEN])
# Dropout keep-probability, fed at run time (0.75 during training, 1.0 for eval).
keep_prob = tf.placeholder(tf.float32) # dropout
# Define the CNN
def crack_captcha_cnn(w_alpha=0.01, b_alpha=0.1):
    """Build the captcha CNN and return its logits tensor.

    Three conv(3x3, SAME) + relu + maxpool(2x2) + dropout stages, then a
    1024-unit fully-connected layer, then a linear output of size
    MAX_CAPTCHA * CHAR_SET_LEN. w_alpha/b_alpha scale the random-normal
    weight/bias initializers.
    """
    # Reshape the flat input into NHWC grayscale images; -1 = batch size.
    x = tf.reshape(X, shape=[-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])
    # 3 conv layer
    # Layer 1: 3x3 kernels, 1 input channel, 32 feature maps.
    w_c1 = tf.Variable(w_alpha * tf.random_normal([3, 3, 1, 32]))
    # One bias per output feature map.
    b_c1 = tf.Variable(b_alpha * tf.random_normal([32]))
    # conv2d with padding='SAME' keeps the spatial size; stride 1 everywhere.
    conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides=[1, 1, 1, 1], padding='SAME'), b_c1))
    # 2x2 max-pool halves each spatial dimension.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Dropout (keep_prob fed at run time) to reduce overfitting.
    conv1 = tf.nn.dropout(conv1, keep_prob)
    # Layer 2: 32 -> 64 feature maps, same conv/pool/dropout pattern.
    w_c2 = tf.Variable(w_alpha * tf.random_normal([3, 3, 32, 64]))
    b_c2 = tf.Variable(b_alpha * tf.random_normal([64]))
    conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, w_c2, strides=[1, 1, 1, 1], padding='SAME'), b_c2))
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv2 = tf.nn.dropout(conv2, keep_prob)
    # Layer 3: 64 -> 64 feature maps.
    w_c3 = tf.Variable(w_alpha * tf.random_normal([3, 3, 64, 64]))
    b_c3 = tf.Variable(b_alpha * tf.random_normal([64]))
    conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, w_c3, strides=[1, 1, 1, 1], padding='SAME'), b_c3))
    conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv3 = tf.nn.dropout(conv3, keep_prob)
    # Fully connected layer: the 8*20*64 input size matches the 60x160 image
    # after three 2x2 poolings; this constant must track the layers above.
    w_d = tf.Variable(w_alpha * tf.random_normal([8 * 20 * 64, 1024]))
    b_d = tf.Variable(b_alpha * tf.random_normal([1024]))
    # Flatten conv3 to (batch, 8*20*64) to feed the dense layer.
    dense = tf.reshape(conv3, [-1, w_d.get_shape().as_list()[0]])
    dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
    dense = tf.nn.dropout(dense, keep_prob)
    # Output layer: one logit per (position, character) pair.
    with tf.name_scope('w_out'):
        w_out = tf.Variable(w_alpha * tf.random_normal([1024, MAX_CAPTCHA * CHAR_SET_LEN]))
    with tf.name_scope('b_out'):
        b_out = tf.Variable(b_alpha * tf.random_normal([MAX_CAPTCHA * CHAR_SET_LEN]))
    # Raw logits; the sigmoid is applied inside the loss during training.
    out = tf.add(tf.matmul(dense, w_out), b_out)
    # out = tf.nn.softmax(out)
    return out
# Training
def train_crack_captcha_cnn():
    """Train the captcha CNN until test accuracy exceeds 50%, then save.

    Uses sigmoid cross-entropy over the per-position one-hot logits, Adam
    at lr=0.001, dropout keep_prob=0.75 for training and 1.0 for the
    accuracy check performed every 100 steps. Summaries go to ./log/.
    """
    # with tf.device('/cpu:0'):
    output = crack_captcha_cnn()
    # Sigmoid cross-entropy treats each output unit independently, which
    # suits the concatenated per-character one-hot targets.
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=Y))
    tf.summary.scalar('loss', loss) # TensorBoard scalar for the loss
    # Adam; a decaying learning rate would speed up convergence further.
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
    # Accuracy: argmax per character position must match the label's argmax.
    predict = tf.reshape(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN])
    max_idx_p = tf.argmax(predict, 2)
    max_idx_l = tf.argmax(tf.reshape(Y, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)
    correct_pred = tf.equal(max_idx_p, max_idx_l)
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    tf.summary.scalar('accuracy', accuracy)
    saver = tf.train.Saver()
    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter("log/", sess.graph)
        sess.run(tf.global_variables_initializer())
        step = 0
        while True:
            batch_x, batch_y = get_next_batch(64)
            _, loss_ = sess.run([optimizer, loss], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.75})
            print("step is %s , loss is %g" % (step, loss_))
            # writer.add_summary(summary,step)
            # Evaluate accuracy on a fresh batch every 100 steps.
            if step % 100 == 0:
                batch_x_test, batch_y_test = get_next_batch(100)
                summary, acc = sess.run([merged, accuracy], feed_dict={X: batch_x_test, Y: batch_y_test, keep_prob: 1.})
                print("----------step is %s , acc is %g--------" % (step, acc))
                writer.add_summary(summary, step)
                # Stop and checkpoint once accuracy tops 50%.
                if acc > 0.50 :
                    saver.save(sess, "F://crack_capcha_model/crack_capcha.model", global_step=step)
                    break
            step += 1
            # performance test
            # if step == 20:
            # break
if __name__ == '__main__':
    # Bug fix: time.clock() was deprecated in Python 3.3 and removed in 3.8;
    # time.perf_counter() is the documented replacement for wall-clock timing.
    start = time.perf_counter()
    train_crack_captcha_cnn()
    end = time.perf_counter()
    print('Running time: %s Seconds' % (end - start))
| lensv/Captcha | training.py | training.py | py | 13,040 | python | zh | code | 0 | github-code | 36 |
22140484347 | from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.http.response import HttpResponseForbidden, JsonResponse
from django.shortcuts import redirect, get_object_or_404
from django.template import loader
from django.views.decorators.csrf import csrf_exempt
from .models import ShortenURL
from .src.constants import URL_ENC
from .src.base62 import encode_base62, decode_base62
def index(request):  # encode_url
    """ [GET /] Render the URL shortener's landing page; any other method
    is rejected with 403. """
    if request.method != "GET":
        return HttpResponseForbidden()
    template = loader.get_template('url_shortener/index.html')
    rendered = template.render({}, request)
    return HttpResponse(rendered)
@csrf_exempt
def post_encode_url(request):
    """ [POST /enc-url]
    Create (or reuse) a ShortenURL record for the posted "url" form field
    and return JSON with the base62-encoded short link and a status message.
    """
    if request.method != "POST":
        return HttpResponseForbidden()
    response_data = {
        "shorten_url": None,
        "message": None
    }
    status_code = 200
    try:
        # request body should include URL like { "url": "https://www.github.com" }
        print(request.POST)
        url_fetched = request.POST.get("url")
        # Bug fix: .get() returns None when the field is missing; the
        # original then crashed with an uncaught AttributeError on
        # .endswith (an HTTP 500). Report a clean 400 instead.
        if not url_fetched:
            response_data["message"] = "The URL may be invalid. Try something else."
            return JsonResponse(response_data, status=400)
        if not url_fetched.endswith("/"):
            url_fetched += "/"
        # add 'https://' or 'http://' if url does not start with them
        url_record = None
        if url_fetched.startswith("https://") or url_fetched.startswith("http://"):
            url_record = ShortenURL.objects.filter(url=url_fetched)
        else:
            url_record = ShortenURL.objects.filter(url="https://" + url_fetched)
            if not url_record:
                url_record = ShortenURL.objects.filter(url="http://" + url_fetched)
        # if the url does not exist in the table, insert a new record
        if not url_record:
            url_record = ShortenURL(url=url_fetched)
            url_record.save()
        else:
            url_record = url_record[0]
        # response 200 ok
        response_data["shorten_url"] = request.build_absolute_uri()[:-len(URL_ENC)] \
            + encode_base62(url_record.id)
        response_data["message"] = "Success! You may copy the shorten URL above."
    except ValidationError as e:
        # URL is not in valid form
        if "url" in e.message_dict:
            response_data["message"] = "The URL may be invalid. Try something else."
            status_code = 400
        else:
            response_data["message"] = "Sorry. There is a problem with the service."
            status_code = 500
    # status= replaces the original post-hoc .status_code assignment.
    return JsonResponse(response_data, status=status_code)
def get_decode_url(request, shorten_url):
    """ [GET /<shorten_url>] Decode the base62 short code, look up the
    stored record (404 if absent) and redirect to its original URL. """
    if request.method != "GET":
        return HttpResponseForbidden()
    record_id = decode_base62(shorten_url)
    record = get_object_or_404(ShortenURL, pk=record_id)
    return redirect(record.url)
| njsh4261/url_shortener | backend/url_shortener/views.py | views.py | py | 2,785 | python | en | code | 0 | github-code | 36 |
19743419969 | from os import sep
from subprocess import call
import click
# Path to the alembic .ini, normalised to the host OS path separator.
path_ini_alembic_file = 'app_config/config_files/alembic.ini'.replace('/', sep)
# Click group: subcommands below are invoked as `db <subcommand>`.
# (Help text is kept out of docstrings because click would display them.)
@click.group('db')
def db():
    ...
@db.command()
@click.option('-m', 'message', default='migração via CLI',
              help='Mensagem para identificar a migrations do alembic')
# Runs `alembic revision --autogenerate` against the configured .ini,
# tagging the new revision with `message`.
def makemigration(message):
    call(
        ['alembic', '-c', path_ini_alembic_file, 'revision', '--autogenerate',
         '-m', message]
    )
@db.command()
# Applies all pending migrations: `alembic upgrade head`.
def migrate():
    call(['alembic', '-c', path_ini_alembic_file, 'upgrade', 'head'])
@db.command()
@click.option('-m', 'message', default='migração via CLI',
              help='Mensagem para identificar a migrations do alembic')
# Placeholder command: not implemented yet; `message` is currently unused.
def initialize(message):
    ...
| isaquefel/ensaio_app | app_rotinas/cli/migrations_management.py | migrations_management.py | py | 760 | python | en | code | 0 | github-code | 36 |
73488163624 |
class Animal:
    # Class-level attribute shared by all animals/subclasses.
    is_alive: bool = True
    def breeze(self):
        """Demo behaviour inherited by every subclass."""
        print("I'm breezing")
class Mammal(Animal):
    # Annotation only — no class-level value is assigned.
    leg_amount: int
    kid_food_type: str = 'Milk'
    def voice(self):
        # Poor-man's abstract method: subclasses must override.
        raise NotImplementedError
    def do_bad_things(self):
        raise NotImplementedError
class Cat(Mammal):
    def voice(self):
        """Override of Mammal.voice."""
        print('Meow')
class Dog(Mammal):
    def voice(self):
        """Override of Mammal.voice."""
        print('Guf!')
# Multiple-inheritance demo: Cat precedes Dog in the MRO, so
# CatDog().voice() resolves to Cat.voice.
class CatDog(Cat, Dog):
    pass
dog = Dog()
cat = Cat()
#cat.voice()
#dog.voice()
catdog = CatDog()
# Prints 'Meow' — Cat comes first in CatDog's MRO.
catdog.voice()
#animals = [cat, dog]
#for animal in animals:
# animal.voice()
from datetime import datetime
class Human:
    first_name: str
    last_name: str
    # Double leading underscore triggers name mangling: callable from
    # outside only as _Human__digest_food (see the demo below).
    def __digest_food(self):
        print("I'm digesting")
    def eat(self):
        self.__digest_food()
    def __init__(self):
        self.first_name = 'Ivan'
    @staticmethod
    def print_current_time():
        """Utility that needs neither the instance nor the class."""
        print(datetime.now())
    @classmethod
    def get_list_of_attributes(cls):
        # NOTE(review): 'first name' (with a space) differs from the
        # attribute name first_name — confirm whether intentional.
        return['first name', 'last_name']
h = Human()
h.eat()
# h._Human__digest_food()
print(CatDog.mro())
h.print_current_time()
print(Human.get_list_of_attributes())
print(type(type))
# Dynamic class creation: type(name, bases, namespace).
NewHuman = type('NewHuman', (Human,), {'power': 100500, 'can_die': False})
# NOTE(review): binds the class itself, not an instance (no parentheses);
# the attribute reads below still work because they are class attributes.
newhuman = NewHuman
print(newhuman.power, newhuman.can_die)
class Configuration:
    """Singleton: every instantiation returns the same shared object."""
    _instance = None
    def __new__(cls, *args, **kwargs):
        if not isinstance(cls._instance, cls):
            # Bug fix: the original forwarded *args/**kwargs to
            # object.__new__, which raises TypeError whenever arguments are
            # actually supplied (object.__new__ takes none).
            cls._instance = super().__new__(cls)
        return cls._instance
config = Configuration()
config2 = Configuration()
# True: Configuration.__new__ always hands back the same instance.
print(config2 is config)
from dataclasses import dataclass
from typing import List

# Dataclass demo: a Team composed of Players and a Coach.
@dataclass
class Player:
    full_name: str

@dataclass
class Coach:
    full_name: str

@dataclass
class Team:
    players: List[Player]
    coach: Coach

players = [Player(full_name='Roberto Carlos'), Player(full_name='Roberto Pirlo')]
coach = Coach('Jurgen Klopp')
# Bug fix: the original passed `coach=oach` — a NameError (typo for `coach`).
dream_team = Team(players=players, coach=coach)
| VladPetrov19/Lessons | venv/lesson_14.py | lesson_14.py | py | 2,024 | python | en | code | 0 | github-code | 36 |
5501591657 | ### IMPORT THE REQUIRED LIBRARIES
# To read the dataset in .mat format
import scipy.io as sio
# For matrix operations
import numpy as np
# Keras functions to create and compile the model
from keras.layers import Input, Conv2D, Lambda, Reshape, Multiply, Add, Subtract
from keras.activations import relu
from keras.optimizers import Adam
from keras.models import Model
from keras import backend as K
### READING THE DATA
phi_read = sio.loadmat('phi_0_25_1089.mat')
train = sio.loadmat('Training_Data_Img91.mat')
### PREPROCESSING
# Reading training input and labels (labels = ground-truth 33x33 patches
# flattened to 1089, per the Reshape layers below)
train_inp = train['inputs']
train_labels = train['labels']
# Preparing the constant matrices
phi = np.transpose(phi_read['phi'])
ptp = np.dot(phi, np.transpose(phi)) # phi^T x phi
# Least-squares construction of a data-driven right-inverse of phi,
# used to form the initial reconstruction x0.
temp1 = np.transpose(train_labels)
temp2 = np.dot(np.transpose(phi), temp1)
temp3 = np.dot(np.dot(temp1, np.transpose(temp2)), np.linalg.inv(np.dot(temp2, np.transpose(temp2))))
phi_inv = np.transpose(temp3) # phi^-1
# Instead of multiplying each batch by phi and then supplying it to the model as input,
# we multiply the entire training set by phi in the preprocessing stage itself
x_inp = np.dot(train_labels, phi)
### INITIALIZING CONSTANTS
n_input = 272
tau = 0.1  # NOTE(review): tau appears unused in the rest of this script
lambda_step = 0.1  # gradient-descent step size inside each ISTA block
soft_thr = 0.1     # soft-thresholding level in the transform domain
conv_size = 32
filter_size = 3
### PREPARING THE MODEL (An image of the model map has been attached)
# Defining the input and output
inp = Input((n_input,))         # compressed measurements phi*x
inp_labels = Input((1089, ))    # ground-truth patches, fed for the loss
# Defining the input for the first ISTA block
x0 = Lambda(lambda x: K.dot(x, K.constant(phi_inv)))(inp)
phi_tb = Lambda(lambda x: K.dot(x, K.constant(np.transpose(phi))))(inp)
# ISTA block #1. Each of the five blocks applies one unfolded ISTA
# iteration: x1 = gradient step (x - lam*x*PtP + lam*Phi^T b); x2 = reshape
# to a 33x33 patch; x3..x44 = learned forward transform F; x5 =
# soft-threshold sign(v)*relu(|v| - soft_thr); x6..x7 = learned inverse
# transform + residual add; x8 = flatten back to 1089. The *_sym branch
# re-applies the four transform layers to x3 and subtracts x3, giving the
# symmetry residual (inverse∘forward ≈ identity) penalized in the loss.
conv1_x1 = Lambda(lambda x: x - lambda_step * K.dot(x, K.constant(ptp)) + lambda_step * phi_tb, name='conv1_x1')(x0)
conv1_x2 = Reshape((33, 33, 1), name='conv1_x2')(conv1_x1)
conv1_x3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv1_x3')(conv1_x2)
conv1_sl1 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv1_sl1')
conv1_x4 = conv1_sl1(conv1_x3)
conv1_sl2 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv1_sl2')
conv1_x44 = conv1_sl2(conv1_x4)
conv1_x5 = Multiply(name='conv1_x5')([Lambda(lambda x: K.sign(x))(conv1_x44), Lambda(lambda x: relu(x - soft_thr))(Lambda(lambda x: K.abs(x))(conv1_x44))])
conv1_sl3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv1_sl3')
conv1_x6 = conv1_sl3(conv1_x5)
conv1_sl4 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv1_sl4')
conv1_x66 = conv1_sl4(conv1_x6)
conv1_x7 = Conv2D(1, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv1_x7a')(conv1_x66)
conv1_x7 = Add(name='conv1_x7b')([conv1_x7, conv1_x2])
conv1_x8 = Reshape((1089,), name='conv1_x8')(conv1_x7)
conv1_x3_sym = conv1_sl1(conv1_x3)
conv1_x4_sym = conv1_sl2(conv1_x3_sym)
conv1_x6_sym = conv1_sl3(conv1_x4_sym)
conv1_x7_sym = conv1_sl4(conv1_x6_sym)
conv1_x11 = Subtract(name='conv1_x11')([conv1_x7_sym, conv1_x3])
conv1 = conv1_x8
conv1_sym = conv1_x11
# ISTA block #2 — identical structure to block #1, fed with conv1.
conv2_x1 = Lambda(lambda x: x - lambda_step * K.dot(x, K.constant(ptp)) + lambda_step * phi_tb, name='conv2_x1')(conv1)
conv2_x2 = Reshape((33, 33, 1), name='conv2_x2')(conv2_x1)
conv2_x3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv2_x3')(conv2_x2)
conv2_sl1 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv2_sl1')
conv2_x4 = conv2_sl1(conv2_x3)
conv2_sl2 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv2_sl2')
conv2_x44 = conv2_sl2(conv2_x4)
conv2_x5 = Multiply(name='conv2_x5')([Lambda(lambda x: K.sign(x))(conv2_x44), Lambda(lambda x: relu(x - soft_thr))(Lambda(lambda x: K.abs(x))(conv2_x44))])
conv2_sl3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv2_sl3')
conv2_x6 = conv2_sl3(conv2_x5)
conv2_sl4 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv2_sl4')
conv2_x66 = conv2_sl4(conv2_x6)
conv2_x7 = Conv2D(1, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv2_x7a')(conv2_x66)
conv2_x7 = Add(name='conv2_x7b')([conv2_x7, conv2_x2])
conv2_x8 = Reshape((1089,), name='conv2_x8')(conv2_x7)
conv2_x3_sym = conv2_sl1(conv2_x3)
conv2_x4_sym = conv2_sl2(conv2_x3_sym)
conv2_x6_sym = conv2_sl3(conv2_x4_sym)
conv2_x7_sym = conv2_sl4(conv2_x6_sym)
conv2_x11 = Subtract(name='conv2_x11')([conv2_x7_sym, conv2_x3])
conv2 = conv2_x8
conv2_sym = conv2_x11
# ISTA block #3 — identical structure, fed with conv2.
conv3_x1 = Lambda(lambda x: x - lambda_step * K.dot(x, K.constant(ptp)) + lambda_step * phi_tb, name='conv3_x1')(conv2)
conv3_x2 = Reshape((33, 33, 1), name='conv3_x2')(conv3_x1)
conv3_x3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv3_x3')(conv3_x2)
conv3_sl1 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv3_sl1')
conv3_x4 = conv3_sl1(conv3_x3)
conv3_sl2 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv3_sl2')
conv3_x44 = conv3_sl2(conv3_x4)
conv3_x5 = Multiply(name='conv3_x5')([Lambda(lambda x: K.sign(x))(conv3_x44), Lambda(lambda x: relu(x - soft_thr))(Lambda(lambda x: K.abs(x))(conv3_x44))])
conv3_sl3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv3_sl3')
conv3_x6 = conv3_sl3(conv3_x5)
conv3_sl4 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv3_sl4')
conv3_x66 = conv3_sl4(conv3_x6)
conv3_x7 = Conv2D(1, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv3_x7a')(conv3_x66)
conv3_x7 = Add(name='conv3_x7b')([conv3_x7, conv3_x2])
conv3_x8 = Reshape((1089,), name='conv3_x8')(conv3_x7)
conv3_x3_sym = conv3_sl1(conv3_x3)
conv3_x4_sym = conv3_sl2(conv3_x3_sym)
conv3_x6_sym = conv3_sl3(conv3_x4_sym)
conv3_x7_sym = conv3_sl4(conv3_x6_sym)
conv3_x11 = Subtract(name='conv3_x11')([conv3_x7_sym, conv3_x3])
conv3 = conv3_x8
conv3_sym = conv3_x11
# ISTA block #4 — identical structure, fed with conv3.
conv4_x1 = Lambda(lambda x: x - lambda_step * K.dot(x, K.constant(ptp)) + lambda_step * phi_tb, name='conv4_x1')(conv3)
conv4_x2 = Reshape((33, 33, 1), name='conv4_x2')(conv4_x1)
conv4_x3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv4_x3')(conv4_x2)
conv4_sl1 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv4_sl1')
conv4_x4 = conv4_sl1(conv4_x3)
conv4_sl2 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv4_sl2')
conv4_x44 = conv4_sl2(conv4_x4)
conv4_x5 = Multiply(name='conv4_x5')([Lambda(lambda x: K.sign(x))(conv4_x44), Lambda(lambda x: relu(x - soft_thr))(Lambda(lambda x: K.abs(x))(conv4_x44))])
conv4_sl3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv4_sl3')
conv4_x6 = conv4_sl3(conv4_x5)
conv4_sl4 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv4_sl4')
conv4_x66 = conv4_sl4(conv4_x6)
conv4_x7 = Conv2D(1, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv4_x7a')(conv4_x66)
conv4_x7 = Add(name='conv4_x7b')([conv4_x7, conv4_x2])
conv4_x8 = Reshape((1089,), name='conv4_x8')(conv4_x7)
conv4_x3_sym = conv4_sl1(conv4_x3)
conv4_x4_sym = conv4_sl2(conv4_x3_sym)
conv4_x6_sym = conv4_sl3(conv4_x4_sym)
conv4_x7_sym = conv4_sl4(conv4_x6_sym)
conv4_x11 = Subtract(name='conv4_x11')([conv4_x7_sym, conv4_x3])
conv4 = conv4_x8
conv4_sym = conv4_x11
# ISTA block #5 — identical structure, fed with conv4; conv5 is the
# final reconstruction used by the loss below.
conv5_x1 = Lambda(lambda x: x - lambda_step * K.dot(x, K.constant(ptp)) + lambda_step * phi_tb, name='conv5_x1')(conv4)
conv5_x2 = Reshape((33, 33, 1), name='conv5_x2')(conv5_x1)
conv5_x3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv5_x3')(conv5_x2)
conv5_sl1 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv5_sl1')
conv5_x4 = conv5_sl1(conv5_x3)
conv5_sl2 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv5_sl2')
conv5_x44 = conv5_sl2(conv5_x4)
conv5_x5 = Multiply(name='conv5_x5')([Lambda(lambda x: K.sign(x))(conv5_x44), Lambda(lambda x: relu(x - soft_thr))(Lambda(lambda x: K.abs(x))(conv5_x44))])
conv5_sl3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv5_sl3')
conv5_x6 = conv5_sl3(conv5_x5)
conv5_sl4 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv5_sl4')
conv5_x66 = conv5_sl4(conv5_x6)
conv5_x7 = Conv2D(1, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv5_x7a')(conv5_x66)
conv5_x7 = Add(name='conv5_x7b')([conv5_x7, conv5_x2])
conv5_x8 = Reshape((1089,), name='conv5_x8')(conv5_x7)
conv5_x3_sym = conv5_sl1(conv5_x3)
conv5_x4_sym = conv5_sl2(conv5_x3_sym)
conv5_x6_sym = conv5_sl3(conv5_x4_sym)
conv5_x7_sym = conv5_sl4(conv5_x6_sym)
conv5_x11 = Subtract(name='conv5_x11')([conv5_x7_sym, conv5_x3])
conv5 = conv5_x8
conv5_sym = conv5_x11
# Defining the custom loss metric
def custom_loss(y_true, y_pred):
    """NOTE(review): this function appears unused — the script attaches the
    loss via model.add_loss() below instead. It also indexes y_pred as a
    sequence, which does not match Keras's per-output loss signature;
    confirm before wiring it into compile()."""
    # Referred to in the paper as cost
    cost1 = K.mean(K.square(y_pred[1] - y_pred[0]))
    # Referred to in the paper as cost_sym
    cost2 = K.mean(K.square(y_pred[2])) + K.mean(K.square(y_pred[3])) + K.mean(K.square(y_pred[4])) + K.mean(K.square(y_pred[5])) + K.mean(K.square(y_pred[6]))
    # Referred to in the paper as cost_all
    cost = cost1 + 0.01*cost2
    return cost
### COMPILING THE MODEL
# Defining the inputs and outputs
model = Model(inputs=[inp, inp_labels], outputs=[conv5, conv1_sym, conv2_sym, conv3_sym, conv4_sym, conv5_sym])
# Display a model summary
model.summary()
# Define costs: cost1 = reconstruction error of the final ISTA block,
# cost2 = sum of the five symmetry residuals, cost = cost1 + 0.01*cost2.
cost1 = K.mean(K.square(conv5 - inp_labels))
cost2 = K.mean(K.square(conv1_sym)) + K.mean(K.square(conv2_sym)) + K.mean(K.square(conv3_sym)) + K.mean(K.square(conv4_sym)) + K.mean(K.square(conv5_sym))
cost = cost1 + 0.01*cost2
# Add custom loss.
# Bug fix: the original re-built this expression inline as
#   mean(recon) + 0.01*mean(conv1_sym) + mean(conv2_sym) + ...
# so, by operator precedence, only the FIRST symmetry term was scaled by
# 0.01 — disagreeing with the `cost` tensor defined just above. Reuse it.
model.add_loss(cost)
# Compile the model
model.compile(optimizer=Adam(lr=0.0001), metrics=[cost, cost1, cost2])
# Define custom metrics to display (the "cost" metric had the same
# precedence bug; it now reuses the corrected tensors).
model.metrics_tensors.append(cost)
model.metrics_names.append("cost")
model.metrics_tensors.append(cost1)
model.metrics_names.append("cost1")
model.metrics_tensors.append(cost2)
model.metrics_names.append("cost2")
### TRAINING THE MODEL
# No y= target is passed: the loss was attached with model.add_loss and
# reads inp_labels, which is fed here as the second model input.
model.fit([x_inp, train_labels],
          epochs = 300,
          batch_size = 64)
| hansinahuja/ISTA-Net | ista_net.py | ista_net.py | py | 11,288 | python | en | code | 4 | github-code | 36 |
11538172081 | #!/usr/bin/python3
""" a module that queries API """
from requests import get
def top_ten(subreddit):
    """Print the titles of the 10 hottest posts of a subreddit.

    Prints ``None`` when the subreddit is invalid (any non-200 response;
    redirects are not followed so invalid subreddits do not bounce to a
    search page).

    Args:
        subreddit (str): the name of the subreddit
    """
    url = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)
    # Custom User-Agent avoids Reddit API rate limiting.
    response = get(
        url,
        headers={'User-Agent': 'MyRedditScraper/1.0'},
        params={'limit': 10},
        allow_redirects=False,
    )
    if response.status_code != 200:
        print(None)
        return
    posts = response.json().get('data')
    for post in posts['children']:
        print(post['data']['title'])
| Rashnotech/alx-system_engineering-devops | 0x16-api_advanced/1-top_ten.py | 1-top_ten.py | py | 737 | python | en | code | 0 | github-code | 36 |
5340116338 | from globals import *
DEFAULT_LATITUDE = 45.943161
DEFAULT_LONGITUDE = 24.96676
class _New_Hub(webocrat_Request):
def get(self):
self.show_form(False)
def post(self):
self.show_form(True)
@need_registered_user
def show_form(self, post = False):
errors = dict()
if post:
#posting form
# verify the data
#-- NAME CHECK --
post_name = self.request.get('hub-name')
if post_name == 'Name':
errors['name'] = "Please enter a valid name"
if len(errors)==0:
# DATA_OK:
newEgo = Ego()
newEgo.display_name = self.request.get('hub-name')
# newEgo.location =
newEgo.put()
newHub = Hub(ego = newEgo)
newHub.display_name = newEgo.display_name
newHub.put()
self.redirect("/hub."+str(newHub.key().id()))
template_vals={
'lat': self.request.get('lat', DEFAULT_LATITUDE),
'lng': self.request.get('lng', DEFAULT_LONGITUDE),
'errors': errors
}
self.render_simple_template('NewHubForm.django.html', template_vals)
def main():
application = webapp.WSGIApplication([('/new.hub', _New_Hub)],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| webocrat/webocrat | py/new_hub.py | new_hub.py | py | 1,450 | python | en | code | 1 | github-code | 36 |
39763274152 | from django.conf.urls import url
from django.urls import path
from . import views
urlpatterns = [
url(r'^$', views.assignments, name='assignments'),
url(r'^addnewassignments/$', views.addnewassignments, name='addnewassignments'),
# url(r'^deleteassignments/$', views.deleteassignments, name='deleteassignments'),
url(r'^editassignments/$', views.editassignments, name='editassignments'),
path('da/', views.deleteassignments, name='da'),
path('allsubmissions/<assid>/', views.allsubmissions, name='allsubmissions'),
path('evaluate/<submissionid>/', views.evaluate, name='evaluate'),
path('submitgrade/<submissionid>/', views.submitgrade, name='submitgrade'),
path('signout/', views.signout, name='signout'),
] | hafeezurrahmansaleh/Daily-Lab-Assistance | assignments/urls.py | urls.py | py | 744 | python | en | code | 0 | github-code | 36 |
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Return indices [i, j] (i < j) of two numbers summing to target.

        Single pass with a value->index map, O(n) instead of the original
        O(n^2) nested scan. Assumes at most one valid pair (the LeetCode
        contract); returns [] when no pair exists.
        """
        seen = {}  # value -> index of its first occurrence
        for j, value in enumerate(nums):
            i = seen.get(target - value)
            if i is not None:
                return [i, j]
            seen[value] = j
        return []
74833825384 | import sqlite3
import argparse
import logging
# Optional argument to use a listed database file. otherwise use vics.sqlite
# argparse with usage
# If no vics.sqlite3 then create it, and make the 'all' table.
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", dest="db_file", help="Optional. Path to vics database, if you do not want to use vics.sqlite")
args = parser.parse_args()
if args.db_file:
db_file = args.db_file
else:
db_file = "vics.sqlite"
logging.info(f"database file: {db_file}")
table_creation_string = '''CREATE TABLE el_todo (date text, b64image text, sha text, tags text)'''
def create_new_database(sqlitedb_filename):
    """Create an empty SQLite database file at `sqlitedb_filename`.

    Connecting is what creates the file on disk. Bug fix: the original
    closed only the cursor and leaked the connection; the connection is
    now closed as well.
    """
    con = sqlite3.connect(sqlitedb_filename)
    con.close()
def sqlite_table_schema(conn, name):
    """Return a string representing the table's CREATE. via https://techoverflow.net/2019/10/14/how-to-get-schema-of-sqlite3-table-in-python/

    Bug fix: the original opened a second connection from an undefined
    global (`sqlitedb`), raising NameError before the query ever ran;
    only the passed-in `conn` is needed.
    """
    cursor = conn.execute("SELECT sql FROM sqlite_master WHERE name=?;", [name])
    sql = cursor.fetchone()[0]
    cursor.close()
    return sql
def old_stuff_from_first_session():
    # Verify the el_todo table schema (creating the table if absent) and
    # insert one hard-coded test row.
    # NOTE(review): `con` and `cur` are not defined in this function or at
    # module level in this file — as written, this raises NameError; it
    # presumably relied on globals from an earlier interactive session.
    # `table_creation_string` and `logging` are module-level.
    try:
        # sqlite_table_schema returns None's [0] (TypeError) when the table
        # does not exist, which is what the except clause relies on.
        el_todo_schema = sqlite_table_schema(con, 'el_todo')
        if table_creation_string != el_todo_schema:
            schema_mismatch_error = f"schema mismatch. \n\nExpected: {table_creation_string}\nFound: {el_todo_schema}\n"
            logging.critical(schema_mismatch_error)
            exit(schema_mismatch_error)
    except TypeError:
        logging.info("Table 'el_todo' not found, creating.")
        cur.execute(table_creation_string)
        con.commit()
    # Hard-coded test record (flagged as bad data by its tags).
    date = "2021-05-05"
    b64image = "abcdefg1234"
    sha = "123"
    tags = "test baddata notanimage"
    cur.execute("insert into el_todo values (?, ?, ?, ?)", (date, b64image, sha, tags))
    con.commit()
    con.close()
| fine-fiddle/vics | vics.py | vics.py | py | 1,888 | python | en | code | 0 | github-code | 36 |
36955901449 | import wttest
from helper_tiered import TieredConfigMixin, gen_tiered_storage_sources
from wtscenario import make_scenarios
# test_schema06.py
# Repeatedly create and drop indices
class test_schema06(TieredConfigMixin, wttest.WiredTigerTestCase):
    """
    Test basic operations: repeatedly create and drop indices.
    """
    nentries = 1000
    types = [
        ('normal', { 'type': 'normal', 'idx_config' : '' }),
        ('lsm', { 'type': 'lsm', 'idx_config' : ',type=lsm' }),
    ]
    tiered_storage_sources = gen_tiered_storage_sources()
    scenarios = make_scenarios(tiered_storage_sources, types)
    def flip(self, inum, val):
        """
        Defines a unique transformation of values for each index number.
        We reverse digits so the generated values are not perfectly ordered.
        """
        newval = str((inum + 1) * val)
        return newval[::-1] # reversed digits
    def unflip(self, inum, flipped):
        """
        The inverse of flip.
        """
        newval = flipped[::-1]
        return int(newval)/(inum + 1)
    def create_index(self, inum):
        """Create an index on column s<inum>."""
        colname = "s" + str(inum)
        self.session.create("index:schema06:" + colname,
                            "columns=(" + colname + ")" + self.idx_config)
    def drop_index(self, inum):
        """Drop the index on column s<inum>, retrying until it succeeds."""
        colname = "s" + str(inum)
        self.dropUntilSuccess(self.session, "index:schema06:" + colname)
    def test_index_stress(self):
        """Create indices and colgroups, populate, then drop the indices."""
        if self.is_tiered_scenario() and self.type == 'lsm':
            self.skipTest('Tiered storage does not support LSM URIs.')
        self.session.create("table:schema06",
                            "key_format=S,value_format=SSSSSS," +
                            "columns=(key,s0,s1,s2,s3,s4,s5),colgroups=(c1,c2)")
        self.create_index(0)
        self.session.create("colgroup:schema06:c1", "columns=(s0,s1,s4)")
        self.create_index(1)
        self.session.create("colgroup:schema06:c2", "columns=(s2,s3,s5)")
        cursor = self.session.open_cursor('table:schema06', None, None)
        for i in range(0, self.nentries):
            cursor.set_key(self.flip(0, i))
            values = [self.flip(inum, i) for inum in range(6)]
            cursor.set_value(values[0],values[1],values[2],
                             values[3],values[4],values[5])
            cursor.insert()
        cursor.close()
        self.drop_index(0)
        self.drop_index(1)
    def check_entries(self, check_indices):
        """Verify table (and optionally index) contents of 'table:main'."""
        cursor = self.session.open_cursor('table:main', None, None)
        # spot check via search
        n = self.nentries
        for i in (n // 5, 0, n - 1, n - 2, 1):
            cursor.set_key(i, 'key' + str(i))
            square = i * i
            cube = square * i
            cursor.search()
            (s1, i2, s3, i4) = cursor.get_values()
            self.assertEqual(s1, 'val' + str(square))
            self.assertEqual(i2, square)
            self.assertEqual(s3, 'val' + str(cube))
            self.assertEqual(i4, cube)
        count = 0
        # then check all via cursor
        cursor.reset()
        for ikey, skey, s1, i2, s3, i4 in cursor:
            i = ikey
            square = i * i
            cube = square * i
            self.assertEqual(ikey, i)
            self.assertEqual(skey, 'key' + str(i))
            self.assertEqual(s1, 'val' + str(square))
            self.assertEqual(i2, square)
            self.assertEqual(s3, 'val' + str(cube))
            self.assertEqual(i4, cube)
            count += 1
        cursor.close()
        self.assertEqual(count, n)
        if check_indices:
            # we check an index that was created before populating
            cursor = self.session.open_cursor('index:main:S1i4', None, None)
            count = 0
            for s1key, i4key, s1, i2, s3, i4 in cursor:
                # Bug fix: the exponent was written '1 // 3.0', which
                # floor-divides to 0.0 and made i always 1; use true
                # division to actually take the cube root.
                i = int(i4key ** (1 / 3.0) + 0.0001) # cuberoot
                self.assertEqual(s1key, s1)
                self.assertEqual(i4key, i4)
                ikey = i
                skey = 'key' + str(i)
                square = i * i
                cube = square * i
                self.assertEqual(ikey, i)
                self.assertEqual(skey, 'key' + str(i))
                self.assertEqual(s1, 'val' + str(square))
                self.assertEqual(i2, square)
                self.assertEqual(s3, 'val' + str(cube))
                self.assertEqual(i4, cube)
                count += 1
            cursor.close()
            self.assertEqual(count, n)
            # we check an index that was created after populating
            cursor = self.session.open_cursor('index:main:i2S1i4', None, None)
            count = 0
            for i2key, s1key, i4key, s1, i2, s3, i4 in cursor:
                # Same cube-root fix as above ('1 // 3.0' -> '1 / 3.0').
                i = int(i4key ** (1 / 3.0) + 0.0001) # cuberoot
                self.assertEqual(i2key, i2)
                self.assertEqual(s1key, s1)
                self.assertEqual(i4key, i4)
                ikey = i
                skey = 'key' + str(i)
                square = i * i
                cube = square * i
                self.assertEqual(ikey, i)
                self.assertEqual(skey, 'key' + str(i))
                self.assertEqual(s1, 'val' + str(square))
                self.assertEqual(i2, square)
                self.assertEqual(s3, 'val' + str(cube))
                self.assertEqual(i4, cube)
                count += 1
            cursor.close()
            self.assertEqual(count, n)
if __name__ == '__main__':
wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_schema06.py | test_schema06.py | py | 5,452 | python | en | code | 24,670 | github-code | 36 |
38801618139 | from transformers import pipeline
# classifier = pipeline('sentiment-analysis')
# res = classifier(
# 'We are not very happy to introduce pipeline to the transformers repository.')
pipe = pipeline('question-answering')
res = pipe({
'question': 'What is the name of the repository ?',
'context': 'Pipeline have been included in the huggingface/transformers repository'
})
print(res)
| taterboom/simple-tts | index.py | index.py | py | 397 | python | en | code | 0 | github-code | 36 |
def method1(X, Y):
    """Return the length of the longest common subsequence of X and Y.

    Bottom-up dynamic programming: table[i][j] holds the LCS length of
    X[:i] and Y[:j]. O(len(X) * len(Y)) time and space.
    """
    rows, cols = len(X), len(Y)
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            if X[i - 1] == Y[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[rows][cols]
def method2(X, Y, m, n):
    """Return the length of the LCS of X[:m] and Y[:n], recursively.

    The original plain recursion was exponential in m + n; memoizing the
    subproblems makes it O(m * n) while keeping the same interface and
    results.
    """
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def lcs(i, j):
        # LCS length of X[:i] and Y[:j].
        if i == 0 or j == 0:
            return 0
        if X[i - 1] == Y[j - 1]:
            return 1 + lcs(i - 1, j - 1)
        return max(lcs(i, j - 1), lcs(i - 1, j))

    return lcs(m, n)
if __name__ == "__main__":
"""
from timeit import timeit
X = "AGGTAB"
Y = "GXTXAYB"
print(timeit(lambda: method1(X, Y), number=10000)) # 0.14817858800233807
print(
timeit(lambda: method2(X, Y, len(X), len(Y)), number=10000)
) # 0.5299446069984697
""" | thisisshub/DSA | T_dynamic_programming/problems/A_longest_common_subsequence.py | A_longest_common_subsequence.py | py | 921 | python | en | code | 71 | github-code | 36 |
16509838314 | import netmanthan
from netmanthan.model.document import Document
from netmanthan.query_builder import Interval
from netmanthan.query_builder.functions import Now
class ErrorSnapshot(Document):
no_feed_on_delete = True
def onload(self):
if not self.parent_error_snapshot:
self.db_set("seen", 1, update_modified=False)
for relapsed in netmanthan.get_all("Error Snapshot", filters={"parent_error_snapshot": self.name}):
netmanthan.db.set_value("Error Snapshot", relapsed.name, "seen", 1, update_modified=False)
netmanthan.local.flags.commit = True
def validate(self):
parent = netmanthan.get_all(
"Error Snapshot",
filters={"evalue": self.evalue, "parent_error_snapshot": ""},
fields=["name", "relapses", "seen"],
limit_page_length=1,
)
if parent:
parent = parent[0]
self.update({"parent_error_snapshot": parent["name"]})
netmanthan.db.set_value("Error Snapshot", parent["name"], "relapses", parent["relapses"] + 1)
if parent["seen"]:
netmanthan.db.set_value("Error Snapshot", parent["name"], "seen", 0)
@staticmethod
def clear_old_logs(days=30):
table = netmanthan.qb.DocType("Error Snapshot")
netmanthan.db.delete(table, filters=(table.modified < (Now() - Interval(days=days))))
| netmanthan/Netmanthan | netmanthan/core/doctype/error_snapshot/error_snapshot.py | error_snapshot.py | py | 1,244 | python | en | code | 0 | github-code | 36 |
74863821545 | import json
import unittest
from api.tests.base import BaseTestCase
class TestSimulationsService(BaseTestCase):
""" Tests for the Simulation Service """
def test_simulations(self):
""" Ensure the /ping route behaves correctly. """
response = self.client.get("/simulations/ping")
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertIn("pong!", data["message"])
self.assertIn("success", data["status"])
if __name__ == "__main__":
unittest.main()
| door2door-io/mi-code-challenge | backend/api/tests/test_simulations.py | test_simulations.py | py | 555 | python | en | code | 0 | github-code | 36 |
37986564283 | import sys
import scipy
from scipy import io
from scipy.io import wavfile
def getVolume(sound):
    """Print the sum of the absolute amplitudes of the samples in `sound`."""
    total = sum(abs(sample) for sample in sound)
    print(total)
def main():
file = sys.argv[1]
print(file)
sampling_rate, sound = scipy.io.wavfile.read(file)
getVolume(sound)
if __name__ == '__main__':
main() | emilymacq/Project-Clear-Lungs | ARCHIVE/Python files/TestTemplate.py | TestTemplate.py | py | 349 | python | en | code | 2 | github-code | 36 |
22542096057 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#Answer no 1
n = int(input())
divBy7 = [i for i in range(0,n) if (i % 7 == 0)]
print(divBy7)
def divCheck(n):
    """Print each i in [0, n) together with whether it is divisible by 7."""
    for i in range(n):
        print(i, i % 7 == 0)
divCheck(n)
# In[ ]:
#Answer no 2
import operator
# Count how often each alphabetic word occurs in one line of input, then
# print the (word, count) pairs sorted alphabetically.
text_line = input("Type in:")
freq = {}
for i in text_line.split(' '):
    if i.isalpha():
        if i not in freq:
            freq[i] = 1
        elif i in freq:
            freq[i] = freq[i]+1
    else:
        pass
# Bug fixes: the original called undefined `freqitems(...)` (missing the
# 'freq.items()' attribute access) and iterated `sorted-freq` (a
# subtraction of two undefined names) — both NameErrors at runtime.
sorted_freq = sorted(freq.items(), key=operator.itemgetter(0))
print(sorted_freq)
for i in sorted_freq:
    print(i[0], i[1])
# In[4]:
#Answer no 3
class person:
gender = "Gaurav"
def getGender(self):
print("Hi! i am %s"%self.gender)
class Male(person):
gender = "male"
class Female(person):
gender = "female"
a = Male()
b = Female()
a.getGender()
b.getGender()
# In[5]:
#Answer no 4
subjects = ["I","You"]
verbs = ["Play","Love"]
objects = ["Hockey","Football"]
for i in range(len(subjects)):
for j in range(len(verbs)):
for k in range(len(objects)):
sentence = "%s %s %s."%(subjects[i],verbs[j],objects[k])
print(sentence)
# In[9]:
#Answer no 5
import zlib
a = "this string needs compressing"
a = zlib.compress(a.encode())
print(zlib.decompress(a))
# In[10]:
#Answer no 6
import math
def binarySearch(li, ele):
    """Return the index of `ele` in the sorted list `li`, or -1 if absent.

    Bug fixes vs. the original: the loop condition was `highest>+lowest`
    (a `>` with a unary plus) instead of `>=`, so single-element ranges
    were never inspected; and the two halves were swapped — it narrowed to
    the lower half when the middle element was too small, and vice versa,
    so present elements were routinely reported missing.
    """
    lowest = 0
    highest = len(li) - 1
    index = -1
    while highest >= lowest and index == -1:
        mid = int(math.floor((highest + lowest) / 2.0))
        if li[mid] == ele:
            index = mid
        elif li[mid] < ele:
            lowest = mid + 1   # target can only be in the upper half
        else:
            highest = mid - 1  # target can only be in the lower half
    return index
sortedList = [2,5,7,9,11,17,222]
print(binarySearch(sortedList,11))
# In[ ]:
| Gaurav262701/Assgnmnt-no-14 | Assgnmnt_No14.py | Assgnmnt_No14.py | py | 1,912 | python | en | code | 0 | github-code | 36 |
822226274 | #!/usr/bin/env python
# coding: utf-8
# import all packages
from nilearn.connectome import ConnectivityMeasure
from nilearn.input_data import NiftiLabelsMasker
from load_confounds import Scrubbing
from nilearn import datasets
from os.path import join
import nibabel as nib
import numpy as np
import shutil
import os
# intialize the layout to retrieve the data
path = '/path/to/fmriprep/'
file_name = 'task-rest_space-MNI152NLin2009cAsym_desc-preproc_bold'
subjects = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15','16','17', '18']
condition = ['control', 'deaf']
task = 'func'
ext = 'nii.gz'
# variables attribution
conn_measure = ConnectivityMeasure(kind='correlation', vectorize=True, discard_diagonal=True)
all_features = {'condition':[], 'subject':[], 'connectomes':[]} # where all the features are stored
schaefer_atlas = datasets.fetch_atlas_schaefer_2018(n_rois=100) # load the atlas
files_nii = []
for sub in subjects:
for cond in condition:
filename = f'sub-{cond}{sub}/{task}/sub-{cond}{sub}_{file_name}.{ext}'
sub_func = os.path.join(path, filename)
# print (sub_func) to keep track of the loop
if os.path.isfile(sub_func): # verify if path exist
img_load = nib.load(sub_func)
files_nii=np.append(files_nii, img_load)
confounds = Scrubbing().load(sub_func)
# initialize the masker
masker = NiftiLabelsMasker(labels_img=schaefer_atlas.maps, t_r=2.2, standardize=True,
verbose= 0)
masked_data = masker.fit(img_load)
timeseries = masker.transform(img_load, confounds=confounds)
correlation_matrix = conn_measure.fit_transform([timeseries])[0]
# add each subject caracteristics to a container
all_features['condition'].append(cond)
all_features['subject'].append(sub)
all_features['connectomes'].append(correlation_matrix)
np.savez_compressed('schaefern100_features', cond = all_features['condition'], sub = all_features['subject'],
conn = all_features['connectomes'])
original = r'/path/to/save/schaefern100_features.npz'
target = r'/new/path/to/save/'
shutil.move(original,target) # change the path of the saved data
| PSY6983-2021/clandry_project | codes/data_prep.py | data_prep.py | py | 2,355 | python | en | code | 0 | github-code | 36 |
class Sorter:
    """A small collection of sorting routines."""

    def bubblesort(self, array):
        """Bubble-sort `array` in place, printing it before each comparison."""
        upper = len(array) - 1
        while upper > 0:
            for idx in range(upper):
                print(array)
                if array[idx] > array[idx + 1]:
                    array[idx], array[idx + 1] = array[idx + 1], array[idx]
            upper -= 1

    def quicksort(self, array):
        """Return a sorted copy of `array` (first element as pivot)."""
        if len(array) <= 1:
            return array
        pivot = array[0]
        smaller = [e for e in array if e < pivot]
        equal = [e for e in array if e == pivot]
        larger = [e for e in array if e > pivot]
        return self.quicksort(smaller) + equal + self.quicksort(larger)
s = Sorter()
li = [9,8,7,6,5,4,3,2,1]
# soort = s.quicksort
def pop_sort(array):
    """Sort `array` in place with bubble sort and return it."""
    size = len(array)
    for done in range(size - 1):
        # After each pass the largest remaining element has bubbled to
        # position size - done - 1, so the inner scan shrinks by one.
        for idx in range(size - done - 1):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
    return array
pop_sort(li)
print(li) | midasevil/Babel | Sorter.py | Sorter.py | py | 1,068 | python | en | code | 0 | github-code | 36 |
2894241909 | import sys
from src.dialog.common.Dialog import Dialog
from src.dialog.common.DialogContainer import DialogContainer
from src.dialog.common.DialogFactory import DialogFactory
from src.dialog.common.form_doc import FormDocContainer
from src.dialog.common.manage_entity import ManageEntityContainer
from src.dialog.common.manage_entity.ManageEntityDialogMode import ManageEntityDialogMode
from src.dialog.common.table.TableFuncs import TableFuncs
from src.dialog.common.table.data.Table import Table
from src.dialog.common.table.data.TableFactory import TableFactory
from src.session.common.Session import Session
from src.storage.common.entity import EntityStorage
class TableContainer(DialogContainer, TableFuncs):
    """Dialog container for the entity table.

    Wires table actions (create/edit/delete/duplicate/form) to the
    entity-management and document-form child containers and to storage.
    """
    def __init__(
        self,
        manage_entity_container: ManageEntityContainer,
        form_doc_container: FormDocContainer,
        session: Session,
        entity_storage: EntityStorage,
        dialog_factory: DialogFactory,
        table_factory: TableFactory
    ):
        super().__init__(dialog_factory)
        self.__manage_entity_container = manage_entity_container
        self.__form_doc_container = form_doc_container
        self.__session = session
        self.__entity_storage = entity_storage
        self.__table_factory = table_factory
        # Register this container as the parent of both child containers so
        # they can notify it when their dialogs close (see child_unfocused).
        self.__manage_entity_container.set_parent_container(self)
        self.__form_doc_container.set_parent_container(self)
    def create_dialog(self) -> Dialog:
        """Build the concrete table dialog for this container."""
        return self.dialog_factory.create_table_dialog(self)
    def form_doc(self, key: str):
        """Open the document form dialog for the entity with the given key."""
        self.__session.set_form_doc_entity_key(key)
        self.__form_doc_container.show_dialog()
    def create_entity(self):
        """Open the manage-entity dialog in CREATE mode."""
        self.__session.set_manage_entity_mode(ManageEntityDialogMode.CREATE)
        self.__manage_entity_container.show_dialog()
    def edit_entity(self, key: str):
        """Open the manage-entity dialog in EDIT mode for the given entity."""
        self.__session.set_manage_entity_mode(ManageEntityDialogMode.EDIT)
        self.__session.set_edit_entity_id(key)
        self.__manage_entity_container.show_dialog()
    def delete_entity(self, key):
        """Remove the entity from storage and refresh the table view."""
        self.__entity_storage.remove_entity(key)
        self.dialog.draw_table()
    def duplicate_entity(self, key):
        """Duplicate the entity in storage and refresh the table view."""
        self.__entity_storage.duplicate(key)
        self.dialog.draw_table()
    def get_table_data(self) -> Table:
        """Build the table model from every stored entity."""
        return self.__table_factory.create(self.__entity_storage.get_all_entities())
    # Redraw the table when the entity-editing dialog closes
    # (translated from the original Russian comment).
    def child_unfocused(self):
        super().child_unfocused()
        self.dialog.draw_table()
| andreyzaytsev21/MasterDAPv2 | src/dialog/common/table/TableContainer.py | TableContainer.py | py | 2,644 | python | en | code | 0 | github-code | 36 |
25422749814 | from util import get_history_identifier, get_user_identifier, calculate_num_tokens, calculate_num_tokens_by_prompt, say_ts, check_availability
from typing import List, Dict
class GPT_4_CommandExecutor():
    """Executor for the Slack command that holds a conversation using GPT-4.

    (Docstrings and comments translated from the original Japanese; the
    Japanese user-facing message strings are runtime output and unchanged.)
    """
    MAX_TOKEN_SIZE = 8192 # maximum total token size of the model
    COMPLETION_MAX_TOKEN_SIZE = 2048 # maximum token size of the ChatCompletion output
    INPUT_MAX_TOKEN_SIZE = MAX_TOKEN_SIZE - COMPLETION_MAX_TOKEN_SIZE # token budget left for the ChatCompletion input
    def __init__(self, openai):
        # Per-(team, channel, user) conversation histories.
        self.history_dict : Dict[str, List[Dict[str, str]]] = {}
        self.openai = openai
    def execute(self, client, message, say, context, logger):
        """Run one conversation turn with GPT-4 for the triggering message."""
        using_team = message["team"]
        using_channel = message["channel"]
        history_idetifier = get_history_identifier(
            using_team, using_channel, message["user"])
        user_identifier = get_user_identifier(using_team, message["user"])
        prompt = context["matches"][0]
        # Fetch this user's conversation history (empty on first use).
        history_array: List[Dict[str, str]] = []
        if history_idetifier in self.history_dict.keys():
            history_array = self.history_dict[history_idetifier]
        history_array.append({"role": "user", "content": prompt})
        # Drop the oldest entries while the history exceeds INPUT_MAX_TOKEN_SIZE tokens.
        while calculate_num_tokens(history_array) > self.INPUT_MAX_TOKEN_SIZE:
            history_array = history_array[1:]
        # If a single utterance alone exceeds the budget, we cannot handle it.
        if(len(history_array) == 0):
            messege_out_of_token_size = f"発言内容のトークン数が{self.INPUT_MAX_TOKEN_SIZE}を超えて、{calculate_num_tokens_by_prompt(prompt)}であったため、対応できませんでした。"
            say_ts(client, message, messege_out_of_token_size)
            logger.info(messege_out_of_token_size)
            return
        say_ts(client, message, f"GPT-4で <@{message['user']}> さんの以下の発言に対応中(履歴数: {len(history_array)} 、トークン数: {calculate_num_tokens(history_array)})\n```\n{prompt}\n```")
        # Call the ChatCompletion API.
        logger.info(f"user: {message['user']}, prompt: {prompt}")
        response = self.openai.ChatCompletion.create(
            model="gpt-4",
            messages=history_array,
            top_p=1,
            n=1,
            max_tokens=self.COMPLETION_MAX_TOKEN_SIZE,
            temperature=1, # diversity of generated responses
            presence_penalty=0,
            frequency_penalty=0,
            logit_bias={},
            user=user_identifier
        )
        logger.debug(response)
        # Append the new response to the history.
        new_response_message = response["choices"][0]["message"]
        history_array.append(new_response_message)
        # Drop the oldest entries again while over the input token budget.
        while calculate_num_tokens(history_array) > self.INPUT_MAX_TOKEN_SIZE:
            history_array = history_array[1:]
        self.history_dict[history_idetifier] = history_array # update the stored history
        say_ts(client, message, new_response_message["content"])
        logger.info(f"user: {message['user']}, content: {new_response_message['content']}")
    def execute_reset(self, client, message, say, context, logger):
        """Reset the GPT-4 conversation history for this user and channel."""
        using_team = message["team"]
        using_channel = message["channel"]
        historyIdetifier = get_history_identifier(
            using_team, using_channel, message["user"])
        # Reset the history.
        self.history_dict[historyIdetifier] = []
        logger.info(f"GPT-4の <@{message['user']}> さんの <#{using_channel}> での会話の履歴をリセットしました。")
        say_ts(client, message, f"GPT-4の <@{message['user']}> さんの <#{using_channel}> での会話の履歴をリセットしました。")
| sifue/chatgpt-slackbot | opt/gpt_4.py | gpt_4.py | py | 4,222 | python | ja | code | 54 | github-code | 36 |
class Carro:
    """Session-backed shopping cart.

    The cart is a dict stored in the session under the "carro" key, mapping
    str(product id) -> {"producto_id", "nombre", "precio", "cantidad",
    "imagen"}. (Comments translated from the original Spanish.)
    """

    def __init__(self, request):
        self.request = request          # keep the request
        self.session = request.session  # and its session
        carro = self.session.get("carro")
        if not carro:
            # First visit: create an empty cart in the session.
            carro = self.session["carro"] = {}
        # Bug fix: the original only assigned self.carro in the
        # "existing cart" branch, so a fresh session crashed with
        # AttributeError on the first cart operation.
        self.carro = carro

    def agregar(self, producto):
        """Add one unit of `producto`, creating its entry on first add."""
        producto_id = str(producto.id)
        if producto_id not in self.carro:
            # Bug fixes: the original referenced an undefined name
            # (`producto_id` was never bound) and stored the entry under
            # the int id while membership was tested with the str id, so
            # quantities never incremented; the increment branch also used
            # undefined `key`/`value` instead of the loop variables.
            self.carro[producto_id] = {
                "producto_id": producto.id,
                "nombre": producto.nombre,
                "precio": str(producto.precio),
                "cantidad": 1,
                "imagen": producto.imagen.url,
            }
        else:
            self.carro[producto_id]["cantidad"] += 1
        self.guardar_carro()

    def guardar_carro(self):
        """Write the cart back to the session and mark it modified."""
        self.session["carro"] = self.carro
        self.session.modified = True

    def eliminar(self, producto):
        """Remove `producto` from the cart entirely."""
        # Bug fix: the original did `producto.id = str(producto)`, mutating
        # the product object and producing a key that never matched.
        producto_id = str(producto.id)
        if producto_id in self.carro:
            del self.carro[producto_id]
        self.guardar_carro()

    def restar_producto(self, producto):
        """Decrement `producto`'s quantity; drop the entry below one unit."""
        producto_id = str(producto.id)
        if producto_id in self.carro:
            entry = self.carro[producto_id]
            entry["cantidad"] -= 1
            if entry["cantidad"] < 1:
                self.eliminar(producto)
        self.guardar_carro()

    def limpiar_carro(self):
        """Empty the cart (also resets the in-memory reference)."""
        self.carro = self.session["carro"] = {}
        self.session.modified = True
| Rojas-Andres/proyecto-web-django | carro/carro.py | carro.py | py | 2,220 | python | es | code | 0 | github-code | 36 |
6395344004 | import operator
from intersection import Movement, Phase
from agent import Agent
class Demand_Agent(Agent):
    """
    The class defining an agent which controls the traffic lights using the demand based approach
    always prioritizing the phase with the biggest demand
    """
    def __init__(self, eng, ID=''):
        super().__init__(eng, ID)

    def act(self, lanes_count):
        """
        selects phase with biggest demand
        :param lanes_count: a dictionary with lane ids as keys and vehicle count as values
        """
        def phase_demand(phase):
            # Total demand over all movements belonging to this phase.
            return sum(self.movements[move_id].get_demand(lanes_count)
                       for move_id in phase.movements)
        return max(self.phases.values(), key=phase_demand)
| mbkorecki/rl_traffic | src/demand_agent.py | demand_agent.py | py | 912 | python | en | code | 1 | github-code | 36 |
13784383950 | import pynmea2, serial, os, time, sys, glob, datetime
def logfilename():
    """Return the name of the NMEA log file.

    Always 'datalog.nmea'. The original computed `datetime.now()` without
    using it and carried an unreachable string literal after the return;
    both are removed. The previously intended timestamped pattern is kept
    here for reference:
        NMEA_%Y-%m-%d_%H-%M-%S.nmea
    """
    return 'datalog.nmea'
# Main port-scanning / logging loop. For each candidate serial port: warm
# up, validate the stream as NMEA, then copy raw sentences to the log file
# until Ctrl-C.
try:
    while True:
        ports = ['/dev/serial0']
        if len(ports) == 0:
            sys.stderr.write('No ports found, waiting 10 seconds...press Ctrl-C to quit...\n')
            time.sleep(10)
            continue
        for port in ports:
            # try to open serial port
            sys.stderr.write('Trying port %s\n' % port)
            try:
                # try to read a line of data from the serial port and parse
                with serial.Serial(port, 9600, timeout=1) as ser:
                    # 'warm up' with reading some input
                    for i in range(10):
                        ser.readline()
                    # try to parse (will throw an exception if input is not valid NMEA)
                    pynmea2.parse(ser.readline().decode('ascii', errors='replace'))
                    # log data
                    outfname = logfilename()
                    sys.stderr.write('Logging data on %s to %s\n' % (port, outfname))
                    with open(outfname, 'wb') as f:
                        # loop will exit with Ctrl-C, which raises a
                        # KeyboardInterrupt
                        while True:
                            line = ser.readline()
                            # Bug fix: ser.readline() returns bytes, so the
                            # original comparison against the str "$GNGGA"
                            # was always False and fix loss was never
                            # detected; compare against a bytes literal.
                            if line[0:6] == b"$GNGGA":
                                # A short GGA sentence means no position fix.
                                if len(line) < 45:
                                    ## ADD ANYTHING YOU WANT TO DO WHEN FIX IS LOST ##
                                    print('FIX LOST, STOP PHOTOS')
                            print(line)
                            f.write(line)
            except Exception as e:
                sys.stderr.write('Error reading serial port %s: %s\n' % (type(e).__name__, e))
                sys.exit()
            except KeyboardInterrupt as e:
                #sys.stderr.write('Ctrl-C pressed, exiting log of %s to %s\n' % (port, outfname))
                sys.exit()
        sys.stderr.write('Scanned all ports, waiting 10 seconds...press Ctrl-C to quit...\n')
        time.sleep(10)
except KeyboardInterrupt:
    sys.stderr.write('Ctrl-C pressed, exiting port scanner\n')
| Keshavkant/RpiGeotaggedImages | GeoLogger.py | GeoLogger.py | py | 2,636 | python | en | code | 0 | github-code | 36 |
35842604532 | from django.urls import path
from CafeStar import views
app_name = 'CafeStar'
urlpatterns = [
path('', views.homePage, name='home_page'),
path('homePage', views.homePage, name='home_page'),
path('drinkDetail', views.drinkDetail, name='drink_detail'),
path('drinks', views.drinks, name='drinks'),
path('order', views.order, name='order'),
path('orderPricePoint', views.OrderInformationView.as_view(), name='order_price_point'),
path('login', views.newLogin, name='login'),
path('register', views.register, name='register'),
path('logout', views.logout, name='logout'),
path('edit', views.userProfile, name='edit'),
path('orderList', views.orderList, name='order_list'),
path('shopStatus', views.status, name='shop_status'),
path('drinksModify', views.drinksModify, name='drinks_modify'),
]
| zhengx-2000/CafeStar | CafeStar/urls.py | urls.py | py | 843 | python | en | code | 1 | github-code | 36 |
39060336799 | from obspy import read
from numpy import genfromtxt,sin,cos,deg2rad,array,c_
from matplotlib import pyplot as plt
n=read(u'/Users/dmelgar/kestrel/BRIC/BRIC.BK/BYN.00.D/BRIC.BK.BYN.00.D.2016.232')
e=read(u'/Users/dmelgar/kestrel/BRIC/BRIC.BK/BYE.00.D/BRIC.BK.BYE.00.D.2016.232')
z=read(u'/Users/dmelgar/kestrel/BRIC/BRIC.BK/BYZ.00.D/BRIC.BK.BYZ.00.D.2016.232')
n[0].data=n[0].data*100e-6
e[0].data=e[0].data*100e-6
z[0].data=z[0].data*100e-6
yl=[-0.08,0.08]
#sopac
g=genfromtxt('/Users/dmelgar/Downloads/pos_brib_57620_00')
x1=g[:,2]-g[0,2]
y1=g[:,3]-g[0,3]
z1=g[:,4]-g[0,4]
x2=g[:,8]-g[0,8]
y2=g[:,9]-g[0,9]
z2=g[:,10]-g[0,10]
#Rotate to local NEU
lat=deg2rad(37.91940521)
lon=deg2rad(-122.15255493)
R=array([[-sin(lat)*cos(lon),-sin(lat)*sin(lon),cos(lat)],[-sin(lon),cos(lon),0],[cos(lon)*cos(lat),cos(lat)*sin(lon),sin(lat)]])
scripps1=R.dot(c_[x1,y1,z1].T).T
scripps2=R.dot(c_[x2,y2,z2].T).T
plt.subplot(311)
plt.plot(n[0].times(),n[0].data,'k')
plt.plot(scripps1[:,0],c='#1E90FF')
plt.plot(scripps2[:,0],c='#DC143C')
plt.xlim([0,len(y1)])
plt.ylabel('North (m)')
plt.legend(['Kestrel RTX','Scripps 1','Scripps 2'])
plt.ylim(yl)
plt.subplot(312)
plt.plot(e[0].times(),e[0].data,'k')
plt.plot(scripps1[:,1],c='#1E90FF')
plt.plot(scripps2[:,1],c='#DC143C')
plt.xlim([0,len(x1)])
plt.ylabel('East (m)')
plt.ylim(yl)
plt.subplot(313)
plt.plot(z[0].times(),z[0].data,'k')
plt.plot(scripps1[:,2],c='#1E90FF')
plt.plot(scripps2[:,2],c='#DC143C')
plt.xlim([0,len(y1)])
plt.ylabel('Up (m)')
plt.xlabel('Seconds')
plt.ylim(yl)
plt.show()
| Ogweno/mylife | kestrel/plot_data.py | plot_data.py | py | 1,542 | python | en | code | 0 | github-code | 36 |
43891326692 | from math import factorial
# Read numbers and print their factorials until the user answers 'N'.
# Bug fixes: math.factorial requires an integer, so the float() conversions
# raised TypeError; and `continua != 'N' or 'S'` was always truthy ('S' is
# a non-empty string), so "Opção inválida" was printed even after a valid
# 'S'. The unreachable trailing `else: print('Obrigado')` is removed
# ('N' always exits first).
n = int(input('Digite um número qualquer para ver seu fatorial: '))
print(factorial(n))
continua = str(input('Quer continuar? [S/N] ')).upper()
while continua == 'S':
    n = int(input('Digite outro número: '))
    print(factorial(n))
    continua = str(input('Quer continuar? [S/N] ')).upper()
    if continua == 'N':
        exit('Obrigado')
    elif continua != 'S':
        print('Opção inválida')
| Kaue-Romero/Python_Repository | Exercícios/exerc_60.py | exerc_60.py | py | 457 | python | pt | code | 0 | github-code | 36 |
28891628391 | """Tests for traces.traces."""
import ast
import collections
import sys
import textwrap
from pytype import config
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.tests import test_utils
from pytype.tools.traces import traces
import unittest
_PYVER = sys.version_info[:2]
_BINMOD_OP = "BINARY_OP" if _PYVER >= (3, 11) else "BINARY_MODULO"
_CALLFUNC_OP = "CALL" if _PYVER >= (3, 11) else "CALL_FUNCTION"
_CALLMETH_OP = "CALL" if _PYVER >= (3, 11) else "CALL_METHOD"
_FORMAT_OP = "FORMAT_VALUE" if _PYVER >= (3, 11) else "BINARY_MODULO"
class _NotImplementedVisitor(traces.MatchAstVisitor):
def visit_Module(self, node):
self.match(node)
class _TestVisitor(traces.MatchAstVisitor):
  """Visitor that records every successful match(), keyed by node class."""

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Maps an ast node class to the list of traces matched for it.
    self.traces_by_node_type = collections.defaultdict(list)

  def generic_visit(self, node):
    # Node types without a match_* implementation are simply skipped.
    try:
      found = self.match(node)
    except NotImplementedError:
      pass
    else:
      self.traces_by_node_type[type(node)].extend(found)
class TraceTest(unittest.TestCase):
  """Tests for traces.trace."""

  def test_traces(self):
    src = traces.trace("")
    # 3.11 moved the implicit `return None` trace to opcode index 0.
    trace, = src.traces[0 if _PYVER >= (3, 11) else 1]
    self.assertEqual(trace.op, "LOAD_CONST")
    self.assertIsNone(trace.symbol)
    pyval, = trace.types
    self.assertEqual(pyval.name, "builtins.NoneType")
    self.assertEqual(pyval.cls.name, "builtins.NoneType")

  def test_options(self):
    # The filename given in the options is threaded through to the source.
    src = traces.trace("", config.Options.create("rumpelstiltskin"))
    self.assertEqual(src.filename, "rumpelstiltskin")

  def test_external_type(self):
    # Types resolved through an imports map should be traced by name.
    with test_utils.Tempdir() as d:
      pyi_path = d.create_file("foo.pyi", "class Foo: ...")
      imports_info = d.create_file("imports_info", f"foo {pyi_path}")
      src = traces.trace(
          "import foo\nx = foo.Foo()",
          config.Options.create(imports_map=imports_info))
      trace, = (x for x in src.traces[2] if x.op == "STORE_NAME")
      pyval, = trace.types
      self.assertEqual(pyval.name, "foo.Foo")
      self.assertEqual(pyval.cls.name, "foo.Foo")

  def test_py3_class(self):
    src = traces.trace(textwrap.dedent("""
      class Foo:
        pass
    """).lstrip())
    trace, = (x for x in src.traces[1] if x.op == "LOAD_BUILD_CLASS")
    pyval, = trace.types
    self.assertEqual(pyval.name, "typing.Callable")

  def test_unknown(self):
    # pytype represents unannotated function parameters as unknowns. Make sure
    # unknowns don't appear in the traced types.
    src = traces.trace("def f(x): return x")
    trace = next(x for x in src.traces[1] if x.op == "LOAD_FAST")
    pyval, = trace.types
    self.assertIsInstance(pyval, pytd.AnythingType)
class MatchAstTestCase(unittest.TestCase):
  """Base class for testing traces.MatchAstVisitor."""

  def _parse(self, text, options=None):
    # Normalize indented triple-quoted snippets before parsing/tracing.
    source = textwrap.dedent(text).lstrip()
    return ast.parse(source), traces.trace(source, options)

  def _get_traces(self, text, node_type, options=None):
    # Run a recording visitor over the snippet and return the traces
    # collected for the requested ast node type.
    module, src = self._parse(text, options)
    visitor = _TestVisitor(src, ast)
    visitor.visit(module)
    return visitor.traces_by_node_type[node_type]

  def assertTracesEqual(self, actual_traces, expected_traces):
    # Each expected trace is a (location, opcode, symbol, annotations) tuple.
    self.assertEqual(len(actual_traces), len(expected_traces))
    for (loc, trace), expected in zip(actual_traces, expected_traces):
      want_loc, want_op, want_symbol, want_annots = expected
      self.assertEqual(loc, want_loc)
      self.assertEqual(trace.op, want_op)
      self.assertEqual(trace.symbol, want_symbol)
      self.assertEqual(len(trace.types), len(want_annots))
      for pyval, annot in zip(trace.types, want_annots):
        self.assertEqual(pytd_utils.Print(pyval), annot)
class MatchAstVisitorTest(MatchAstTestCase):
  """Tests for traces.MatchAstVisitor."""

  def test_not_implemented(self):
    # Node types without a match_* method must raise on match().
    module, src = self._parse("")
    v = _NotImplementedVisitor(src, ast)
    with self.assertRaises(NotImplementedError):
      v.visit(module)

  def test_import(self):
    # Plain and aliased imports produce different opcodes.
    matches = self._get_traces("import os, sys as tzt", ast.Import)
    self.assertTracesEqual(matches, [
        ((1, 7), "IMPORT_NAME", "os", ("module",)),
        ((1, 18), "STORE_NAME", "tzt", ("module",))])

  def test_import_from(self):
    matches = self._get_traces(
        "from os import path as p, environ", ast.ImportFrom)
    self.assertTracesEqual(matches, [
        ((1, 23), "STORE_NAME", "p", ("module",)),
        ((1, 26), "STORE_NAME", "environ", ("os._Environ[str]",))])
class MatchAttributeTest(MatchAstTestCase):
  """Tests for traces.MatchAstVisit.match_Attribute."""

  def test_basic(self):
    # Annotations are (type of the object, type of the attribute).
    matches = self._get_traces("""
      x = 0
      print(x.real)
    """, ast.Attribute)
    self.assertTracesEqual(matches, [
        ((2, 8), "LOAD_ATTR", "real", ("int", "int"))])

  def test_multi(self):
    matches = self._get_traces("""
      class Foo:
        real = True
      x = 0
      (Foo.real, x.real)
    """, ast.Attribute)
    # The second attribute is at the wrong location due to limitations of
    # source.Code.get_attr_location(), but we can at least test that we get the
    # right number of traces with the right types.
    self.assertTracesEqual(matches, [
        ((4, 5), "LOAD_ATTR", "real", ("Type[Foo]", "bool")),
        ((4, 5), "LOAD_ATTR", "real", ("int", "int"))])

  def test_property(self):
    # A property access should report the property's computed return type.
    matches = self._get_traces("""
      class Foo:
        @property
        def x(self):
          return 42
      v = Foo().x
    """, ast.Attribute)
    self.assertTracesEqual(matches, [
        ((5, 10), "LOAD_ATTR", "x", ("Foo", "int"))])
class MatchNameTest(MatchAstTestCase):
  """Tests for traces.MatchAstVisitor.match_Name."""

  def test_basic(self):
    matches = self._get_traces("x = 42", ast.Name)
    self.assertTracesEqual(matches, [((1, 0), "STORE_NAME", "x", ("int",))])

  def test_multiline(self):
    # A store whose value expression spans lines is reported at line 1.
    matches = self._get_traces("""
      x = (1 +
           2)
    """, ast.Name)
    self.assertTracesEqual(matches, [((1, 0), "STORE_NAME", "x", ("int",))])

  def test_multiline_subscr(self):
    # Both the store and the later subscript load see the full union type.
    matches = self._get_traces("""
      x = [0]
      x[0] = (1,
              2)
    """, ast.Name)
    x_annot = "List[Union[int, Tuple[int, int]]]"
    self.assertTracesEqual(matches, [((1, 0), "STORE_NAME", "x", (x_annot,)),
                                     ((2, 0), "LOAD_NAME", "x", (x_annot,))])
class MatchCallTest(MatchAstTestCase):
  """Tests for traces.MatchAstVisitor.match_Call."""

  def test_basic(self):
    # Annotations are (callable type, return type).
    matches = self._get_traces("""
      def f(x):
        return x + 1.0
      f(42)
    """, ast.Call)
    self.assertTracesEqual(matches, [
        ((3, 0), _CALLFUNC_OP, "f", ("Callable[[Any], Any]", "float"))])

  def test_chain(self):
    # Constructor call plus a method call on the new instance.
    matches = self._get_traces("""
      class Foo:
        def f(self, x):
          return x
      Foo().f(42)
    """, ast.Call)
    self.assertTracesEqual(matches, [
        ((4, 0), _CALLFUNC_OP, "Foo", ("Type[Foo]", "Foo")),
        ((4, 0), _CALLMETH_OP, "f", ("Callable[[Any], Any]", "int"))])

  def test_multiple_bindings(self):
    # When the callee is ambiguous, the return type is the union over bindings.
    matches = self._get_traces("""
      class Foo:
        @staticmethod
        def f(x):
          return x
      class Bar:
        @staticmethod
        def f(x):
          return x + 1.0
      f = Foo.f if __random__ else Bar.f
      f(42)
    """, ast.Call)
    self.assertTracesEqual(matches, [
        ((10, 0), _CALLFUNC_OP, "f",
         ("Callable[[Any], Any]", "Union[int, float]"))])

  def test_bad_call(self):
    # A call with the wrong arity still produces a trace (return is Any).
    matches = self._get_traces("""
      def f(): pass
      f(42)
    """, ast.Call)
    self.assertTracesEqual(
        matches, [((2, 0), _CALLFUNC_OP, "f", ("Callable[[], Any]", "Any"))])

  def test_literal(self):
    matches = self._get_traces("''.upper()", ast.Call)
    self.assertTracesEqual(matches, [
        ((1, 0), _CALLMETH_OP, "upper", ("Callable[[], str]", "str"))])

  def test_lookahead(self):
    # A call whose arguments span several lines is reported at the call line.
    matches = self._get_traces("""
      def f(x, y, z):
        return x + y + z
      f(
          0,
          1,
          2,
      )
    """, ast.Call)
    self.assertTracesEqual(matches, [
        ((3, 0), _CALLFUNC_OP, "f",
         ("Callable[[Any, Any, Any], Any]", "int"))])
class MatchConstantTest(MatchAstTestCase):
  """Tests for traces.MatchAstVisitor.match_Constant."""

  def test_num(self):
    matches = self._get_traces("v = 42", ast.Constant)
    self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", 42, ("int",))])

  def test_str(self):
    matches = self._get_traces("v = 'hello'", ast.Constant)
    self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", "hello", ("str",))])

  def test_unicode(self):
    # The u'' prefix is a no-op in Python 3: still a plain str.
    matches = self._get_traces("v = u'hello'", ast.Constant)
    self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", "hello", ("str",))])

  def test_bytes(self):
    matches = self._get_traces("v = b'hello'", ast.Constant)
    self.assertTracesEqual(
        matches, [((1, 4), "LOAD_CONST", b"hello", ("bytes",))])

  def test_bool(self):
    matches = self._get_traces("v = True", ast.Constant)
    self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", True, ("bool",))])

  def test_ellipsis(self):
    matches = self._get_traces("v = ...", ast.Constant)
    self.assertTracesEqual(
        matches, [((1, 4), "LOAD_CONST", Ellipsis, ("ellipsis",))])
class MatchSubscriptTest(MatchAstTestCase):
  """Tests for traces.MatchAstVisitor.match_Subscript."""

  def test_index(self):
    matches = self._get_traces("""
      v = "hello"
      print(v[0])
    """, ast.Subscript)
    self.assertTracesEqual(
        matches, [((2, 6), "BINARY_SUBSCR", "__getitem__", ("str",))])

  def test_simple_slice(self):
    # Slices compile to the same subscript opcode as plain indexing.
    matches = self._get_traces("""
      v = "hello"
      print(v[:-1])
    """, ast.Subscript)
    self.assertTracesEqual(
        matches, [((2, 6), "BINARY_SUBSCR", "__getitem__", ("str",))])

  def test_complex_slice(self):
    matches = self._get_traces("""
      v = "hello"
      print(v[0:4:2])
    """, ast.Subscript)
    self.assertTracesEqual(
        matches, [((2, 6), "BINARY_SUBSCR", "__getitem__", ("str",))])
class MatchBinOpTest(MatchAstTestCase):
  """Tests for traces.MatchAstVisitor.match_BinOp (string %-formatting)."""

  def test_modulo(self):
    matches = self._get_traces("""
      v = "hello %s"
      print(v % "world")
    """, ast.BinOp)
    self.assertTracesEqual(matches, [((2, 6), _BINMOD_OP, "__mod__", ("str",))])

  def test_modulo_multiline_string(self):
    # Implicit string concatenation across lines: the op is reported at the
    # first line of the string expression.
    matches = self._get_traces("""
      ('%s'
       '%s' %
       ('hello',
        'world'))
    """, ast.BinOp)
    self.assertTracesEqual(matches, [((1, 1), _BINMOD_OP, "__mod__", ("str",))])

  def test_format_multiline_string(self):
    # With non-constant operands some Python versions emit FORMAT_VALUE.
    matches = self._get_traces("""
      ('%s'
       '%s' %
       (__any_object__,
        __any_object__))
    """, ast.BinOp)
    self.assertTracesEqual(
        matches, [((1, 1), _FORMAT_OP, "__mod__", ("str",))])
class MatchLambdaTest(MatchAstTestCase):
  """Tests for traces.MatchAstVisitor.match_Lambda."""

  def test_basic(self):
    matches = self._get_traces("lambda x: x.upper()", ast.Lambda)
    sym = "<lambda>"
    self.assertTracesEqual(
        matches, [((1, 0), "MAKE_FUNCTION", sym, ("Callable[[Any], Any]",))])

  def test_function_locals(self):
    # Nested lambdas get a qualified <locals> symbol.
    matches = self._get_traces("""
      def f():
        return lambda x: x.upper()
    """, ast.Lambda)
    sym = "f.<locals>.<lambda>"
    self.assertTracesEqual(
        matches, [((2, 9), "MAKE_FUNCTION", sym, ("Callable[[Any], Any]",))])

  def test_multiple_functions(self):
    # Several lambdas on one line are distinguished only by column offset.
    matches = self._get_traces("""
      def f():
        return (w for w in range(3)), lambda x: x.upper(), lambda y, z: (y, z)
    """, ast.Lambda)
    sym = "f.<locals>.<lambda>"
    self.assertTracesEqual(
        matches, [
            ((2, 32), "MAKE_FUNCTION", sym, ("Callable[[Any], Any]",)),
            ((2, 53), "MAKE_FUNCTION", sym, ("Callable[[Any, Any], Any]",))])
# Run every test case when executed as a script.
if __name__ == "__main__":
  unittest.main()
| google/pytype | pytype/tools/traces/traces_test.py | traces_test.py | py | 11,794 | python | en | code | 4,405 | github-code | 36 |
19074906476 | from collections import Iterator, Iterable
#global set_num
#set_num = 0
class Disjoint_set(Iterable):
    """A disjoint set stored as a singly linked list of Element nodes.

    Each member keeps a back-pointer (``element.set``) to the set that
    currently owns it, so looking up an element's set is O(1); two sets
    are merged with the module-level union().
    """

    def __init__(self, element=None):
        # BUG FIX: the default ``element=None`` used to crash on
        # ``element.set = self``; an empty set is now representable.
        self.head = element
        self.tail = element
        if element is not None:
            element.set = self

    def add_element(self, element):
        """Append ``element`` at the tail and claim ownership of it."""
        if self.head is not None:
            self.tail.next = element
        else:
            self.head = element
        self.tail = element
        element.set = self

    def __str__(self):
        # Shown as [first label, last label], mirroring the original format.
        if not self.is_empty():
            return str([self.head.label, self.tail.label])
        return 'empty set'

    def get_elements(self):
        """Return the labels of all elements, in insertion order."""
        return [element.label for element in self]

    def get_len(self):
        """Return the number of elements in this set."""
        return sum(1 for _ in self)

    def is_empty(self):
        return self.head is None

    def __iter__(self):
        # ``next`` is False (falsy) on the last node, which ends the walk.
        element = self.head
        while element:
            yield element
            element = element.next
class Element():
    """A single node of a disjoint set's linked list."""

    def __init__(self, label):
        # Not yet owned by any set; ``set`` is assigned by Disjoint_set.
        self.set = None
        # Successor within the owning set's list; False marks the end.
        self.next = False
        self.label = label

    def show_set(self):
        """Return the labels of every element in this element's set."""
        return self.set.get_elements()

    def __str__(self):
        return str(self.label)
def union(set1, set2):
    """Merge the smaller of set1/set2 into the larger; return the survivor.

    Weighted union: the shorter list is spliced onto the longer one's tail,
    every moved element's ``set`` pointer is retargeted to the survivor,
    and the donor set is emptied.
    """
    if set1.get_len() > set2.get_len():
        max_set, min_set = set1, set2
    else:
        max_set, min_set = set2, set1
    # BUG FIX: merging an empty set used to corrupt max_set's tail and then
    # crash in the retargeting loop (``None.set``); it is now a no-op.
    if min_set.is_empty():
        return max_set
    # Splice the short list onto the long one's tail.
    max_set.tail.next = min_set.head
    max_set.tail = min_set.tail
    # Retarget ownership of every moved element.
    cur = min_set.head
    cur.set = max_set
    while cur.next:
        cur = cur.next
        cur.set = max_set
    # Empty the donor set (tail is now None for consistency with head,
    # where the original left the inconsistent sentinel False).
    min_set.head = None
    min_set.tail = None
    return max_set
| LouisYLWang/Algorithms | Clustering_algorithm/Disjoint_set.py | Disjoint_set.py | py | 2,199 | python | en | code | 0 | github-code | 36 |
36740712303 | # coding=UTF-8
# Importamos las librerías
import sys
import os
import math
import csv
import numpy as np
from itertools import groupby
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
# Restart the program by re-executing the interpreter in place with the
# same command-line arguments (the current process image is replaced).
def reiniciar():
    os.execl(sys.executable, sys.executable, *sys.argv)
# Evaluate an isotropic 2-D Gaussian bell of amplitude ``a`` and spread
# ``s`` centred at (mux, muy) at the point (x, y).
def funcionGauss(a, s, x, y, mux, muy):
    norm = a / (math.sqrt(2.0 * math.pi) * s)
    dist_sq = (x - mux) ** 2.0 + (y - muy) ** 2.0
    return norm * math.exp(-(0.5 / (s ** 2)) * dist_sq)
# Read the generated data file and draw a 3-D surface with contour
# projections on each coordinate plane.
def generarGrafico():
    data = []
    # BUG FIX: opened with ``with`` so the file is always closed; the old
    # try/finally raised NameError when open() itself failed, because
    # ``ficheroDatos`` was never bound.
    with open('datos.csv') as ficheroDatos:
        csv_reader = csv.reader(ficheroDatos)
        next(csv_reader, None)  # Skip the header row with the column names.
        # BUG FIX (Python 3): map() returns a lazy, unsubscriptable object;
        # each row must be materialised as a list of floats.
        for line in csv_reader:
            data.append(list(map(float, line)))
    # Regroup the flat (x, y, f) rows into the axes and a Z matrix.
    X, Z = [], []
    for x, g in groupby(data, key=lambda line: line[0]):
        X.append(x)
        Y = []
        new_Z = []
        for y, gg in groupby(g, key=lambda line: line[1]):
            Y.append(y)
            new_Z.append(list(gg)[-1][2])
        Z.append(new_Z)
    # Convert X, Y and Z into array form suitable for surface plotting.
    X, Y = np.meshgrid(X, Y)
    Z = np.array(Z)
    # BUG FIX: ``fig.gca(projection='3d')`` was removed in matplotlib 3.6;
    # add_subplot is the portable way to create a 3-D axes.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Surface of the data plus a contour projection on each plane.
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.3)
    ax.contour(X, Y, Z, zdir='z', offset=-50, cmap=cm.coolwarm)
    ax.contour(X, Y, Z, zdir='x', offset=-100, cmap=cm.coolwarm)
    ax.contour(X, Y, Z, zdir='y', offset=-100, cmap=cm.coolwarm)
    # Label each axis and fix its displayed range.
    ax.set_xlabel('X')
    ax.set_xlim(-100, 1200)
    ax.set_ylabel('Y')
    ax.set_ylim(-100, 1200)
    ax.set_zlabel('Z')
    ax.set_zlim(-50, 130)
    plt.show()
# Generate the sample data over a discretised grid as the sum of three 2-D
# Gaussians, writing it both as .csv (for WEKA) and .dat (for GNUPlot).
def generarDatos():
    # Correction factor so the generated values are never vanishingly small.
    factorCorreccion = 0.00001
    # Spreads controlling the dispersion around each Gaussian centre.
    s1 = 100.0
    s2 = 130.0
    s3 = 60.0
    # Coordinates of the three Gaussian centres.
    mu1x = 250.0
    mu1y = 250.0
    mu2x = 550.0
    mu2y = 850.0
    mu3x = 830.0
    mu3y = 300.0
    # Amplitudes of the three bells.
    a1 = 11500.0
    a2 = 12000.0
    a3 = 15500.0
    # BUG FIX: both output files are opened with ``with`` so they are
    # closed even if a write fails midway.
    with open('datos.csv', 'w') as ficheroDatosCSV, \
            open('datos.dat', 'w') as ficheroDatosDat:
        # Header row naming the variables (CSV only).
        ficheroDatosCSV.write("x" + "," + "y" + "," + "f" + "\n")
        # Sample the grid: i/j step by 4, so x/y advance in 40-unit steps.
        for i in range(0, 100, 4):
            x = 100.0 + i * 10.0
            for j in range(0, 100, 4):
                y = 100.0 + j * 10.0
                # Sum of the three Gaussian components at (x, y).
                f1 = funcionGauss(a1, s1, x, y, mu1x, mu1y)
                f2 = funcionGauss(a2, s2, x, y, mu2x, mu2y)
                f3 = funcionGauss(a3, s3, x, y, mu3x, mu3y)
                valor = f1 + f2 + f3 + factorCorreccion
                ficheroDatosCSV.write(str(x) + "," + str(y) + "," + str(valor) + "\n")
                ficheroDatosDat.write(str(x) + " " + str(y) + " " + str(valor) + "\n")
def main():
    """Entry point: build the sample data set, then visualise it."""
    generarDatos()
    generarGrafico()


if __name__ == "__main__":
    main()
| DNC87/EM-Dataset-Generator | generador_datos/main.py | main.py | py | 4,438 | python | es | code | 0 | github-code | 36 |
26486114880 | # Following information from PEP 440 (https://peps.python.org/pep-0440/)
__version__ = "2022.02.dev1"
class TableParseError(Exception):
"""Excpetion when error hit while converting a *_table file
to YAML"""
def __init__(self, file, lineno, line, message=None):
self.file = file
self.lineno = lineno
self.line = line
if message is None:
self.message = f"Parse error: file: {self.file}({self.lineno}\n"
self.message += f"line: {self.line}"
else:
self.message = message
def __str__(self):
return self.message
| NOAA-GFDL/fms_yaml_tools | fms_yaml_tools/__init__.py | __init__.py | py | 614 | python | en | code | 0 | github-code | 36 |
9286381152 | """
## Max Value ##
Write a function, max_value, that takes in list of numbers as an argument.
The function should return the largest number in the list.
Solve this without using any built-in list methods.
You can assume that the list is non-empty.
"""
from time import time
# Defining a decorator to time execution of any fucntion
def timer_func(func):
# This function shows the execution time of
# the function object passed
def wrap_func(*args, **kwargs):
t1 = time()
result = func(*args, **kwargs)
t2 = time()
print(f'In {(t2-t1):.4f}s ', end="")
return result
return wrap_func
# --- Solution ---
@timer_func
def max_value(nums):
max = float('-inf') # Assigned infinity as an initial value to 'max'
for num in nums:
if num > max:
max = num
return max
# --- Tests ---
test_input_values = [
[4, 7, 2, 8, 10, 9],
[10, 5, 40, 40.3],
[-5, -2, -1, -11],
[42],
[1000, 8],
[1000, 8, 9000],
[2, 5, 1, 1, 4],
]
expected_results = [
10, 40.3, -1, 42, 1000, 9000, 5
]
for i in range(0,len(test_input_values)):
result = max_value(test_input_values[i])
assert result == expected_results[i], \
f'Expected max value as {expected_results[i]}, got: {result}'
print(f'test [{i}] passed, with correct result as {expected_results[i]}.') | RuthraVed/programming-practice-solutions | structy-practice-solutions/01-max-value.py | 01-max-value.py | py | 1,393 | python | en | code | 0 | github-code | 36 |
74588531623 | # coding=utf-8
__author__ = "Arnaud KOPP"
__copyright__ = "© 2015-2016 KOPP Arnaud All Rights Reserved"
__credits__ = ["KOPP Arnaud"]
__license__ = "GNU GPL V3.0"
__maintainer__ = "Arnaud KOPP"
__email__ = "kopp.arnaud@gmail.com"
__status__ = "Production"
from collections import OrderedDict
import logging
import pandas as pd
log = logging.getLogger(__name__)
class MultiFASTA(object):
"""
Class for FASTA files
"""
def __init__(self):
# fetch the sequence using this attribute
self._fasta_fetcher = FASTA()
# an ordered dictionary to store the fasta contents
self._fasta = OrderedDict()
def __len__(self):
return len(self._fasta)
def _get_fasta(self):
return self._fasta
fasta = property(_get_fasta, doc="Returns all FASTA instances ")
def _get_ids(self):
return [f for f in self._fasta.keys()]
ids = property(_get_ids, doc="returns list of keys/accession identifiers")
def load_fasta(self, ids):
"""
Loads a single FASTA file into the dictionary
:param ids:
"""
if isinstance(ids, str):
ids = [ids]
for id_ in ids:
self._fasta_fetcher.load(id_)
# create a new instance of FASTA and save fasta data
f = FASTA()
f._fasta = self._fasta_fetcher._fasta[:]
# append in the ordered dictionary
self._fasta[id_] = f
log.info("%s loaded" % id_)
def save_fasta(self, filename):
"""
Save all FASTA into a file
:param filename:
"""
fh = open(filename, "w")
for f in self._fasta.values():
fh.write(f.fasta)
fh.close()
def read_fasta(self, filename):
"""
Load several FASTA from a filename
:param filename:
"""
fh = open(filename, "r")
data = fh.read()
fh.close()
# we split according to ">2 character
for thisfasta in data.split(">")[1:]:
f = FASTA()
f._fasta = f._interpret(thisfasta)
if f.accession is not None and f.accession not in self.ids:
self._fasta[f.accession] = f
else:
log.warning("Accession %s is already in the ids list or could not be interpreted. skipped" %
str(f.accession))
def _get_df(self):
df = pd.concat([self.fasta[id_].df for id_ in self.fasta.keys()])
df.reset_index(inplace=True)
return df
df = property(_get_df)
def hist_size(self, **kargs):
"""
:param kargs:
"""
try:
import pylab
self.df.Size.hist(**kargs)
pylab.title("Histogram length of the sequences")
pylab.xlabel("Length")
except:
pass
class FASTA(object):
"""
Fasta class
"""
known_dbtypes = ["sp", "gi"]
def __init__(self):
self._fasta = None
def _get_fasta(self):
return self._fasta
fasta = property(_get_fasta, doc="returns FASTA content")
# for all types
def _get_sequence(self):
if self.fasta:
return "".join(self.fasta.split("\n")[1:])
else:
raise ValueError("You need to load a fasta sequence first using get_fasta or read_fasta")
sequence = property(_get_sequence, doc="returns the sequence only")
# for all types
def _get_header(self):
if self.fasta:
return self.fasta.split("\n")[0]
else:
raise ValueError("You need to load a fasta sequence first using get_fasta or read_fasta")
header = property(_get_header, doc="returns header only")
def _get_dbtype(self):
dbtype = self.header.split("|")[0].replace(">", "")
return dbtype
dbtype = property(_get_dbtype)
# for all types
def _get_identifier(self):
return self.header.split(" ")[0]
identifier = property(_get_identifier)
def _get_entry(self):
return self.header.split("|")[2].split(" ")[0]
entry = property(_get_entry, doc="returns entry only")
# swiss prot only
def _get_accession(self):
if self.dbtype == "sp":
# header = self.header
return self.identifier.split("|")[1]
elif self.dbtype == "gi":
return self.identifier.split("|")[1]
accession = property(_get_accession)
# swiss prot only
def _get_name_sp(self):
if self.dbtype == "sp":
header = self.header
return header.split(" ")[0].split("|")[2]
name = property(_get_name_sp)
def _get_df(self):
df = pd.DataFrame({
"Identifiers": [self.identifier],
"Accession": [self.accession],
"Entry": [self.entry],
"Database": [self.dbtype],
"Organism": [self.organism],
"PE": [self.PE],
"SV": [self.SV],
"Sequence": [self.sequence],
"Header": [self.header],
"Size": [len(self.sequence)]})
return df
df = property(_get_df)
def _get_info_from_header(self, prefix):
if prefix not in self.header:
return None
# finds the prefix
index = self.header.index(prefix + "=")
# remove it
name = self.header[index:][3:]
# figure out if there is anothe = sign to split the string
# otherwise, the prefix we looked for is the last one anyway
if "=" in name:
name = name.split("=")[0]
# here each = sign in FASTA is preceded by 2 characters that we must remove
name = name[0:-2]
name = name.strip()
else:
name = name.strip()
return name
def _get_gene_name(self):
return self._get_info_from_header("GN")
gene_name = property(_get_gene_name,
doc="returns gene name from GN keyword found in the header if any")
def _get_organism(self):
return self._get_info_from_header("OS")
organism = property(_get_organism,
doc="returns organism from OS keyword found in the header if any")
def _get_PE(self):
pe = self._get_info_from_header("PE")
if pe is not None:
return int(pe)
PE = property(_get_PE,
doc="returns PE keyword found in the header if any")
def _get_SV(self):
sv = self._get_info_from_header("SV")
if sv is not None:
return int(sv)
SV = property(_get_SV,
doc="returns SV keyword found in the header if any")
def __str__(self):
str_ = self.fasta
return str_
def load(self, id_):
self.load_fasta(id_)
def load_fasta(self, id_):
"""
:param id_:
:raise Exception:
"""
from BioREST.Uniprot import Uniprot
u = Uniprot()
try:
res = u.retrieve(id_, frmt="fasta")
# some entries in uniprot are valid but obsolet and return empty string
if res == "":
raise Exception
self._fasta = res[:]
except:
pass
def save_fasta(self, filename):
"""
Save FASTA file into a filename
:param str filename: where to save it
"""
if self._fasta is None:
raise ValueError("No fasta was read or downloaded. Nothing to save.")
fh = open(filename, "w")
fh.write(self._fasta)
fh.close()
def read_fasta(self, filename):
"""
:param filename:
:raise ValueError:
"""
fh = open(filename, "r")
data = fh.read()
fh.close()
# Is there more than one sequence ?
data = data.split(">")[1:]
if len(data) > 1 or len(data) == 0:
raise ValueError(
"""Only one sequence expected to be found. Found %s. Please use MultiFASTA class instead""" % len(data))
self._data = data
if data.count(">sp|") > 1:
raise ValueError("""It looks like your FASTA file contains more than
one FASTA. You must use MultiFASTA class instead""")
self._fasta = data[:]
self._fasta = self._fasta[0]
if self.dbtype not in self.known_dbtypes:
log.warning("Only sp and gi header are recognised so far but sequence and header are loaded")
@staticmethod
def _interpret(data):
# cleanup the data in case of empty spaces or \n characters
return data
| ArnaudKOPP/BioREST | BioREST/Fasta.py | Fasta.py | py | 8,602 | python | en | code | 0 | github-code | 36 |
9048437267 | fire_stations = ["Alpha", "Beta", "Theta",
"Center", "Railway", "Harbor", "Suburb"]
# Headcount per station, aligned index-for-index with fire_stations.
personnel = [12,13,23,44,23,11,42]
fire_duty = []
station_on_duty = ""
a = 0
i = 0
# NOTE(review): ``min`` shadows the builtin of the same name.
min = personnel[0]
understaffed = ""
input_device = ""
# Build the duty rotation in station order.
for i in range(7):
    fire_duty.append(fire_stations[i])
# Find the least-staffed station. Because of ``>=`` a tie is resolved in
# favour of the LAST station with the minimum headcount — presumably
# intentional, confirm with the author.
for m in range(7):
    if min >= personnel[m]:
        min = personnel[m]
        understaffed = fire_stations[m]
# Rotate through the duty list (wrapping after 7) on each "true" input;
# "false" forces i past the bound, ending the loop. ``i`` enters this loop
# at 6, left over from the for-loop above.
while i < 52:
    input_device = input("Input true or false: ")
    if input_device == "true":
        if a < 7:
            station_on_duty = fire_duty[a]
            if station_on_duty == understaffed:
                print("This station is understaffed")
            print(station_on_duty)
            a += 1
        else:
            # Wrap around to the first station in the rotation.
            a = 0
            station_on_duty = fire_duty[a]
            if station_on_duty == understaffed:
                print("This station is understaffed")
            print(station_on_duty)
            a += 1
    elif input_device == "false":
        # Push the counter past the loop bound to stop the procedure.
        i = 53
        print("Emergency stop of procedure")
i += 1
| Mierln/Computer-Science | Dylan/Fire_Station.py | Fire_Station.py | py | 1,062 | python | en | code | 0 | github-code | 36 |
9786188527 | import cv2
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
def threshold_normalize(data, transform):
    """Binarize ``data`` at intensity 254 and rescale it to [0, 1].

    When ``transform`` is true, also return an elastically deformed copy
    as a (plain, deformed) pair; otherwise return the plain image alone.
    """
    _, binary = cv2.threshold(np.uint8(data), 254, 255, cv2.THRESH_BINARY)
    if not transform:
        return binary / 255.0
    warped = elastic_transform(binary.copy())
    return binary / 255.0, warped / 255.0
def elastic_transform(data):
    """Apply a random elastic deformation to each 2-D image in ``data``.

    Per-pixel uniform noise in [-1, 1) is Gaussian-smoothed (sigma) and
    scaled (alpha) to form displacement fields, then each image is
    resampled at the displaced coordinates with linear interpolation.
    Referenced from https://gist.github.com/fmder/e28813c1e8721830ff9c.
    Images are modified in place; ``data`` is also returned.
    """
    alpha = 15
    sigma = 15
    print("Elastic Transform")
    # BUG FIX: the original called np.random.seed(1234) but then built an
    # *unseeded* RandomState, so the transform was never reproducible.
    # Seed the RandomState directly instead.
    rand_state = np.random.RandomState(1234)
    for i in range(len(data)):
        img_shape = data[i].shape
        dx = gaussian_filter((rand_state.rand(*img_shape) * 2 - 1), sigma, mode="constant") * alpha
        dy = gaussian_filter((rand_state.rand(*img_shape) * 2 - 1), sigma, mode="constant") * alpha
        # NOTE(review): meshgrid over (shape[0], shape[1]) only lines up for
        # square images — confirm inputs are square (MNIST-style 28x28).
        x, y = np.meshgrid(np.arange(img_shape[0]), np.arange(img_shape[1]))
        indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1))
        data[i] = map_coordinates(data[i], indices, order=1).reshape(img_shape)
    return data
35609489128 | from math import sqrt
import torch
from torch import nn
class FSRCNN(nn.Module):
    """
    Args:
        upscale_factor (int): Image magnification factor.
    """

    def __init__(self, upscale_factor: int) -> None:
        super(FSRCNN, self).__init__()
        # Feature extraction layer.
        # NOTE(review): Conv2d(1, ...) means the network expects a
        # single-channel input (presumably the Y/luminance plane) — confirm
        # against the data pipeline.
        self.feature_extraction = nn.Sequential(
            nn.Conv2d(1, 56, (5, 5), (1, 1), (2, 2)),
            nn.PReLU(56)
        )
        # Shrinking layer.
        self.shrink = nn.Sequential(
            nn.Conv2d(56, 12, (1, 1), (1, 1), (0, 0)),
            nn.PReLU(12)
        )
        # Mapping layer.
        self.map = nn.Sequential(
            nn.Conv2d(12, 12, (3, 3), (1, 1), (1, 1)),
            nn.PReLU(12),
            nn.Conv2d(12, 12, (3, 3), (1, 1), (1, 1)),
            nn.PReLU(12),
            nn.Conv2d(12, 12, (3, 3), (1, 1), (1, 1)),
            nn.PReLU(12),
            nn.Conv2d(12, 12, (3, 3), (1, 1), (1, 1)),
            nn.PReLU(12)
        )
        # Expanding layer.
        self.expand = nn.Sequential(
            nn.Conv2d(12, 56, (1, 1), (1, 1), (0, 0)),
            nn.PReLU(56)
        )
        # Deconvolution layer.
        # output_padding = upscale_factor - 1 keeps H_out exactly
        # upscale_factor * H_in for the stride-``upscale_factor`` transpose conv.
        self.deconv = nn.ConvTranspose2d(56, 1, (9, 9), (upscale_factor, upscale_factor), (4, 4), (upscale_factor - 1, upscale_factor - 1))
        # Initialize model weights.
        self._initialize_weights()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self._forward_impl(x)

    # Support torch.script function.
    def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
        # Pipeline: extract -> shrink -> map -> expand -> deconvolve (upscale).
        out = self.feature_extraction(x)
        out = self.shrink(out)
        out = self.map(out)
        out = self.expand(out)
        out = self.deconv(out)
        return out

    # The filter weight of each layer is a Gaussian distribution with zero mean and standard deviation initialized by random extraction 0.001 (deviation is 0).
    def _initialize_weights(self) -> None:
        # Conv layers: zero-mean Gaussian scaled by fan-out; biases zeroed.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight.data, mean=0.0, std=sqrt(2 / (m.out_channels * m.weight.data[0][0].numel())))
                nn.init.zeros_(m.bias.data)
        # The transpose-conv output layer uses a much smaller std.
        nn.init.normal_(self.deconv.weight.data, mean=0.0, std=0.001)
nn.init.zeros_(self.deconv.bias.data) | gmlwns2000/sharkshark-4k | src/upscale/model/fsrcnn/model.py | model.py | py | 2,315 | python | en | code | 14 | github-code | 36 |
33844865336 | import sys
# Substrings that must not appear in submitted code.
banned_words = ["os.sys","rmdir","subprocess","allowed_modules.csv","package.json","pyme.py","server.js","v.py","vx.py","clear.py","index.html","script.js","style.css","sleep","exec","eval"]

def validate(s):
    """Print each banned substring found in s; print "$-SUCCESS-$" if none."""
    clean = True
    for banned in banned_words:
        if banned in s:
            print(banned)
            clean = False
    if clean:
        print("$-SUCCESS-$")
# BUG FIX: sys.argv[0] is this script's own path, so ``len(sys.argv) > 0``
# was always true and a missing argument crashed with IndexError instead of
# showing the friendly message.
if(len(sys.argv) > 1):
    validate(sys.argv[1])
else:
    print("Looks like u forgot to send me the py code")
def one(list, elem):
    """Binary search in a sorted list; prints 'ID: <index>' or '-1'.

    BUG FIX: the original re-ran the O(n) ``elem not in list`` membership
    test on every loop iteration and raised IndexError on an empty list;
    the membership check now runs once, up front.
    """
    if elem not in list:
        return print('-1')
    low = 0
    high = len(list) - 1
    mid = (low + high) // 2
    while elem != list[mid]:
        if elem > list[mid]:
            low = mid + 1
        else:
            high = mid - 1
        mid = (low + high) // 2
    return print(f'ID: {mid}')
def two(list, elem):
    """Fibonacci search in a sorted list; prints 'ID: <index>' or '-1'.

    BUG FIX: the original compared a list *element* against an *index*
    (``list[len(list) - 1] == i``), used ``return i and print(...)`` so a
    hit at index 0 printed nothing, and could report wrong positions.
    This is the standard Fibonacci search.
    """
    fib_2, fib_1 = 0, 1  # F(k-2), F(k-1)
    fib = fib_2 + fib_1  # F(k): smallest Fibonacci number >= len(list)
    while fib < len(list):
        fib_2 = fib_1
        fib_1 = fib
        fib = fib_2 + fib_1
    offset = -1  # everything at or before ``offset`` has been ruled out
    while fib > 1:
        i = min(offset + fib_2, len(list) - 1)
        if list[i] < elem:
            # Discard the left third; shift the window down one Fibonacci step.
            fib = fib_1
            fib_1 = fib_2
            fib_2 = fib - fib_1
            offset = i
        elif list[i] > elem:
            # Discard the right part; shift down two Fibonacci steps.
            fib = fib_2
            fib_1 = fib_1 - fib_2
            fib_2 = fib - fib_1
        else:
            return print(f'ID: {i}')
    # One candidate may remain just past ``offset``.
    if fib_1 and offset + 1 < len(list) and list[offset + 1] == elem:
        return print(f'ID: {offset + 1}')
    return print('-1')
def three(list, direction):
    """Selection sort in place: 'right' sorts ascending, 'left' descending.

    Any other direction leaves the list untouched; the (possibly sorted)
    list is printed either way and None is returned.
    """
    if direction in ('right', 'left'):
        ascending = direction == 'right'
        for i in range(len(list)):
            best = i
            for j in range(i + 1, len(list)):
                candidate_wins = list[j] < list[best] if ascending else list[j] > list[best]
                if candidate_wins:
                    best = j
            list[best], list[i] = list[i], list[best]
    return print(list)
def four(list, direction):
    """Bubble sort in place: 'right' sorts ascending, 'left' descending.

    Any other direction leaves the list untouched; the list is printed
    either way and None is returned.
    """
    if direction in ('right', 'left'):
        ascending = direction == 'right'
        for done in range(len(list)):
            # After each pass the last ``done`` positions are already final.
            for j in range(len(list) - 1 - done):
                out_of_order = list[j] > list[j + 1] if ascending else list[j] < list[j + 1]
                if out_of_order:
                    list[j], list[j + 1] = list[j + 1], list[j]
    return print(list)
def five(list, direction):
    """Shell sort in place: 'right' sorts ascending, 'left' descending.

    Uses the gap sequence len//2, len//4, ..., 1. Any other direction
    leaves the list untouched; the list is printed either way.

    BUG FIX: the original compared gap-distant items but then swapped
    *adjacent* ones (``list[i], list[i - 1]``) and let ``i`` drop below the
    gap, comparing against negative indices. The result was only sorted
    because the final gap-1 pass degenerated to a full insertion sort.
    """
    if direction in ('right', 'left'):
        ascending = direction == 'right'
        gap = len(list) // 2
        while gap >= 1:
            for j in range(gap, len(list)):
                i = j
                # Gapped insertion: sift list[j] back through its gap-chain.
                while i >= gap:
                    swap_needed = list[i] < list[i - gap] if ascending else list[i] > list[i - gap]
                    if swap_needed:
                        list[i], list[i - gap] = list[i - gap], list[i]
                        i -= gap
                    else:
                        break
            gap //= 2
    return print(list)
def six_default(list):
    """Return a new ascending-sorted copy of ``list`` (recursive quicksort).

    The first element is the pivot; equal elements are kept together, so
    the sort is stable with respect to duplicates of the pivot.
    """
    if len(list) <= 1:
        return list
    pivot = list[0]
    smaller = [x for x in list if x < pivot]
    equal = [x for x in list if x == pivot]
    larger = [x for x in list if x > pivot]
    return six_default(smaller) + equal + six_default(larger)
def six_reverse(list):
    """Return a new descending-sorted copy of ``list`` (recursive quicksort).

    BUG FIX: despite its name (and the "ДОДЕЛАТЬ!" / "finish this!" note)
    the original concatenated smaller + equal + larger and so sorted
    ascending, exactly like six_default. The partitions are now joined in
    the reverse order. The last element is the pivot.
    """
    if len(list) <= 1:
        return list
    pivot = list[-1]
    smaller = [x for x in list if x < pivot]
    equal = [x for x in list if x == pivot]
    larger = [x for x in list if x > pivot]
    return six_reverse(larger) + equal + six_reverse(smaller)
six_reverse(list=[8, 4, 9, 52, 15, 24])
| Dary311/PythonLabs | laba 4.py | laba 4.py | py | 3,731 | python | en | code | 0 | github-code | 36 |
73574130663 | import torch
import torch.nn as nn
import torch.nn.functional as F
from policy import discrete_policy_net
from critic import attention_critic
import numpy as np
from buffer import replay_buffer
from make_env import make_env
import os
import random
from gym.spaces.discrete import Discrete
from gym.spaces.box import Box
import time
class maac_mpe(object):
    """Multi-Actor-Attention-Critic (MAAC) trainer for multi-agent particle envs.

    One attention critic is shared across agents; each agent has its own
    discrete policy.  Target copies of both are maintained with Polyak
    averaging (retention factor `rho`).

    Bug fix vs. the original: ``eval()`` indexed policies/observations with a
    stale loop variable (see comments there).
    """
    def __init__(self, env_id, batch_size, learning_rate, exploration, episode, gamma, alpha, capacity, rho, update_iter, update_every, head_dim, traj_len, render):
        """Build the environment, networks, target networks, buffer and optimizers."""
        self.env_id = env_id
        #self.env = make_env(self.env_id, discrete_action=True)
        self.env = make_env(self.env_id)
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.exploration = exploration  # episodes of uniform-random acting before learning starts
        self.episode = episode
        self.gamma = gamma
        self.capacity = capacity
        self.rho = rho  # target networks keep a `rho` fraction of their old weights per update
        self.update_iter = update_iter
        self.update_every = update_every
        self.head_dim = head_dim
        self.traj_len = traj_len  # trajectories are force-terminated every traj_len env steps
        self.render = render
        self.observation_dims = [int(self.env.observation_space[i].shape[0]) for i in range(self.env.n)]
        # Discrete spaces contribute `n` actions; the Box branch is a size
        # heuristic inherited from the original (sum of highs + shape).
        self.action_dims = [int(self.env.action_space[i].n) if isinstance(self.env.action_space[i], Discrete) else int(sum(self.env.action_space[i].high) + self.env.action_space[i].shape) for i in range(self.env.n)]
        self.alphas = [alpha for _ in range(self.env.n)]  # per-agent entropy temperature
        self.value_net = attention_critic(num_agent=self.env.n, sa_dims=[o + a for o, a in zip(self.observation_dims, self.action_dims)], s_dims=self.observation_dims, head_dim=self.head_dim, output_dim=self.action_dims)
        self.target_value_net = attention_critic(num_agent=self.env.n, sa_dims=[o + a for o, a in zip(self.observation_dims, self.action_dims)], s_dims=self.observation_dims, head_dim=self.head_dim, output_dim=self.action_dims)
        self.policy_nets = [discrete_policy_net(input_dim=self.observation_dims[n], output_dim=self.action_dims[n]) for n in range(self.env.n)]
        self.target_policy_nets = [discrete_policy_net(input_dim=self.observation_dims[n], output_dim=self.action_dims[n]) for n in range(self.env.n)]
        # Start every target network as an exact copy of its online network.
        [self.target_policy_nets[n].load_state_dict(self.policy_nets[n].state_dict()) for n in range(self.env.n)]
        self.target_value_net.load_state_dict(self.value_net.state_dict())
        self.buffer = replay_buffer(capacity=self.capacity)
        self.value_optimizer = torch.optim.Adam(self.value_net.parameters(), lr=self.learning_rate, weight_decay=1e-3)
        self.policy_optimizers = [torch.optim.Adam(self.policy_nets[n].parameters(), lr=self.learning_rate) for n in range(self.env.n)]
        self.count = 0        # total environment steps taken
        self.train_count = 0  # gradient-update rounds performed
    def soft_value_update(self):
        """Polyak-average the critic into its target: target = (1-rho)*online + rho*target."""
        for param, target_param in zip(self.value_net.parameters(), self.target_value_net.parameters()):
            target_param.detach().copy_(param.detach() * (1 - self.rho) + target_param.detach() * self.rho)
    def soft_policy_update(self, policy_idx):
        """Polyak-average one agent's policy into its target network."""
        for param, target_param in zip(self.policy_nets[policy_idx].parameters(), self.target_policy_nets[policy_idx].parameters()):
            target_param.detach().copy_(param.detach() * (1 - self.rho) + target_param.detach() * self.rho)
    def train(self):
        """Run `update_iter` rounds of critic + per-agent policy updates on sampled batches."""
        for _ in range(self.update_iter):
            observations, actions, rewards, next_observations, dones = self.buffer.sample(self.batch_size)
            # Re-shape the batch into per-agent tensors.
            indiv_observations = [torch.FloatTensor(np.vstack([observations[b][n] for b in range(self.batch_size)])) for n in range(self.env.n)]
            indiv_actions = [torch.FloatTensor([actions[b][n] for b in range(self.batch_size)]) for n in range(self.env.n)]
            one_hot_indiv_actions = [torch.zeros(self.batch_size, self.action_dims[n]) for n in range(self.env.n)]
            one_hot_indiv_actions =[one_hot_indiv_actions[n].scatter(dim=1, index=indiv_actions[n].unsqueeze(1).long(), value=1) for n in range(self.env.n)]
            rewards = torch.FloatTensor(rewards)
            indiv_rewards = [rewards[:, n] for n in range(self.env.n)]
            indiv_next_observations = [torch.FloatTensor(np.vstack([next_observations[b][n] for b in range(self.batch_size)])) for n in range(self.env.n)]
            dones = torch.FloatTensor(dones)
            indiv_dones = [dones[:, n] for n in range(self.env.n)]
            # * many times to train for same batch trajectories
            # * Critic training
            one_hot_next_actions = []
            next_actions = []
            next_log_policies = []
            for i in range(self.env.n):
                # * sampling all actions, a, from all agents’ current policies in order to calculate the gradient estimate for agent i
                next_action, next_log_policy = self.target_policy_nets[i].forward(indiv_next_observations[i], log=True)
                next_log_policies.append(next_log_policy)
                next_actions.append(next_action)
                one_hot_next_action = torch.zeros(self.batch_size, self.action_dims[i])
                one_hot_next_action.scatter_(dim=1, index=next_action, value=1)
                one_hot_next_actions.append(one_hot_next_action)
            next_q = self.target_value_net.forward(indiv_next_observations, one_hot_next_actions)
            q, reg_atten = self.value_net.forward(indiv_observations, one_hot_indiv_actions, reg=True)
            value_loss = 0
            for i in range(self.env.n):
                # * soft operation: - self.alphas[i] * next_log_policies[i]
                target_q = indiv_rewards[i].unsqueeze(1) + (1 - indiv_dones[i].unsqueeze(1)) * self.gamma * next_q[i] - self.alphas[i] * next_log_policies[i]
                target_q = target_q.detach()
                value_loss += (q[i] - target_q).pow(2).mean()
            # Attention regularization terms returned by the critic.
            for reg_a in reg_atten:
                value_loss += reg_a
            self.value_optimizer.zero_grad()
            value_loss.backward()
            # * scale the shared parameters' grad
            for p in self.value_net.get_shared_parameters():
                p.grad.data.mul_(1. / self.env.n)
            nn.utils.clip_grad_norm_(self.value_net.parameters(), 10 * self.env.n)
            self.value_optimizer.step()
            # * Policy training (counterfactual baseline + entropy bonus)
            one_hot_sample_actions = []
            sample_actions = []
            log_policies = []
            entropies = []
            all_policies = []
            reg_policies = []
            for i in range(self.env.n):
                # * sampling all actions, a, from all agents’ current policies in order to calculate the gradient estimate for agent i
                sample_action, reg_policy, log_policy, entropy, all_policy = self.policy_nets[i].forward(indiv_observations[i], explore=True, log=True, reg=True, entropy=True, all=True)
                sample_actions.append(sample_action)
                reg_policies.append(reg_policy)
                log_policies.append(log_policy)
                entropies.append(entropy)
                all_policies.append(all_policy)
                one_hot_sample_action = torch.zeros(self.batch_size, self.action_dims[i])
                one_hot_sample_action.scatter_(dim=1, index=sample_action, value=1)
                one_hot_sample_actions.append(one_hot_sample_action)
            q, all_q = self.value_net(indiv_observations, one_hot_sample_actions, all=True)
            for i in range(self.env.n):
                # Counterfactual baseline: expected Q under the agent's own policy.
                b = torch.sum(all_policies[i] * all_q[i], dim=1, keepdim=True).detach()
                # * COMA
                adv = (q[i] - b).detach()
                # * soft operation: self.alphas[i] * log_policies[i]
                policy_loss = log_policies[i] * (self.alphas[i] * log_policies[i] - adv).detach()
                policy_loss = policy_loss.mean() + reg_policies[i] * 1e-3
                self.policy_optimizers[i].zero_grad()
                # Freeze the critic while back-propagating the policy loss.
                for p in self.value_net.parameters():
                    p.requires_grad = False
                policy_loss.backward()
                for p in self.value_net.parameters():
                    p.requires_grad = True
                nn.utils.clip_grad_norm_(self.policy_nets[i].parameters(), 0.5)
                self.policy_optimizers[i].step()
            self.soft_value_update()
            for i in range(self.env.n):
                self.soft_policy_update(i)
    def run(self):
        """Main training loop: act in the env, store transitions, update, checkpoint best models."""
        max_reward = -np.inf
        weight_reward = [None for i in range(self.env.n)]
        for epi in range(self.episode):
            self.env.reset()
            if self.render:
                self.env.render()
            total_reward = [0 for i in range(self.env.n)]
            obs = self.env.reset()
            while True:
                action_indice = []
                actions = []
                for i in range(self.env.n):
                    if epi >= self.exploration:
                        action_idx = self.policy_nets[i].forward(torch.FloatTensor(np.expand_dims(obs[i], 0)), explore=True).item()
                    else:
                        # Warm-up phase: uniform random actions to fill the buffer.
                        action_idx = np.random.choice(list(range(self.action_dims[i])))
                    action = np.zeros(self.action_dims[i])
                    action[action_idx] = 1
                    actions.append(action)
                    action_indice.append(action_idx)
                next_obs, reward, done, _ = self.env.step(actions)
                if self.render:
                    self.env.render()
                self.buffer.store(obs, action_indice, reward, next_obs, done)
                self.count += 1
                total_reward = [tr + r for tr, r in zip(total_reward, reward)]
                obs = next_obs
                if (self.count % self.update_every) == 0 and epi >= self.exploration and self.batch_size <= len(self.buffer):
                    self.train_count += 1
                    self.train()
                if self.count % self.traj_len == 0:
                    # Force-terminate trajectories at a fixed length.
                    done = [True for _ in range(self.env.n)]
                if any(done):
                    if weight_reward[0] is None:
                        weight_reward = total_reward
                    else:
                        # Exponential moving average of episode returns.
                        weight_reward = [wr * 0.99 + tr * 0.01 for wr, tr in zip(weight_reward, total_reward)]
                    if sum(weight_reward) > max_reward and epi >= self.exploration:
                        torch.save(self.value_net, './models/{}/value.pkl'.format(self.env_id))
                        for i in range(self.env.n):
                            torch.save(self.policy_nets[i], './models/{}/policy{}.pkl'.format(self.env_id, i))
                        max_reward = sum(weight_reward)
                    print(('episode: {}\ttrain_count:{}\tweight_reward:' + '{:.1f}\t' * self.env.n + 'sum:{:.1f}').format(epi + 1, self.train_count, *weight_reward, sum(weight_reward)))
                    break
    def eval(self, render=True):
        """Load the saved per-agent policies and roll out episodes forever."""
        self.count = 0
        for i in range(self.env.n):
            self.policy_nets[i] = torch.load('./models/{}/policy{}.pkl'.format(self.env_id, i))
        episode = 0  # fixed: the original printed the stale loader index `i + 1`
        while True:
            obs = self.env.reset()
            total_reward = [0 for i in range(self.env.n)]
            if render:
                self.env.render()
            while True:
                time.sleep(0.05)
                actions = []
                for n in range(self.env.n):
                    # Bug fix: the original indexed with the stale `i` left over
                    # from the model-loading loop, so every agent used the last
                    # agent's policy, observation and action dimension.
                    action = np.zeros(self.action_dims[n])
                    action_idx = self.policy_nets[n].forward(torch.FloatTensor(np.expand_dims(obs[n], 0)), explore=True).item()
                    action[action_idx] = 1
                    actions.append(action)
                next_obs, reward, done, info = self.env.step(actions)
                if render:
                    self.env.render()
                total_reward = [total_reward[i] + reward[i] for i in range(self.env.n)]
                obs = next_obs
                self.count += 1
                if any(done) or self.count % self.traj_len == 0:
                    episode += 1
                    print('episode: {}\treward: {}'.format(episode, total_reward))
                    break
| deligentfool/MAAC_pytorch | model_mpe.py | model_mpe.py | py | 12,313 | python | en | code | 0 | github-code | 36 |
40211358205 | #%% [markdown]
# ## Preliminaries
#%%
from pkg.utils import set_warnings
set_warnings()
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.utils import get_random_seed
from myst_nb import glue as default_glue
from pkg.data import load_network_palette, load_node_palette, load_unmatched
from pkg.io import savefig
from pkg.perturb import (
add_edges,
remove_edges,
shuffle_edges,
add_edges_subgraph,
remove_edges_subgraph,
shuffle_edges_subgraph,
)
from pkg.plot import set_theme
from pkg.stats import degree_test, erdos_renyi_test, rdpg_test, stochastic_block_test
from pkg.utils import get_seeds
from tqdm import tqdm
# Keep figures open for inline (notebook) display after saving/gluing them.
DISPLAY_FIGS = True
# Namespace prefix for saved figures and MyST glue keys produced by this script.
FILENAME = "perturbations_unmatched_deep_dive"
def gluefig(name, fig, **kwargs):
    """Save *fig* under this script's folder, glue it for MyST, and close it
    unless inline display is enabled (``DISPLAY_FIGS``)."""
    savefig(name, foldername=FILENAME, **kwargs)
    glue(name, fig, prefix="fig")
    if DISPLAY_FIGS:
        return
    plt.close()
def glue(name, var, prefix=None):
    """Store *var* in the MyST glue registry under a key namespaced by FILENAME
    (optionally prefixed, e.g. ``fig:<FILENAME>-<name>``), without displaying it."""
    key = f"{FILENAME}-{name}"
    if prefix is not None:
        key = prefix + ":" + key
    default_glue(key, var, display=False)
# --- Script setup: theme, RNG, palettes, and the unmatched left/right graphs.
t0 = time.time()
set_theme()
rng = np.random.default_rng(8888)
network_palette, NETWORK_KEY = load_network_palette()
node_palette, NODE_KEY = load_node_palette()
neutral_color = sns.color_palette("Set2")[2]
GROUP_KEY = "simple_group"
left_adj, left_nodes = load_unmatched("left")
right_adj, right_nodes = load_unmatched("right")
left_labels = left_nodes[GROUP_KEY].values
right_labels = right_nodes[GROUP_KEY].values
# Positional indices used later to select subgraph rows/columns.
left_nodes["inds"] = range(len(left_nodes))
right_nodes["inds"] = range(len(right_nodes))
seeds = get_seeds(left_nodes, right_nodes)
#%%
# --- Experiment parameters: perturb the RIGHT hemisphere and compare it to
# --- itself, so any detected difference is due to the perturbation alone.
random_state = np.random.default_rng(8888)
adj = right_adj
nodes = right_nodes
labels1 = right_labels
labels2 = right_labels
n_sims = 1
effect_sizes = np.linspace(0, 3000, 30).astype(int)
seeds = (seeds[1], seeds[1])
n_components = 8
#%%
# Indices of Kenyon cells; used to restrict edge removal to the KC->KC subgraph.
KCs_nodes = nodes[nodes["simple_group"] == "KCs"]["inds"]
def pick_a_number(adjacency, **kwargs):
    return remove_edges_subgraph(adjacency, KCs_nodes, KCs_nodes, **kwargs)
#%%
# --- Main experiment: for each perturbation type and effect size, perturb the
# --- adjacency matrix and run each two-sample test against the original.
rows = []
tests = {
    "ER": erdos_renyi_test,
    "SBM": stochastic_block_test,
    "Degree": degree_test,
    # "RDPG": rdpg_test,
    # "RDPG-n":rdpg_test,
}
# Per-test keyword arguments (one option set per entry).
test_options = {
    "ER": [{}],
    "SBM": [{"labels1": labels1, "labels2": labels2, "combine_method": "min"}],
    "Degree": [{}],
    # "RDPG": [{"n_components": n_components, "seeds": seeds, "normalize_nodes": False}],
    # "RDPG-n": [{"n_components": n_components, "seeds": seeds, "normalize_nodes": True}],
}
perturbations = {
    "Remove edges (global)": remove_edges,
    r"Remove edges (KCs $\rightarrow$ KCs)": remove_edges_KCs_KCs
    # "Add edges (global)": add_edges,
    # "Shuffle edges (global)": shuffle_edges,
}
n_runs = len(tests) * n_sims * len(effect_sizes)
for perturbation_name, perturb in perturbations.items():
    for effect_size in tqdm(effect_sizes):
        for sim in range(n_sims):
            currtime = time.time()
            seed = get_random_seed(random_state)
            perturb_adj = perturb(adj, effect_size=effect_size, random_seed=seed)
            perturb_elapsed = time.time() - currtime
            for test_name, test in tests.items():
                option_sets = test_options[test_name]
                for options in option_sets:
                    currtime = time.time()
                    stat, pvalue, other = test(adj, perturb_adj, **options)
                    test_elapsed = time.time() - currtime
                    if test_name == "SBM":
                        # Keep the per-block (KCs, KCs) p-value for the
                        # deep-dive plots at the end of the script.
                        uncorrected_pvalues = other["uncorrected_pvalues"]
                        other["KCs_pvalues"] = uncorrected_pvalues.loc["KCs", "KCs"]
                    row = {
                        "stat": stat,
                        "pvalue": pvalue,
                        "test": test_name,
                        "perturbation": perturbation_name,
                        "effect_size": effect_size,
                        "sim": sim,
                        "perturb_elapsed": perturb_elapsed,
                        "test_elapsed": test_elapsed,
                        **options,
                        **other,
                    }
                    rows.append(row)
results = pd.DataFrame(rows)
#%%
def check_power(pvalues, alpha=0.05):
    """Empirical power: the fraction of *pvalues* at or below *alpha*."""
    n_total = len(pvalues)
    n_significant = (pvalues <= alpha).sum()
    return n_significant / n_total
# Aggregate over simulations: mean of every column per (test, perturbation,
# effect size) cell, plus empirical power computed from the raw p-values.
# NOTE(review): `.mean()` on the full groupby assumes the remaining columns
# are numeric; newer pandas versions raise on object columns — verify.
power_results = (
    results.groupby(["test", "perturbation", "effect_size"]).mean().reset_index()
)
power = (
    results.groupby(["test", "perturbation", "effect_size"])["pvalue"]
    .agg(check_power)
    .reset_index()
)
power.rename(columns=dict(pvalue="power"), inplace=True)
power_results["power"] = power["power"]
# Per-run significance indicator, with tiny Gaussian jitter so overlapping
# 0/1 values remain distinguishable in the line plots below.
results["power_indicator"] = (results["pvalue"] < 0.05).astype(float)
results["power_indicator"] = results["power_indicator"] + np.random.normal(
    0, 0.0025, size=len(results)
)
# %%
# --- Figure 1: empirical power vs. effect size, one panel per perturbation.
grid = sns.FacetGrid(
    results,
    col="perturbation",
    col_wrap=min(3, len(perturbations)),
    sharex=False,
    sharey=False,
    hue="test",
    height=6,
)
grid.map_dataframe(sns.lineplot, x="effect_size", y="power_indicator")
grid.add_legend(title="Test")
grid.set_ylabels(r"Empirical power ($\alpha = 0.05$)")
grid.set_xlabels("Effect size")
grid.set_titles("{col_name}")
gluefig("power", grid.figure)
# %%
# --- Figure 2: raw p-values vs. effect size, same layout.
grid = sns.FacetGrid(
    results,
    col="perturbation",
    col_wrap=min(3, len(perturbations)),
    sharex=False,
    sharey=False,
    hue="test",
    height=6,
)
grid.map_dataframe(sns.lineplot, x="effect_size", y="pvalue")
grid.add_legend(title="Test")
grid.set_ylabels(r"p-value")
grid.set_xlabels("Effect size")
grid.set_titles("{col_name}")
gluefig("pvalues", grid.figure)
#%%
# --- Figure 3 (deep dive): for the SBM test under targeted KC->KC removal,
# --- compare the (KCs, KCs) block p-value, the mean uncorrected block
# --- p-value, and Fisher's combined p-value.
subresults = results[results["perturbation"] == r"Remove edges (KCs $\rightarrow$ KCs)"]
subresults = subresults[subresults["test"] == "SBM"].copy()
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
sns.lineplot(
    data=subresults,
    x="effect_size",
    y="KCs_pvalues",
    ax=ax,
    label=r"KCs $\rightarrow$ KCs",
)
mean_pvalues = []
all_pvalues = []
for i in range(len(subresults)):
    row = subresults.iloc[i]
    vals = row["uncorrected_pvalues"].values
    mean = np.nanmean(vals)
    mean_pvalues.append(mean)
    # Also collect every block p-value individually (long format).
    for j, pvalue in enumerate(vals.ravel()):
        all_pvalues.append(
            {"effect_size": row["effect_size"], "pvalue": pvalue, "j": j}
        )
all_pvalues = pd.DataFrame(all_pvalues)
subresults["mean_pvalues"] = mean_pvalues
sns.lineplot(
    data=subresults, x="effect_size", y="mean_pvalues", ax=ax, label="Mean p-value"
)
ax.set(ylabel="p-value", xlabel="Effect size (# edges removed)")
sns.lineplot(data=subresults, x="effect_size", y="pvalue", label="Fisher's combined")
ax.set_title(r"Remove edges (KCs $\rightarrow$ KCs)")
gluefig("split_pvalues", fig)
| neurodata/bilateral-connectome | misc_scripts/perturbations_unmatched_deep_dive.py | perturbations_unmatched_deep_dive.py | py | 6,901 | python | en | code | 5 | github-code | 36 |
34955294737 | """
Multithreaded JSONRPCServer example
addr = "http://localhost:8848"
requests.post(addr, data='{"method": "get_data", "params":{"parser": "cpuinfo", "get": "model_name"}, "id":456}').json()
curl -X POST http://localhost:8848 -d '{"method": "get_data", "id":"2", "params":{"path":"/proc/uptime"}}'
reply
{"jsonrpc": "2.0", "result": {"uptime": {"found": {"uptime": 55}}}, "id": "2"}
"""
import sys
import parsers
from SocketServer import ThreadingMixIn
from slashproc_parser.jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
# TCP port the JSON-RPC server listens on (bound to localhost in main()).
SERVER_PORT = 8848
# Debug mode: prevent returning raw errors through to the JSON parser.
DEBUG = True
class SimpleThreadedJSONRPCServer(ThreadingMixIn, SimpleJSONRPCServer):
    # JSON-RPC server that handles each request in its own thread
    # (behavior comes entirely from the two mixed-in base classes).
    pass
class ERR():
    """Registry of error payloads returned to JSON-RPC clients."""
    err1 = "Parser not Found"
    err2 = "get param '%s' not found in groups or vars"
    @classmethod
    def msg(cls, num, param=''):
        """Return ``{'err': num, 'msg': ...}`` for error number *num*,
        interpolating *param* into templates that contain ``%s``."""
        msg = getattr(cls, 'err%s' % num)
        # Bug fix: the original computed `msg % param if ...` but discarded the
        # result (no assignment), so clients received the literal '%s'.
        if '%s' in msg:
            msg = msg % param
        return {'err': num, 'msg': msg}
def import_parsers():
    """
    Import every parser module listed in ``parsers.__all__``.

    Returns ``(names, classes)`` where *names* is a list of lower-cased
    parser class names and *classes* maps each name to its class object.
    The ``BasicSPParser`` base class is excluded.
    """
    names = list()
    registry = dict()
    for module_name in parsers.__all__:
        module = __import__('slashproc_parser.parsers.' + module_name, fromlist=[module_name])
        for attr_name in dir(module):
            candidate = getattr(module, attr_name)
            if not isinstance(candidate, type) or attr_name in ['BasicSPParser']:
                continue
            key = candidate.__name__.lower()
            names.append(key)
            registry[key] = candidate
    return (names, registry)
def input_validation(path, parser, get):
    """Normalise the raw ``path`` / ``parser`` / ``get`` request arguments.

    Returns a ``(parser_name, get_list)`` pair.  A non-empty ``path`` takes
    priority: its first component is the parser and the remainder are the
    requested items.  Otherwise ``parser`` is used, with any of its extra
    components appended to ``get``.  Returns ``(None, None)`` when no parser
    can be determined.  A leading 'proc' component is stripped everywhere.
    """
    SEPARATORS = "., |"

    def tokenize(txt):
        # Accept strings or lists; treat '.', ',', ' ' and '|' as path
        # separators and drop empty components.
        if not txt:
            return list()
        if isinstance(txt, list):
            txt = '/'.join(txt)
        for sep in SEPARATORS:
            txt = txt.replace(sep, '/')
        return [token for token in txt.split('/') if token != '']

    path = tokenize(path)
    if path:
        if path[0] == 'proc':
            path.pop(0)
        return path[0], path[1:]
    parser = tokenize(parser)
    if not parser:
        return None, None
    if parser[0] == 'proc':
        parser.pop(0)
    get = tokenize(get)
    get.extend(parser[1:])
    return parser[0], get
def get_parsers():
    """Return the list of available parser names."""
    return import_parsers()[0]
def get_groups(path=None, parser=None, get=None):
    """
    Method to return one or more group descriptors.
    {"method": "get_groups",
     "params": {
        "path": "/core1",
        #or
        "parser": "[/proc/cpuinfo|cpuinfo]",
        "get": "core1"
        }}
    Usage:
        path: location to single var or group
        or
        parser: the parser
        get: a csv string or list of groups

    Returns {'found': {...}} and/or {'notfound': [...]}, or an ERR payload
    when the parser cannot be resolved.
    """
    names, classes = import_parsers()
    parser, get = input_validation(path, parser, get)
    if not parser or parser not in names:
        return ERR.msg(1)
    groups = classes[parser].get_groups()
    if not get or 'all' in get or 'star' in get:
        return {'found': groups}
    #TODO if desc just return desc
    ret = dict()
    for g in get:
        if g in groups:
            if 'found' in ret:
                ret['found'][g] = groups[g]
            else:
                ret['found'] = {g: groups[g]}
        else:
            # Bug fix: the original called `notfound.append(g)` on an undefined
            # name, raising NameError as soon as a second group was missing.
            if 'notfound' in ret:
                ret['notfound'].append(g)
            else:
                ret['notfound'] = [g]
    return ret
def get_vars(path=None, parser=None, get=None):
    """
    Method to return the var descriptors.
    {"method": "get_vars",
     "params": {
        "path": "cpuinfo/v1",
        #or
        "parser": "cpuinfo",
        "get": "v1 v2"
        }}
    Usage:
        path: location to single var
        parser: the parser
        get: a csv string or list of vars

    Returns {'found': {...}} and/or {'notfound': [...]}, or an ERR payload
    when the parser cannot be resolved.
    """
    names, classes = import_parsers()
    parser, get = input_validation(path, parser, get)
    if not parser or parser not in names:
        return ERR.msg(1)
    thevars = classes[parser].get_vars()
    if not get or 'all' in get or 'star' in get:
        return {'found': thevars}
    ret = dict()
    for g in get:
        if g in thevars:
            if 'found' in ret:
                ret['found'][g] = thevars[g]
            else:
                ret['found'] = {g: thevars[g]}
        else:
            # Bug fix: the original called `notfound.append(g)` on an undefined
            # name, raising NameError as soon as a second var was missing.
            if 'notfound' in ret:
                ret['notfound'].append(g)
            else:
                ret['notfound'] = [g]
    return ret
def get_data(path=None, parser=None, get=None):
    """
    Method to return the parsed data.
    {"method": "get_data",
     "params": {
        "path": "/core1",
        #or
        "parser": "cpuinfo",
        "get": "g1, g2, var1, var2"
        }}
    Usage:
        path: location to single var or group
        parser: the parser
        get: a csv string or list of groups and vars

    Returns {'found': {path: value, ...}} and/or {'notfound': [...]}, or an
    ERR payload when the parser cannot be resolved.
    """
    names, classes = import_parsers()
    parser, get = input_validation(path, parser, get)
    if not parser or parser not in names:
        return ERR.msg(1)
    # Removed unused locals from the original, which also called
    # get_groups()/get_vars() and discarded the results (presumed pure
    # getters — verify they have no required side effects).
    data = classes[parser].get_data()
    if not get:
        return {'found': data}
    ret = dict()
    found = list()
    def recurse_dict(dct, pth, get):
        # Depth-first walk; record any key that matches a requested name,
        # keyed by its slash-joined path.  Matching keys are not descended.
        for k in dct.keys():
            if k in get:
                if k not in found:
                    found.append(k)
                ret[pth+'/'+k] = dct[k]
            elif isinstance(dct[k], dict):
                recurse_dict(dct[k], pth+'/'+k, get)
    recurse_dict(data, '', get)
    # Whatever was not matched anywhere is reported as notfound.
    for i in found:
        get.remove(i)
    retdict = dict()
    if ret:
        retdict['found'] = ret
    if get:
        retdict['notfound'] = get
    return retdict
def main():
    """Start the threaded JSON-RPC server on localhost:SERVER_PORT (blocks forever)."""
    server = SimpleThreadedJSONRPCServer(('localhost', SERVER_PORT))
    # Expose the four query methods over JSON-RPC.
    server.register_function(get_parsers)
    server.register_function(get_groups)
    server.register_function(get_vars)
    server.register_function(get_data)
    server.serve_forever()
if __name__ == '__main__':
    main()
| niallobroin/slashproc_parsers | slashproc_parser/basic_server.py | basic_server.py | py | 6,073 | python | en | code | 0 | github-code | 36 |
16528708499 | from pywebio.input import *
from pywebio.output import *
from pywebio import start_server
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import io
def data_gen(num=100):
    """
    Draw *num* samples from the standard normal distribution.
    """
    return np.random.normal(size=num)
def plot_raw(a):
    """
    Build and return a 12x5 line-plot figure of the samples in *a*.
    """
    plt.close()
    figure = plt.figure(figsize=(12, 5))
    plt.title(f"Line plot of {len(a)} samples", fontsize=16)
    plt.plot(a)
    return figure
def plot_hist(a):
    """
    Build and return a 12x5 histogram figure of the samples in *a*.
    """
    plt.close()
    figure = plt.figure(figsize=(12, 5))
    plt.title(f"Histogram of {len(a)} samples", fontsize=16)
    plt.hist(a, color='orange', edgecolor='k')
    return figure
def fig2img(fig):
    """
    Convert a Matplotlib figure to a PIL Image and return it.
    """
    # Render into an in-memory buffer; the buffer is left open because
    # PIL reads from it lazily.
    buffer = io.BytesIO()
    fig.savefig(buffer)
    buffer.seek(0)
    return Image.open(buffer)
def Generate(num=100):
    """
    Generate *num* random samples and render both plots into the 'raw' scope.
    Called from the main app after the user enters a sample count.
    """
    remove(scope='raw')
    # Dropped the unused `as img` alias and stray trailing comma from the
    # original; `clear=True` already empties the scope before re-drawing.
    with use_scope(name='raw', clear=True):
        samples = data_gen(num)
        put_image(fig2img(plot_raw(samples)))
        put_image(fig2img(plot_hist(samples)))
def app():
    """
    Main PyWebIO app: show the intro, ask for a sample count, render plots.
    """
    put_markdown("""
    # Matplotlib plot demo
    ## [Dr. Tirthajyoti Sarkar](https://www.linkedin.com/in/tirthajyoti-sarkar-2127aa7/)
    We show two plots from [random gaussian samples](https://en.wikipedia.org/wiki/Normal_distribution). You choose the number of data points to generate.
    - A line plot
    - A histogram
    """, strip_indent=4)
    # Blocks until the user submits a number, then renders both figures.
    num_samples = input("Number of samples", type=NUMBER)
    Generate(num_samples)
    put_markdown("""## Code for this app is here: [Code repo](https://github.com/tirthajyoti/PyWebIO/tree/main/apps)""")
if __name__ == '__main__':
    # Serve the app on port 9999 with live-reload debugging enabled.
    start_server(app,port=9999,debug=True)
26090415788 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from basic.bupt_2017_11_28.type_deco import prt
import joblib
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from basic.bupt_2017_11_28.type_deco import prt
import seaborn as sns
from basic.bupt_2018_1_19.unionfind import UF
'''
User:waiting
Date:2018-01-19
Time:9:45
'''
class Point:
    # A 2-D point; x/y are compared directly by the union-find predicates
    # and slope computation below.
    def __init__(self,x,y):
        self.x = x
        self.y = y
def mxpotontheline(points:list):
    """Size of the largest group of points sharing an x- or y-coordinate.

    NOTE(review): despite the name, this only considers axis-aligned lines
    (via UF grouping on equal x, then equal y); see mxpotontheline2 for
    arbitrary slopes.
    """
    uf_x = UF(points, lambda p1, p2: p1.x == p2.x)
    uf_y = UF(points, lambda p1, p2: p1.y == p2.y)
    best = 0
    for uf in (uf_x, uf_y):
        uf.grouping()
        for members in uf.groups.values():
            best = max(best, len(members))
    return best
def cal_slope(p1, p2):
    """Slope of the line through *p1* and *p2* as an exact Decimal
    (``float('inf')`` for vertical lines).

    Bug fix: the original relied on ``Decimal`` being imported inside the
    ``if __name__ == '__main'`` guard — which was mis-spelled and never ran —
    so any call raised NameError.  The import now lives in the function.
    """
    from decimal import Decimal  # exact division avoids float-precision slope collisions
    if p1.x == p2.x:
        return float('inf')
    return Decimal(p1.y - p2.y) / Decimal(p1.x - p2.x)
def mxpotontheline2(points:list):
    """Maximum number of points lying on a single straight line.

    O(n^2): for every anchor point, hash the slope to each other point and
    take the best bucket; duplicate points count toward every line through
    the anchor.  (Removed the leftover debug ``print(d)`` that ran once per
    anchor inside the loop.)
    """
    if len(points) < 1:
        return 0
    if len(points) == 2:
        return 2
    ans = 1
    from collections import defaultdict
    for i in range(len(points)):
        d = defaultdict(int)  # slope -> count of points collinear with points[i]
        same = 0              # exact duplicates of points[i]
        for j in range(i+1, len(points)):
            if points[i].x == points[j].x and points[i].y == points[j].y:
                same += 1
            else:
                d[cal_slope(points[i], points[j])] += 1
        if not d:
            # Every remaining point duplicated the anchor.
            d[float('inf')] = 0
        for key in d:
            d[key] += same
        ans = max(ans, max(d.values()) + 1) if d else ans
    return ans
# Bug fix: the guard was spelled '__main' (missing trailing underscores), so
# this demo block never executed when the file was run as a script.
if __name__ == '__main__':
    from decimal import Decimal
    d = defaultdict(int)
    # Collinearity demo with large coordinates where float slopes collide.
    print(mxpotontheline2([Point(0, 0), Point(94911151, 94911150), Point(94911152, 94911151)]))
    x = Decimal(94911150) / Decimal(94911151)
    y = Decimal(949111500) / Decimal(949111510)
22568917957 | from .workspace import get_workspace_location, get_workspace_state, resolve_this
from .cache import Cache
from .config import Config
from .resolver import find_dependees
from .ui import warning, fatal, show_conflicts
from .cmd_git import has_package_path, get_head_branch
from .util import iteritems, yaml_dump
from pygit2 import Repository
import os
def compute_git_subdir(name, used_paths):
index = 1
result = name
while result in used_paths:
index += 1
result = "%s-%d" % (name, index)
used_paths.add(result)
return result
def get_current_remote(path):
repo = Repository(os.path.join(path, ".git"))
if not repo.remotes:
warning("no remote found for Git repository in %s\n" % path)
return None, None
head_branch = get_head_branch(repo)
tracking_branch = head_branch.upstream if head_branch else None
remote_name = tracking_branch.remote_name if tracking_branch else None
remote = repo.remotes[remote_name] if remote_name else repo.remotes[0]
url = remote.url
version = None
if tracking_branch:
b = tracking_branch.branch_name
if b.startswith(remote_name + "/"):
b = b[len(remote_name) + 1:]
version = b
return url, version
def run(args):
wsdir = get_workspace_location(args.workspace)
config = Config(wsdir)
cache = Cache(wsdir)
if args.offline is None:
args.offline = config.get("offline_mode", False)
if args.offline:
warning("offline mode. Run 'rosrepo config --online' to disable\n")
ws_state = get_workspace_state(wsdir, config, cache, offline_mode=args.offline)
if args.this:
args.packages = resolve_this(wsdir, ws_state)
if args.all:
args.packages = ws_state.ws_packages.keys()
if not args.packages:
args.packages = config.get("default_build", []) + config.get("pinned_build", [])
protocol = args.protocol or config.get("git_default_transport", "ssh")
depends, _, conflicts = find_dependees(args.packages, ws_state)
show_conflicts(conflicts)
if conflicts:
fatal("cannot resolve dependencies\n")
paths = set()
remote_projects = set()
for name, pkg in iteritems(depends):
if hasattr(pkg, "workspace_path") and pkg.workspace_path is not None:
paths.add(pkg.workspace_path)
elif name in ws_state.remote_packages:
remote_projects.add(pkg.project)
ws_projects = set([p for p in ws_state.ws_projects if has_package_path(p, paths)])
other_git = set([g for g in ws_state.other_git if has_package_path(g, paths)])
yaml = []
for prj in ws_projects:
url, version = get_current_remote(os.path.join(wsdir, "src", prj.workspace_path))
if args.protocol:
url = prj.url[args.protocol]
packages = {}
for p in prj.packages:
if p.manifest.name in depends.keys():
packages[p.manifest.name] = p.project_path or "."
meta = {"packages": packages}
d = {"local-name": prj.workspace_path, "uri": url, "meta": meta}
if version:
d["version"] = version
yaml.append({"git": d})
for p in other_git:
url, version = get_current_remote(os.path.join(wsdir, "src", p))
d = {"local-name": p, "uri": url}
if version:
d["version"] = version
yaml.append({"git": d})
for prj in remote_projects:
packages = {}
for p in prj.packages:
if p.manifest.name in depends.keys():
packages[p.manifest.name] = p.project_path or "."
meta = {"packages": packages}
d = {"local-name": compute_git_subdir(prj.server_path, paths), "uri": prj.url[protocol], "version": prj.master_branch, "meta": meta}
yaml.append({"git": d})
if yaml:
args.output.write(yaml_dump(yaml, encoding="UTF-8", default_flow_style=False))
| fkie/rosrepo | src/rosrepo/cmd_export.py | cmd_export.py | py | 3,924 | python | en | code | 5 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.