blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
edb0dd2db6e36653e6d52a2316956c6790eed846 | e3fe44de92acb664c65dfc53c4dc7450ea994c81 | /utils/fast_inverse.py | 1ede5e863c4570d8c6bc7af2a213ff1c3677bc66 | [] | no_license | chixma/Visual-Template-Free-Form-Parsing | a723022857b1a9adb641d1f9c9f39564ed65f67f | b05e80f5fbc5b1296ab1e4b968ae206b7e556e4b | refs/heads/master | 2020-07-22T13:27:39.873315 | 2019-09-06T20:42:32 | 2019-09-06T20:42:32 | 207,217,082 | 1 | 0 | null | 2019-09-09T03:33:43 | 2019-09-09T03:33:43 | null | UTF-8 | Python | false | false | 1,420 | py | import numpy as np
import torch
def adjoint(A):
    """Compute the matrix of cofactors of a stack of 3x3 matrices.

    Row ``i`` of the result is the cross product of the other two rows of
    ``A``, so the result equals ``det(A) * inverse-transpose(A)`` — i.e. the
    inverse "without division by det".  Input is ``...x3x3`` (an array of
    matrices is assumed).
    """
    AI = np.empty_like(A)
    # range(), not the Python-2-only xrange(), which raises NameError on Py3.
    for i in range(3):
        # Indices i-2 and i-1 wrap around, picking the two rows other than i.
        AI[..., i, :] = np.cross(A[..., i - 2, :], A[..., i - 1, :])
    return AI
def inverse_transpose(A):
    """
    Efficiently compute the inverse-transpose for a stack of 3x3 matrices.
    """
    cof = adjoint(A)
    # The row-wise dot of the cofactor matrix with A gives det(A) in every
    # component, so the mean over the last axis is exactly det(A).
    det = dot(cof, A).mean(axis=-1)
    return cof / det[..., None, None]
def inverse(A):
    """Inverse of a stack of 3x3 matrices."""
    # The inverse is the inverse-transpose with its last two axes swapped.
    flipped = inverse_transpose(A)
    return np.swapaxes(flipped, -1, -2)
def dot(A, B):
    """Dot product of arrays of vectors; contracts over the last index."""
    contraction = '...i,...i->...'
    return np.einsum(contraction, A, B)
def adjoint_torch(A):
    """Matrix of cofactors of a stack of 3x3 matrices (torch analogue of
    ``adjoint``): row ``i`` is the cross product of the other two rows, so
    the result equals ``det(A) * inverse-transpose(A)``.
    """
    AI = A.clone()
    # range(), not the Python-2-only xrange(), which raises NameError on Py3.
    for i in range(3):
        # dim=-1 crosses along the vector (last) axis.  The legacy default of
        # torch.cross picks the *first* dimension of size 3, which silently
        # computes a wrong result for a batch of exactly 3 matrices.
        AI[..., i, :] = torch.cross(A[..., i - 2, :], A[..., i - 1, :], dim=-1)
    return AI
def inverse_transpose_torch(A):
    """Inverse-transpose of a stack of 3x3 matrices (torch analogue of
    ``inverse_transpose``)."""
    I = adjoint_torch(A)
    # Row-wise dot of the cofactor matrix with A yields det(A) in every
    # component, so the mean over the last dim is det(A).
    det = dot_torch(I, A).mean(dim=-1)
    # `...` instead of `:` so stacks of any rank work (the original
    # det[:, None, None] hard-coded 3-D input), matching the numpy version.
    return I / det[..., None, None]
def inverse_torch(A):
    """Inverse of a stack of 3x3 matrices (torch analogue of ``inverse``)."""
    # Swap the last two axes; negative dims generalize beyond 3-D stacks and
    # mirror np.swapaxes(..., -1, -2) in the numpy implementation.
    return inverse_transpose_torch(A).transpose(-1, -2)
def dot_torch(A, B):
    """Row-wise dot product of two tensors of 3-vectors; contracts the last
    index via a batched matrix multiply."""
    lhs = A.view(-1, 1, 3)
    rhs = B.contiguous().view(-1, 3, 1)
    prods = torch.bmm(lhs, rhs)
    # Collapse the (N, 1, 1) products back to the batch shape of A.
    return prods.view(A.size()[:-1])
if __name__ == "__main__":
    # Smoke test: invert a random stack of two 3x3 matrices with both the
    # numpy and the torch implementation and print both for visual comparison.
    A = np.random.rand(2,3,3)
    I = inverse(A)
    # torch.from_numpy shares memory with A (no copy).
    A_torch = torch.from_numpy(A)
    I_torch = inverse_torch(A_torch)
    print(I)
    print(I_torch)
| [
"herobd@gmail.com"
] | herobd@gmail.com |
77026452cda18ad85c5e6d02b5d3099f9aa2fe12 | 067be5ec092bc7d54d1a7baa7c1400d5aa6df7fc | /Kim2/Ksamplefun.py | ac3924d3e47194466ea574a024a9f3ce7d8ec471 | [] | no_license | haru105/Masters-projects | 0d7506a21be4f04f0455001e5cb79545aea54667 | e1ab384809a4bd4a3de5759047d36d8cf715a171 | refs/heads/main | 2023-08-26T05:27:13.431747 | 2021-11-12T13:55:07 | 2021-11-12T13:55:07 | 402,170,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,268 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 20 22:55:44 2021
@author: amreethrajan
"""
import sys
import networkx as nx
import matplotlib.pyplot as plt
import random
import networkx.algorithms.graphical as funs
import copy
import binarySearch as bs
class KimG:
    """Sample a simple graph realising a given degree sequence by repeatedly
    saturating the current highest-degree node (the "hub"), in the style of
    Kim's sampling algorithm (see the plot title below).
    """
    def __init__(self,n,degrees):
        """Pair each of the `n` nodes with its target degree, verify the
        sequence is graphical (Erdos-Gallai), then run the sampler and draw
        the resulting graph with matplotlib.
        """
        self.number_of_nodes=n
        self.DegList=degrees
        # Sorting in place also reorders the caller's list.
        self.DegList.sort(reverse=True)
        self.NodeList=[]
        self.G=nx.Graph()
        for i in range(1,n+1):
            self.NodeList.append(i)
        self.G.add_nodes_from(self.NodeList)
        self.DS=[]
        #DS is a double list with nodes and degrees
        for i in range(0,self.number_of_nodes):
            self.DS.append([self.NodeList[i],self.DegList[i]])
        if funs.is_valid_degree_sequence_erdos_gallai(self.DegList)==True:
            self.SampGen()
            nx.draw(self.G,with_labels=True, node_color="white",edgecolors='black', font_weight='bold')
            plt.title("Kim's sampling Graph")
            plt.show()
        else:
            print("The inputted sequence is not graphical and cannot be processed")
    def SampGen(self):
        """Core sampling loop: pick the highest-degree node as hub, connect it
        to randomly chosen allowed partners until its residual degree is zero,
        drop saturated nodes, and repeat until every degree is satisfied.
        Mutates self.DS/self.DegList and adds edges to self.G.
        """
        # NOTE(review): DS1 appears unused after this point -- confirm.
        self.DS1=copy.copy(self.DS)
        saturation=0
        hub=0
        chosen_node=0
        #the loop goes on until saturation
        while saturation==False:
            allowed_nodes=[]
            forbidden_nodes=[]
            #First node is assigned as hub node, and is added to the forbidden list
            forbidden_nodes.append(self.DS[0][0])
            hub=self.DS[0][0]
            print("\n\nHub node: ",hub)
            print("Degree sequence before making any connections: ",self.DS)
            print("Connection #1 i.e. the straightforward one:")
            #The first connection to the hub node never causes failure
            #So, we pick a random node and make connections
            #Everything other than the first node is added to the allowed list
            allowed_nodes=[x[0] for x in self.DS[1:]]
            #Node is randomly picked from the allowed list
            chosen_node=random.choice(allowed_nodes)
            print(chosen_node," is the chosen node from the list ",allowed_nodes)
            #Locate the randomly chosen node in the double list DS
            #and reduce the degrees of the hub node and the chosen node
            for i in self.DS:
                if i[0]==chosen_node:
                    i[1]-=1
                    self.DS[0][1]-=1
                    break
            #Introduce edge between hub node and chosen node
            self.G.add_edge(hub,int(chosen_node))
            #Add chosen node to the forbidden nodes as well
            forbidden_nodes.append(chosen_node)
            print("Connection successful")
            print("Forbidden nodes: ",forbidden_nodes)
            hub_degree=self.DS[0][1]
            #This loop goes on until the hub node is saturated
            while hub_degree!=0:
                print("-----------------------------")
                print("The degree to be satisfied: ",hub_degree)
                #reset allowed nodes
                allowed_nodes=[]
                non_forbidden_nodes=[]
                #first collect nodes that are not in the forbidden list
                for i in self.DS:
                    if (i[0] not in forbidden_nodes) and (i[1]!=0):
                        non_forbidden_nodes.append(i)
                print("Nodes that might be eligible for connection ",[x[0] for x in non_forbidden_nodes])
                non_forbidden_nodes.sort(key=lambda x:x[1], reverse=True)
                leftmost_set=[]
                # Deep copy so the temporary "test" connections below can be
                # rolled back before the real connection is made.
                DS2=copy.deepcopy(self.DS)
                if hub_degree!=1:
                    #build leftmost set [if degree of the hub node is three,
                    #then in a non increasing list, the first three nodes
                    #form the leftmost set. leftmost nodes don't cause failure
                    #, so they need to be removed to locate where the failure
                    # happens]
                    for i in range(0,hub_degree):
                        leftmost_set.append(non_forbidden_nodes.pop(0))
                    #we connect the hub node to all but one nodes from the leftmost
                    #set so that the hub node's degree is 1
                    for i in self.DS:
                        if i in leftmost_set[0:(len(leftmost_set)-1)]:
                            self.DS[0][1]-=1
                            i[1]-=1
                else:
                    #if the hub degree is already one, we don't need to make
                    #temporary connections
                    leftmost_set.append(non_forbidden_nodes.pop(0))
                print("Leftmost adjacent set: ",leftmost_set)
                print("Temporarily joining hub node to reduce it's degree to 1")
                print("Temporarily changed degree sequence: ",self.DS)
                #the bigger degree sequence
                if non_forbidden_nodes!=[]:
                    for i in range(0,len(self.DS)):
                        if self.DS[i][0]==hub:
                            self.DS[i][1]-=1
                    #we reduce the hub node's degree to 0 before checking for
                    #failure nodes.
                    # NOTE(review): bs.binarySearch is expected to return the
                    # candidates with failure nodes filtered out -- confirm
                    # against binarySearch.py.
                    valid_nodes=bs.binarySearch(non_forbidden_nodes,self.DS)
                    #the binary search function returns the non forbidden list
                    #with all the failure nodes returned
                else:
                    #sometimes there are no valid non-forbidden nodes
                    valid_nodes=[]
                print("Leftmost set is ",leftmost_set)
                leftmost_nodes=[x[0] for x in leftmost_set]
                #ultimately the leftmost nodes and the other valid nodes are
                #combined to make up the allowed list
                print("appending ",leftmost_nodes," and ",valid_nodes)
                for i in leftmost_nodes:
                    allowed_nodes.append(i)
                if valid_nodes!=[]:
                    for i in valid_nodes:
                        allowed_nodes.append(i)
                print("Allowed nodes after removing failure nodes: ",allowed_nodes)
                #Restoring degree sequence to before temporary test connections
                self.DS=copy.deepcopy(DS2)
                #Once the allowed set is built, a node is picked at random
                chosen_node=random.choice(allowed_nodes)
                print("Chosen node : ",chosen_node)
                #Make connection and update degree sequence
                for i in self.DS:
                    if i[0]==chosen_node:
                        i[1]-=1
                        self.DS[0][1]-=1
                        break
                self.G.add_edge(hub,int(chosen_node))
                print("Connection successful")
                #After every connection for a hub node, we add the node to the
                #forbidden list
                forbidden_nodes.append(chosen_node)
                hub_degree=self.DS[0][1]
            print("-----------------------------")
            print(hub," has been fully saturated")
            DS3=copy.copy(self.DS)
            #Remove nodes that have been saturated
            for i in range(0,len(DS3)):
                if DS3[i][1]==0:
                    self.DS.remove(DS3[i])
            self.DS.sort(key= lambda x:x[1],reverse=True)
            print("Newly sorted list: ",self.DS)
            self.DegList=[x[1] for x in self.DS]
            #Check if all nodes are saturated
            if self.DegList==len(self.DegList)*[0] or self.DegList==[]:
                saturation=True
                print("All nodes have been saturated\nGraph generation done! ")
"noreply@github.com"
] | noreply@github.com |
548341b3609c2e37fb84bdeb89713f9e20cfe4e5 | 160213f69c7e8e7fe3286a18e5844a043f3316c0 | /sirenorder/wsgi.py | 261723f33581245f1e7bf1b53b5857064b50727c | [] | no_license | wecode-bootcamp-korea/siren-order-backend | beb355fa911075a1912e6600ecadfed15b79cf0b | 7d80994190a8cb3b5b97019f6e5a4e693b442f02 | refs/heads/develop | 2022-12-10T09:38:54.748469 | 2019-07-26T07:50:03 | 2019-07-26T07:50:03 | 196,929,298 | 0 | 4 | null | 2022-12-08T05:53:04 | 2019-07-15T05:22:47 | Python | UTF-8 | Python | false | false | 397 | py | """
WSGI config for sirenorder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already
# specifies a settings module.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sirenorder.settings')
# Module-level WSGI callable named ``application`` (see module docstring).
application = get_wsgi_application()
| [
"fergith@naver.com"
] | fergith@naver.com |
40723c472ada5bfb1875a6690c1f32bc5eb18c60 | ee193c0df3494673769e107094402a28bfbeba74 | /remove_topics.py | 49749093c06a9e673d5cc0b266c106b56ab9ef08 | [] | no_license | always-akshat/summarization | a41ad675989712def846af32d49ec87a65fe826d | 25f65abfcd2301c4879e8f21e5f5d0e4973aa31c | refs/heads/master | 2020-04-14T09:25:03.713117 | 2014-10-28T05:50:40 | 2014-10-28T05:50:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | __author__ = 'akshat'
| [
"always.akshat@gmail.com"
] | always.akshat@gmail.com |
71c6916798618aaa9ad552378b8683d49506a7c7 | ac790944937b6e33f84372ce2d16bde55a84e3b9 | /doc_builder/markdown_extractor.py | 171481e2747cbeb70475be84151c354673826557 | [] | no_license | barriebarnes/markacross | 25bcfcddb762e2234d256532d39e043cd32b1c54 | ee5cf45fd5880ae160778a05be2a8982740b5df6 | refs/heads/master | 2020-12-25T14:49:13.892101 | 2016-07-12T08:18:15 | 2016-07-12T08:18:15 | 63,105,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,065 | py | import fileinput
import re
import pprint
class MarkdownExtractor(object):
    """ M<<<<<<<<<<<<<<<<<<<<<<<<<
    # Class: MarkdownExtractor
    Extracts all Markdown from a given file
    >>>>>>>>>>>>>>>>>>>>>>>>>>>>>M """

    def __init__(self, container):
        # container is a service locator; extract() asks it for the
        # "DocBlockFormatter" service.
        self.container = container

    def extract(self, filename):
        """ M<<<<<<<<<<<<<<<<<<<<<<<<<
        ### Method: extract
        Reads the given file and extracts and concatenates all markdown and returns it as an array of paragraphs.
        Markdown is held between the following lines
        ```
        /* M<<<<<<<<<
        >>>>>>>>>M */
        ```
        where there should be 10 or more "<" or >" signs.
        N.B. the markdown between these two boundaries is referred to as a paragraph.
        As most Markdown will be indented to match the surrounding code, that indentation is removed during extraction.
        As some Markdown will be contained with DocBlocks with a leading "*", this too is removed during extraction.
        DocBlock syntax is formatted appropriately
        @param string filename
        @return array - paragraphs of Markdown extracted from the given file
        >>>>>>>>>>>>>>>>>>>>>>>>M """
        markdown_found = False
        self.markdown_paragraphs = []
        paragraph = ""
        line_num = 0
        self.doc_block_prefix = None
        docblock_formatter = self.container.get("DocBlockFormatter")
        for line in fileinput.input(filename):
            line_num += 1
            self._check_for_docblock_start(line)
            self._check_for_docblock_end(line)
            if self._is_end_marker(line):
                if markdown_found:
                    markdown_found = False
                    self.markdown_paragraphs.append(paragraph)
                    paragraph = ""
                else:
                    self.markdown_paragraphs.append("On line %i, markdown end-marker found without a previous start-marker in file %s\n" % (line_num, filename))
            elif self._is_start_marker(line):
                if not markdown_found:
                    self._determine_start_marker_line_offset(line)
                    markdown_found = True
                else:
                    self.markdown_paragraphs.append("On line %i, markdown start-marker found when expecting an end-marker in file %s\n" % (line_num, filename))
            elif markdown_found:
                line = self._remove_line_prefix(line)
                line = docblock_formatter.reformat_line(line)
                # Trailing two-space + newline is Markdown's hard line break.
                line = line.replace("\n", " \n")
                paragraph = self._add_markdown_line_to_paragraph(line, paragraph)
        # BUG FIX: the original `fileinput.close` merely referenced the
        # function without calling it, so the fileinput stream stayed open.
        fileinput.close()
        # Capture remaining markdown (if any)
        if paragraph != "":
            self.markdown_paragraphs.append(paragraph)
        return self.markdown_paragraphs

    def _check_for_docblock_start(self, line):
        """
        Checks whether the given line is the start of a DocBlock and, if so, records its style
        N.B. this is needed to remove (for example) stars from the start of all subsequent lines
        """
        # Raw strings so regex escapes are not mangled by Python string escaping.
        patn = re.compile(r'^[ \t]*\/\*\*')
        match = patn.match(line)
        if match:
            self.doc_block_prefix = r"\*"

    def _check_for_docblock_end(self, line):
        """
        Checks whether the given line is the end of a DocBlock and, if so, reset its style record
        """
        patn = re.compile(r'[\s\S]*\*\/[ \t]*[\r\n]+$')
        match = patn.match(line)
        if match:
            self.doc_block_prefix = None

    def _is_start_marker(self, line):
        """
        Check for a line that starts a comment and contains 10 or more "<" characters
        e.g. /* M<<<<<<<<<
        """
        patn = re.compile(r'(^[\s\S]*[Mm]{1}[<]{10,})[ \t]*[\r\n]+$')
        match = patn.match(line)
        return (match != None)

    def _is_end_marker(self, line):
        """
        Check for a line that ends a comment and contains 10 or more ">" characters
        e.g. >>>>>>>>>M */
        """
        patn = re.compile(r'^[ \t]*[>]{10,}[Mm]{1}[\t \S]*[\S]+[ \t]*[\r\n]+$')
        match = patn.match(line)
        return (match != None)

    def _determine_start_marker_line_offset(self, line):
        """
        Count the number of spaces before the start marker in the given line.
        """
        offset = self._count_leading_white_spaces(line)
        self.line_offset = offset

    def _remove_line_prefix(self, line):
        """
        If there's leading white space on the string, strip off as much as was found for the paragraph's start marker.
        The property self.line_offset was set to this value.
        Also remove any doc_block prefix that maybe applied
        """
        leading_spaces_count = self._count_leading_white_spaces(line)
        min_offset = min(leading_spaces_count, self.line_offset)
        line = line[min_offset:]
        if self.doc_block_prefix == None:
            return line
        else:
            patn = re.compile(r'^\s*' + self.doc_block_prefix + r'[ ]?([\s\S]*)$')
            match = patn.match(line)
            if match == None:
                return line
            else:
                return match.group(1)

    def _add_markdown_line_to_paragraph(self, line, paragraph):
        """
        Add the given line to the given paragraph appending a line feed in the process.
        """
        return paragraph + line

    def _count_leading_white_spaces(self, line):
        """
        Determine the number of spaces at the start of the given line.
        Expand tabs into 4 spaces (might not always be the case but at least internally consistent)
        """
        patn = re.compile(r'^(\s*)[\s\S]*$')
        match = patn.match(line)
        starting_spaces = match.group(1)
        starting_spaces = starting_spaces.replace("\t", "    ")
        return len(starting_spaces)
| [
"barrie.barnes@sainsburys.co.uk"
] | barrie.barnes@sainsburys.co.uk |
77b98aa829530524b65763daa60caaacde33ebbb | 6a0ae86bca2d2ece6c92efd5594c0e3b1777ead7 | /EDBRCommon/python/datasets/summer12_MWp_350_gg_cff.py | 990eb65ae0bde2bdcbdbdfc05e23b0bd86d0be75 | [] | no_license | wangmengmeng/ExoDiBosonResonances | c4b5d277f744e1b1986df9317ac60b46d202a29f | bf5d2e79f59ad25c7a11e7f97552e2bf6a283428 | refs/heads/master | 2016-09-06T14:54:53.245508 | 2014-06-05T15:02:37 | 2014-06-05T15:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,971 | py | import FWCore.ParameterSet.Config as cms
# Input file list; populated by the readFiles.extend([...]) call below.
readFiles = cms.untracked.vstring()
# PoolSource reading the EDBR PAT-tuple files listed in readFiles.
source = cms.Source("PoolSource",
                    noEventSort = cms.untracked.bool(True),
                    duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
                    fileNames = readFiles
)
readFiles.extend([
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_10_1_BYR.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_11_1_mnN.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_12_1_p9d.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_13_1_mZc.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_14_1_I6o.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_15_1_NBO.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_16_1_L2E.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_17_1_WWV.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_18_1_O71.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_19_1_gsK.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_1_1_WXk.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_20_1_J7a.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_21_1_mmN.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_22_1_I5O.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_23_1_Viu.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_24_1_Voj.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_25_1_uHN.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_26_1_uew.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_27_1_Gaa.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_28_1_e7z.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_29_1_FfO.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_2_1_HVa.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_30_1_gOp.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_31_1_V1o.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_32_1_c1S.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_33_1_rI6.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_34_1_JGi.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_35_1_agq.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_36_1_Yfb.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_37_1_TY7.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_38_1_zd0.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_39_1_L7L.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_3_1_Scl.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_40_1_cVm.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_41_1_Wg0.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_42_1_ffs.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_43_1_heS.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_44_1_46A.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_45_1_YhP.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_46_1_OZD.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_47_1_f6v.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_48_1_A3s.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_49_1_bqW.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_4_1_dUY.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_50_1_GNz.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_51_1_bYJ.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_5_1_QW1.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_6_1_U9k.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_7_1_KVy.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_8_1_OQf.root',
'/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_WprimeWH_gg_20140213_153713/mwang/EXOWH_Wprime_M350_GENSIM_V2/EDBR_PATtuple_edbr_wh_20140210/0d71bd6eec2b8c7cc5eafcee05a85e30/EXOWH_Wprime_M350_GENSIM_V2__mwang-EXOWH_Wprime_M350_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_9_1_mjs.root',
] )
| [
"mengmeng.wang@cern.ch"
] | mengmeng.wang@cern.ch |
b083a4b112bddf7fa409807323a683b89426fb47 | 871bd3a9a1cea64b1c61d351dec3e2400a6bc411 | /pymetabolism/tests/test_singletonmixin.py | 135ad064e0a339b3ca95f8849c9613ad9f1cc74d | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | Midnighter/pymetabolism | 16aac5d2ff37b0b62e885439fddc732990e492d4 | 20773e6b3e722a7c353171211ceedb019b012951 | refs/heads/master | 2020-04-06T06:52:21.763077 | 2012-05-29T10:01:41 | 2012-05-29T10:01:41 | 1,536,961 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,550 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
===============
Singleton Tests
===============
:Authors:
Moritz Emanuel Beber
:Date:
2011-08-02
:Copyright:
Copyright(c) 2011 Jacobs University of Bremen. All rights reserved.
:File:
test_singletonmixin.py
"""
import os
import threading
import time
import unittest
import nose.tools as nt
from ..singletonmixin import Singleton
from ..singletonmixin import SingletonException
from ..singletonmixin import forgetAllSingletons
class singletonmixin_Public_TestCase(unittest.TestCase):
    """Black-box tests for the public behaviour of the Singleton mixin.

    NOTE(review): this is legacy Python 2 code (``raise TypeError, 'msg'``,
    ``except Exception, e``, ``xrange``); it will not parse under Python 3
    and is kept byte-for-byte as-is here.
    """

    def testReturnsSameObject(self):
        """
        Demonstrates normal use -- just call getInstance and it returns a singleton instance
        """

        class A(Singleton):
            def __init__(self):
                super(A, self).__init__()

        a1 = A.getInstance()
        a2 = A.getInstance()
        # Same object identity, not merely equal objects.
        self.assertEquals(id(a1), id(a2))

    def testInstantiateWithMultiArgConstructor(self):
        """
        If the singleton needs args to construct, include them in the first
        call to get instances.
        """

        class B(Singleton):
            def __init__(self, arg1, arg2):
                super(B, self).__init__()
                self.arg1 = arg1
                self.arg2 = arg2

        b1 = B.getInstance('arg1 value', 'arg2 value')
        # Subsequent calls take no args and return the already-built instance.
        b2 = B.getInstance()
        self.assertEquals(b1.arg1, 'arg1 value')
        self.assertEquals(b1.arg2, 'arg2 value')
        self.assertEquals(id(b1), id(b2))

    def testInstantiateWithKeywordArg(self):

        class B(Singleton):
            def __init__(self, arg1=5):
                super(B, self).__init__()
                self.arg1 = arg1

        # A positional value overrides the keyword default on first creation.
        b1 = B.getInstance('arg1 value')
        b2 = B.getInstance()
        self.assertEquals(b1.arg1, 'arg1 value')
        self.assertEquals(id(b1), id(b2))

    def testTryToInstantiateWithoutNeededArgs(self):

        class B(Singleton):
            def __init__(self, arg1, arg2):
                super(B, self).__init__()
                self.arg1 = arg1
                self.arg2 = arg2

        # First getInstance call without the required constructor args must fail.
        self.assertRaises(SingletonException, B.getInstance)

    def testPassTypeErrorIfAllArgsThere(self):
        """
        Make sure the test for capturing missing args doesn't interfere with a normal TypeError.
        """
        class B(Singleton):
            def __init__(self, arg1, arg2):
                super(B, self).__init__()
                self.arg1 = arg1
                self.arg2 = arg2
                # Deliberate TypeError from inside __init__ (py2 raise syntax).
                raise TypeError, 'some type error'

        self.assertRaises(TypeError, B.getInstance, 1, 2)

    # def testTryToInstantiateWithoutGetInstance(self):
    #     """
    #     Demonstrates that singletons can ONLY be instantiated through
    #     getInstance, as long as they call Singleton.__init__ during construction.
    #
    #     If this check is not required, you don't need to call Singleton.__init__().
    #     """
    #
    #     class A(Singleton):
    #         def __init__(self):
    #             super(A, self).__init__()
    #
    #     self.assertRaises(SingletonException, A)

    def testDontAllowNew(self):

        def instantiatedAnIllegalClass():
            class A(Singleton):
                def __init__(self):
                    super(A, self).__init__()

                # Defining __new__ on a Singleton subclass is forbidden by the
                # metaclass; merely creating this class should raise.
                def __new__(metaclass, strName, tupBases, dct):
                    return super(MetaSingleton, metaclass).__new__(metaclass, strName, tupBases, dct)

        self.assertRaises(SingletonException, instantiatedAnIllegalClass)

    def testDontAllowArgsAfterConstruction(self):
        class B(Singleton):
            def __init__(self, arg1, arg2):
                super(B, self).__init__()
                self.arg1 = arg1
                self.arg2 = arg2

        B.getInstance('arg1 value', 'arg2 value')
        # Passing constructor args once the instance exists must fail.
        self.assertRaises(SingletonException, B, 'arg1 value', 'arg2 value')

    def test_forgetClassInstanceReferenceForTesting(self):
        class A(Singleton):
            def __init__(self):
                super(A, self).__init__()

        class B(A):
            def __init__(self):
                super(B, self).__init__()

        # check that changing the class after forgetting the instance produces
        # an instance of the new class
        a = A.getInstance()
        assert a.__class__.__name__ == 'A'
        A._forgetClassInstanceReferenceForTesting()
        b = B.getInstance()
        assert b.__class__.__name__ == 'B'
        # check that invoking the 'forget' on a subclass still deletes the instance
        B._forgetClassInstanceReferenceForTesting()
        a = A.getInstance()
        B._forgetClassInstanceReferenceForTesting()
        b = B.getInstance()
        assert b.__class__.__name__ == 'B'

    def test_forgetAllSingletons(self):
        # Should work if there are no singletons
        forgetAllSingletons()

        class A(Singleton):
            # Counts how many times __init__ ran, to prove caching.
            ciInitCount = 0

            def __init__(self):
                super(A, self).__init__()
                A.ciInitCount += 1

        A.getInstance()
        self.assertEqual(A.ciInitCount, 1)

        # A second getInstance must not re-run __init__.
        A.getInstance()
        self.assertEqual(A.ciInitCount, 1)

        # Forgetting all singletons forces a fresh construction next time.
        forgetAllSingletons()
        A.getInstance()
        self.assertEqual(A.ciInitCount, 2)

    def test_threadedCreation(self):
        # Check that only one Singleton is created even if multiple
        # threads try at the same time. If fails, would see assert in _addSingleton
        class Test_Singleton(Singleton):
            def __init__(self):
                super(Test_Singleton, self).__init__()

        class Test_SingletonThread(threading.Thread):
            def __init__(self, fTargetTime):
                super(Test_SingletonThread, self).__init__()
                self._fTargetTime = fTargetTime
                self._eException = None

            def run(self):
                try:
                    # Sleep until the shared target time so all threads hit
                    # getInstance() at (roughly) the same instant.
                    fSleepTime = self._fTargetTime - time.time()
                    if fSleepTime > 0:
                        time.sleep(fSleepTime)
                    Test_Singleton.getInstance()
                except Exception, e:
                    self._eException = e

        fTargetTime = time.time() + 0.1
        lstThreads = []
        for _ in xrange(100):
            t = Test_SingletonThread(fTargetTime)
            t.start()
            lstThreads.append(t)
        eException = None
        for t in lstThreads:
            t.join()
            # Keep only the first exception seen across all workers.
            if t._eException and not eException:
                eException = t._eException
        if eException:
            raise eException

    def testNoInit(self):
        """
        Demonstrates use with a class not defining __init__
        """

        class A(Singleton):
            pass

            #INTENTIONALLY UNDEFINED:
            #def __init__(self):
            #    super(A, self).__init__()

        A.getInstance() #Make sure no exception is raised

    def testMultipleGetInstancesWithArgs(self):

        class A(Singleton):
            # Opt-in flag: later getInstance calls with args are ignored.
            ignoreSubsequent = True

            def __init__(self, a, b=1):
                pass

        a1 = A.getInstance(1)
        a2 = A.getInstance(2) # ignores the second call because of ignoreSubsequent

        class B(Singleton):
            def __init__(self, a, b=1):
                pass

        b1 = B.getInstance(1)
        self.assertRaises(SingletonException, B.getInstance, 2) # No ignoreSubsequent included

        class C(Singleton):
            def __init__(self, a=1):
                pass

        c1 = C.getInstance(a=1)
        self.assertRaises(SingletonException, C.getInstance, a=2) # No ignoreSubsequent included

    def testInheritance(self):
        """
        It's sometimes said that you can't subclass a singleton (see, for instance,
        http://steve.yegge.googlepages.com/singleton-considered-stupid point e). This
        test shows that at least rudimentary subclassing works fine for us.
        """
        class A(Singleton):
            def setX(self, x):
                self.x = x

            def setZ(self, z):
                raise NotImplementedError

        class B(A):
            def setX(self, x):
                self.x = -x

            def setY(self, y):
                self.y = y

        a = A.getInstance()
        a.setX(5)
        b = B.getInstance()
        b.setX(5)
        b.setY(50)
        # A and B keep distinct singleton instances with their own overrides.
        self.assertEqual((a.x, b.x, b.y), (5, -5, 50))
        # The A instance must not have gained setY from subclass B.
        self.assertRaises(AttributeError, eval, 'a.setY', {}, locals())
        self.assertRaises(NotImplementedError, b.setZ, 500)
| [
"m.beber@jacobs-university.de"
] | m.beber@jacobs-university.de |
3fe25f6ea1a150f843cd43ceb8afa5891519991e | d5c5bd81e8328b544beed20cb308b7e9ba79cac1 | /manage.py | f91f5edab547685106f9e0e446d836792c299254 | [] | no_license | yazan-alshekha/django-crud | e8ea364bddf86c174d681e07ce4da22d1f4a182d | 28b8225fa2dbf2cb85254a1eb5e9ffc69e9c8437 | refs/heads/master | 2022-12-16T03:59:08.001231 | 2020-09-25T20:21:24 | 2020-09-25T20:21:24 | 297,794,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line utility.

    Points DJANGO_SETTINGS_MODULE at this project's settings (unless the
    caller already set it) and hands sys.argv to Django's dispatcher.
    Raises a wrapped ImportError when Django itself cannot be imported.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'telecommunications_project.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes, keeping the original
        # ImportError chained as the cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"yazan.alshekha@outlook.com"
] | yazan.alshekha@outlook.com |
a1963fbee8f38d58fb1e69c02240634325f7240f | a2ffb742fb1396dd14a13aed7e86d05e1a819c1f | /products/migrations/0001_initial.py | 0f6ea958ad244bae07b02bb7d65c18e72380f629 | [] | no_license | Code-Institute-Submissions/mug_shots | ee46c86d02a1fb7eaf7dfdd84131d266ec28d90d | 00918862703253c32ef734c1e9f54d9bfc2c0d34 | refs/heads/master | 2023-01-06T22:44:09.153923 | 2020-10-30T23:34:08 | 2020-10-30T23:34:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | # Generated by Django 3.1.1 on 2020-09-22 18:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the products app: Category and Product tables."""

    # First migration of this app, so no dependencies on earlier migrations.
    initial = True

    dependencies = [
    ]

    operations = [
        # Category is created first because Product's FK targets it.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                # Optional human-readable label for display.
                ('friendly_name', models.CharField(blank=True, max_length=200, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sku', models.CharField(blank=True, max_length=200, null=True)),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField()),
                ('price', models.DecimalField(blank=True, decimal_places=2, max_digits=6, null=True)),
                # NOTE(review): field name looks like a typo for "image_url";
                # kept exactly as generated so the migration history matches.
                ('image_irl', models.URLField(blank=True, max_length=1024, null=True)),
                ('image', models.ImageField(blank=True, null=True, upload_to='')),
                # SET_NULL keeps products alive when their category is deleted.
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='products.category')),
            ],
        ),
    ]
| [
"amy.bru39@gmail.com"
] | amy.bru39@gmail.com |
5bf1c3fcd512c1e389e2f7280476b3433ecf2119 | c1b8ff60ed4d8c70e703f71b7c96a649a75c0cec | /ostPython1/multuple.py | c6e25d4906e5ab8d55c5aa5fce4761928a3d621c | [] | no_license | deepbsd/OST_Python | 836d4fae3d98661a60334f66af5ba3255a0cda5c | b32f83aa1b705a5ad384b73c618f04f7d2622753 | refs/heads/master | 2023-02-14T17:17:28.186060 | 2023-01-31T02:09:05 | 2023-01-31T02:09:05 | 49,534,454 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | #!/usr/bin/env python3
#
#
# multiple.py
#
# Lesson 7: String Formatting
#
# by David S. Jackson
# 11/30/2014
#
# OST Python1: Beginning Python
# for Pat Barton, Instructor
#
"""
takes as data a tuple of two-element tuples, such as ((1,1), (2,2), (12,13),
(4,4), (99,98)). This and/or similar data should be hard-coded (no need for
user input). Loop over the tuple and print out the results of multiplying the
numbers together, and use string formatting to display nicely.
"""
# Hard-coded pairs of factors; each row prints as "a  X  b  =  product",
# aligned with the format spec (widths 2/5/2/4/4).
my_tuple = ((8, 9), (11, 13), (4, 5), (19, 23), (9, 18))

for left, right in my_tuple:
    product = left * right
    print("{0:2d}{a:^5}{1:2d}{b:>4}{2:4d}".format(left, right, product, a="X", b="="))
| [
"deepbsd@yahoo.com"
] | deepbsd@yahoo.com |
1347ece238e08d92a8903165e9b040ea820981c3 | 9531e597cd3f865cc6b6f780498a18281c2413f8 | /user_notifications/views.py | 82f196088698131ef8e60ab25accfb76388764e8 | [] | no_license | dpitkevics/DevNet | 7133b80ce5d56b9c11aa4c500d530faed7cb13f4 | 98ebc3916346e6c2bda79711a3896f7c2a8e2ac8 | refs/heads/master | 2020-04-15T12:04:00.245848 | 2015-09-14T17:45:39 | 2015-09-14T17:45:39 | 41,320,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,663 | py | import json
import redis
from notifications import notify
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from notifications.models import Notification
from .serializers import NotificationSerializer
@login_required
def get_notifications(request):
    """Return the current user's five newest notifications as a JSON list.

    If the POST body carries a ``query`` value, only notifications whose verb
    or description contains it are returned; notifications with an empty verb
    are always excluded. Each entry is serialized via NotificationSerializer.
    """
    search_term = request.POST.get('query', None)
    newest_first = request.user.notifications.order_by('-timestamp')
    if search_term is not None:
        matched = newest_first.filter(
            Q(verb__contains=search_term) | Q(description__contains=search_term)
        ).exclude(verb="")[:5]
    else:
        matched = newest_first.all().exclude(verb="")[:5]
    payload = [NotificationSerializer(item).data for item in matched]
    # safe=False allows a top-level JSON array rather than an object.
    return JsonResponse(payload, safe=False)
@login_required
def send_notification(request):
    """Send a notification from the current user.

    Targets the user named by POST ``recipient_username`` when given
    (falsy/missing means broadcast to every user). Verb and description come
    from the POST body, defaulting to empty strings.
    """
    target_username = request.POST.get('recipient_username', None)
    if target_username:
        audience = User.objects.filter(username=target_username)
    else:
        # No explicit recipient: notify everyone.
        audience = User.objects.all()
    for member in audience:
        notify.send(
            request.user,
            recipient=member,
            verb=request.POST.get('verb', ''),
            description=request.POST.get('description', '')
        )
    return HttpResponse(json.dumps({"success": True}), content_type="application/json")
@login_required
def mark_as_read(request):
    """Mark all of the current user's unread notifications as read.

    Also publishes a "mark_as_read" message on each of the user's session
    channels so any connected clients can reset their unread counters.
    """
    request.user.notifications.unread().mark_all_as_read()
    redis_client = redis.StrictRedis(host='localhost', port=6379, db=0)
    # The payload is identical for every session, so build it once.
    payload = json.dumps({"mark_as_read": True, "unread_count": 0})
    for session in request.user.session_set.all():
        redis_client.publish('notifications.%s' % session.session_key, payload)
    return HttpResponse(json.dumps({"success": True}), content_type="application/json")
@receiver(post_save, sender=Notification)
def on_notification_post_save(sender, **kwargs):
    """post_save hook: push the recipient's unread count to their sessions.

    Fired whenever a Notification is saved; publishes the fresh unread count
    on a per-session Redis channel so live clients can update their badges.
    """
    connection = redis.StrictRedis(host='localhost', port=6379, db=0)
    recipient = kwargs['instance'].recipient
    for session in recipient.session_set.all():
        # Re-query the count per publish, matching the original behaviour.
        body = json.dumps(dict(
            count=recipient.notifications.unread().count()
        ))
        connection.publish('notifications.%s' % session.session_key, body)
| [
"daniels.pitkevics@gmail.com"
] | daniels.pitkevics@gmail.com |
25665b81cb5da57ef38a4f8531be6763ea87272c | a7f68cc4500c85d591265d40599c3e12f07e94b9 | /day-15/part-1/lucas.py | a7b18833af904608dcfaff011f245116f15088c3 | [] | no_license | vidalmaxime/adventofcode-2020 | ff9b19e941e579eed0b590b356c0e161210268fe | 67ed20552f104067c5cd6664777de56ee293f4c1 | refs/heads/master | 2023-02-05T09:44:49.234298 | 2020-12-26T15:41:24 | 2020-12-26T15:41:24 | 316,728,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from tool.runners.python import SubmissionPy
class LucasSubmission(SubmissionPy):
    def run(self, s):
        """Solve the 2020 memory game ("Van Eck" sequence), part 1.

        :param s: puzzle input, comma-separated starting numbers (e.g. "0,3,6")
        :return: the 2020th number spoken
        """
        data = [int(x) for x in s.split(',')]
        # Map each number to the last turn (1-indexed) it was spoken, for all
        # starters except the final one. The original kept a growing list of
        # every occurrence per number although only the most recent one is
        # ever used; a single index keeps memory bounded.
        last_seen = {value: turn + 1 for turn, value in enumerate(data[:-1])}
        last = data[-1]
        for turn in range(len(data), 2020):
            # `last` was spoken on `turn`; the next number is the gap back to
            # its previous occurrence, or 0 if it has never been spoken.
            spoken = turn - last_seen[last] if last in last_seen else 0
            last_seen[last] = turn
            last = spoken
        return last
| [
"lucas.stoffl@tum.de"
] | lucas.stoffl@tum.de |
0cdc5d64c675c8d17b205f04ec8710c1c8b01ace | d8c3981c4be27031f8885e28e242632c5ae34dcb | /k.application.building.blocks/ConfigParser/ConfigParser_read_many.py | 59a79b0d621c6de0016644c2241b3984678b6dd5 | [] | no_license | capricorntb/Python.Standard.Library.By.Example | 94f932b7d0c39ff6c10d772e7a7ea0f26de0bcb2 | a64bda2f8fa25cbd96c657cfe180409c2c9ee58d | refs/heads/master | 2021-04-28T21:47:17.989555 | 2016-08-02T14:03:41 | 2016-08-02T14:03:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Reading a configuration file.
"""
from ConfigParser import SafeConfigParser
import glob
parser = SafeConfigParser()
candidates = ['does_not_exist.ini', 'also-does-not-exist.ini',
'simple.ini', 'multisection.ini',
]
found = parser.read(candidates)
missing = set(candidates) - set(found)
print 'Found config files:', sorted(found)
print 'Missing files :', sorted(missing)
| [
"Adil.Zhu@Qisda.com"
] | Adil.Zhu@Qisda.com |
924e817f7df665a0dd09849ca931b75d6b22a05f | 8bcceea28f4e4345f4a0f99c3761c31abfb7a94e | /Probabilistic_model/trade_dynamics_prob_clean.py | 7f4bce4575675e158a27dc8119573112348d2ab9 | [] | no_license | jasperverschuur/Food_security_lesotho | c7b1e91da6036c486eb5d1fd88b27f85010ff534 | d3992b681809862a518678e611a1cbcaae257737 | refs/heads/master | 2022-09-08T12:12:30.440836 | 2020-06-01T19:31:20 | 2020-06-01T19:31:20 | 268,612,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,889 | py | #!/usr/bin/env python
import scipy as sc
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import scipy.stats as stats
import seaborn as sns
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import HuberRegressor,LinearRegression
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from scipy.stats import norm
from sklearn import linear_model
from scipy.stats import genextreme as gev
import netCDF4 as nc
from netCDF4 import Dataset
import scipy as sp
import statsmodels.api as sm
import statsmodels.formula.api as smf
import random
# --- Global setup for the probabilistic trade / food-security experiment ---
plt.rcParams["font.family"] = "Times New Roman"

# Observed agricultural series; the 2007 event sits at row `index_2007`.
data = pd.read_csv('agricultural_data.csv') ### for lag index = 26 instead of 28
# ERA5 January-March precipitation series for Lesotho and South Africa.
L_rain = pd.read_csv('Lesotho-ERA5.csv')
SA_rain = pd.read_csv('SA-ERA5.csv')
# Maize price table; the first two rows are dropped (header/junk rows).
price_data = pd.read_excel('price_SA.xlsx')[2:]
index_2007 = 26
#SA the values are respectively 2.15 (95% CI: 1.63 – 2.67) and 1.78 (range: 0.5 – 9.16) CMIP5.

# Per-draw accumulators, appended once per Monte Carlo iteration below.
m_nat_tot = []       # NAT-minus-ACT food-security difference
m_nat_nt_tot = []    # same, for the no-trend NAT variant
m_act_nt_tot = []    # NOTE(review): never appended below — appears unused
shor_ano_a = []      # shortage anomaly, ACT scenario
shor_ano_n = []      # shortage anomaly, NAT scenario
shor_ano_nt = []     # shortage anomaly, no-trend NAT scenario
a_nat_tot = []       # absolute security index, NAT
a_nat_nt_tot = []    # absolute security index, no-trend NAT
a_act_nt_tot = []    # NOTE(review): never appended below — appears unused
a_act_tot = []       # absolute security index, ACT
nat_frac = []        # sampled export fraction, NAT
nat_nt_frac = []     # sampled export fraction, no-trend NAT
act_nt_frac = []     # NOTE(review): never appended below — appears unused
rain_L = []          # NAT-equivalent rainfall, Lesotho
rain_SA = []         # NAT-equivalent rainfall, South Africa
price_a = []         # total import cost, ACT
price_n = []         # total import cost, NAT
price_n_nt = []      # total import cost, no-trend NAT
price_a_abs = []     # maize price level, ACT
price_n_abs = []     # maize price level, NAT
# Number of Monte Carlo draws.
N = 100
# Triangular(min, mode, max) samples of the risk ratios for the 2007 drought
# (Lesotho and South Africa respectively).
RR_L_sampler = np.random.triangular(1.51,5.36, 32.5, N)
RR_SA_sampler = np.random.triangular(1.53, 4.70, 26.3, N)
# NOTE(review): DataFrame.to_csv returns None, so these two names are only
# placeholders; the side effect is writing the sampled RRs to disk.
RR_L_df = pd.DataFrame(RR_L_sampler).to_csv('output_L_SA/RR_L.csv',index = False)
RR_SA_df = pd.DataFrame(RR_SA_sampler).to_csv('output_L_SA/RR_SA.csv',index = False)
# Monte Carlo loop: one draw of the risk ratios per iteration, translated
# into counterfactual (NAT) rainfall, production, shortage, price and
# food-security outcomes for the 2007 event.
for i in range(0,N):
    RR_L = RR_L_sampler[i]
    RR_SA = RR_SA_sampler[i]
    print(i,RR_L,RR_SA)
    # A 1-in-40 event in ACT maps to a 1-in-(40/RR) event in NAT.
    L_return = 40/RR_L
    SA_return = 40/RR_SA
    #L_rain = pd.read_csv('/Users/Jasper/Lesotho-ERA5.csv')
    #SA_rain = pd.read_csv('/Users/Jasper/SA-ERA5.csv')
    # Empirical plotting positions (computed but unused below).
    return_period = np.linspace(1,len(L_rain),len(L_rain))
    return_period = return_period / (len(return_period)+1)
    L_rain = L_rain.sort_values(by=['JFM_prec'])
    SA_rain = SA_rain.sort_values(by=['JFM_prec'])
    # GEV fits to the JFM precipitation series; yy is the return period
    # implied by the fitted non-exceedance probability on a rainfall grid.
    shape_SA, loc_SA, scale_SA = gev.fit(SA_rain['JFM_prec'])
    xx_SA = np.linspace(100, 1000, 1000)
    yy_SA = 1/(gev.cdf(xx_SA, shape_SA, loc_SA, scale_SA))
    shape_L, loc_L, scale_L = gev.fit(L_rain['JFM_prec'])
    xx_L = np.linspace(100, 1000, 1000)
    yy_L = 1/(gev.cdf(xx_L, shape_L, loc_L, scale_L))
    ### find the index
    # NAT rainfall: grid value whose return period is closest to 40/RR.
    id_SA_return1 = (np.abs(yy_SA-SA_return)).argmin()
    val_SA_return = xx_SA[id_SA_return1]
    id_L_return1 = (np.abs(yy_L-L_return)).argmin()
    val_L_return= xx_L[id_L_return1]
    ### find the index
    # ACT rainfall: grid value closest to the 1-in-40 return period.
    id_SA_return2 = (np.abs(yy_SA-40)).argmin()
    val_SA_return_ACT = xx_SA[id_SA_return2]
    id_L_return2 = (np.abs(yy_L-40)).argmin()
    val_L_return_ACT= xx_L[id_L_return2]
    # Linearly detrended predictors (rainfall, with 1- and 2-year lags).
    production_SA_detrend = signal.detrend(data['production-SA'])
    rain_SA_detrend = signal.detrend(data['rain-SA'])
    rain_SA_lag1_detrend = signal.detrend(data['rain-SA-lag1'])
    rain_SA_lag2_detrend = signal.detrend(data['rain-SA-lag2'])
    rain_L_detrend = signal.detrend(data['rain-L'])
    rain_L_lag1_detrend = signal.detrend(data['rain-L-lag1'])
    rain_L_lag2_detrend = signal.detrend(data['rain-L-lag2'])
    #### fit a lowess with frac 1/2 to the shortage data
    # NOTE(review): frac is actually 1/1 here, not 1/2 as the comment says.
    x1 = np.arange(0,len(data['Shortage']))
    lowess = sm.nonparametric.lowess
    z_detrend = lowess(data['Shortage'],x1,frac = 1/1)
    shortage_detrend = data['Shortage']-z_detrend[:, 1]
    #### check if trend in production is significant
    slope, intercept, r_value, p_value, std_err = stats.linregress(data['year'],data['Shortage'])
    slope1, intercept1,r_value1, p_value1, std_err1 = stats.linregress(data['year'].iloc[15:],data['Shortage'].iloc[15:])
    ### find the detrend line of rain SA and rain L
    #print(data['rain-SA'].iloc[index_2007])
    data1 = data['rain-SA']
    x = np.arange(0,len(data1))
    y=np.array(data1)
    z = np.polyfit(x,y,1)
    #
    # NOTE(review): the intercept/slope below are hard-coded copies of the
    # polyfit just computed; z itself is not used.
    val_rain_SA = data1[index_2007] - 314.6555335957142 + 0.47705378614285701*index_2007
    data2 = data['rain-L']
    x = np.arange(0,len(data2))
    y=np.array(data2)
    z = np.polyfit(x,y,1)
    val_rain_L = data2[index_2007] - 455.47557053939386 - 0.1802992386363648*index_2007
    ### get the detrended values of NAT rain 2007
    # Remove the (hard-coded) linear trend, evaluated at the 2007 index.
    def detrend_SA(value):
        val = value - 314.6555335957142 + 0.47705378614285701*index_2007
        return val
    def detrend_L(value):
        val = value - 455.47557053939386 - 0.1802992386363648*index_2007
        return val
    val_SA_return_det = detrend_SA(val_SA_return)
    val_L_return_det = detrend_L(val_L_return)
    rain_L.append(val_L_return)
    rain_SA.append(val_SA_return)
    val_SA_return_ACT_det = detrend_SA(val_SA_return_ACT)
    val_L_return_ACT_det = detrend_L(val_L_return_ACT)
    ## get the value to add it up
    data5 = data['production-SA']
    x = np.arange(0,len(data5))
    y=np.array(data5)
    z_prod = np.polyfit(x,y,1)
    # Add the production trend back (hard-coded intercept/slope at 2007).
    def retrend_SA_prod(value):
        val = value +7823.836007130121 + 97.02540106951885*index_2007
        return val
    ## get the value to add it up
    data4 = data['Shortage']
    x = np.arange(0,len(data4))
    y=np.array(data4)
    z_shortage = np.polyfit(x,y,1)
    # NOTE(review): formatted string is built and discarded (debug leftover).
    "{0}x + {1}".format(*z_shortage)
    # NOTE(review): defined but never called below.
    def retrend_L_shortage(value):
        val = value +54.44935351158645 + 3.601995026737967*index_2007
        return val
    val_rain_NA_SA_detrend = val_SA_return_det
    val_rain_ACT_SA_detrend = val_SA_return_ACT_det
    # Drought dummies: 1 when the detrended rainfall is below trend.
    dummy = np.where(rain_SA_detrend<0,1,0)
    dummy1 = np.where(rain_SA_lag1_detrend<0,1,0)
    dummy2 = np.where(rain_SA_lag2_detrend<0,1,0)
    ### ypoly fit
    # Degree-2 polynomial regression of SA production on rainfall (+lags)
    # and the drought dummies.
    x_part = data.filter(['rain-SA','rain-SA-lag1','rain-SA-lag2','production-SA'], axis=1)
    x_part['rain-SA'] = rain_SA_detrend
    x_part['rain-SA-lag1'] = rain_SA_lag1_detrend
    x_part['rain-SA-lag2'] = rain_SA_lag2_detrend
    x_part['dummy'] = dummy
    x_part['dummy1'] = dummy1
    x_part['production-SA'] = production_SA_detrend
    x_part = x_part[['rain-SA', 'rain-SA-lag1','rain-SA-lag2','dummy','dummy1','production-SA']]
    X = x_part.iloc[:, 0:5].values
    y = x_part.iloc[:, 5].values
    #### get the polynomial fit
    poly = PolynomialFeatures(degree = 2)
    X_poly = poly.fit_transform(X)
    #poly.fit(X_poly, y)
    lin2 = LinearRegression()
    lin2.fit(X_poly, y)
    prod_pred = lin2.predict(poly.fit_transform(X))
    # Counterfactual (NAT) and actual (ACT) 2007 production anomalies.
    prod_NAT = lin2.predict(poly.fit_transform([[val_rain_NA_SA_detrend,rain_SA_lag1_detrend[index_2007], rain_SA_lag2_detrend[index_2007],1,0]]))
    prod_ACT = lin2.predict(poly.fit_transform([[val_rain_ACT_SA_detrend,rain_SA_lag1_detrend[index_2007],rain_SA_lag2_detrend[index_2007],1,0]]))
    ##### Lesotho
    dummy = np.where(rain_L_detrend<0,1,0)
    dummy1 = np.where(rain_L_lag1_detrend<0,1,0)
    val_rain_NA_L_detrend = val_L_return_det
    val_rain_ACT_L_detrend = val_L_return_ACT_det
    ### ypoly fit
    # Same regression structure for Lesotho's cereal shortage.
    x_part = data.filter(['rain-L','rain-L-lag1','rain-L-lag2','Shortage'], axis=1)
    x_part['rain-L'] = rain_L_detrend
    x_part['rain-L-lag1'] = rain_L_lag1_detrend
    x_part['rain-L-lag2'] = rain_L_lag2_detrend
    x_part['dummy'] = dummy
    x_part['dummy1'] = dummy1
    x_part['Shortage'] = shortage_detrend
    x_part = x_part[['rain-L', 'rain-L-lag1','rain-L-lag2','dummy','dummy1','Shortage']]
    X = x_part.iloc[:, 0:5].values
    y = x_part.iloc[:, 5].values
    #print(X[28])
    #### get the polynomial fit
    poly = PolynomialFeatures(degree = 2)
    X_poly = poly.fit_transform(X)
    #poly.fit(X_poly, y)
    lin2 = LinearRegression()
    lin2.fit(X_poly, y)
    shortage_pred = lin2.predict(poly.fit_transform(X))
    short_NAT = lin2.predict(poly.fit_transform([[val_rain_NA_L_detrend, rain_L_lag1_detrend[index_2007],rain_L_lag2_detrend[index_2007],1,0]]))
    short_ACT = lin2.predict(poly.fit_transform([[val_rain_ACT_L_detrend,rain_L_lag1_detrend[index_2007],rain_L_lag2_detrend[index_2007],1,0]]))
    #### now get the price of maize and rainfall South Africa
    # NOTE(review): re-reads the Excel file on every iteration; the trailing
    # indices [16]/[14] presumably select the 2007 row of this table.
    price_data = pd.read_excel('price_SA.xlsx')[2:]
    x1 = np.arange(0,len(price_data['Value']))
    lowess = sm.nonparametric.lowess
    z_price = lowess(price_data['Value'],x1,frac = 1/2)
    price_data['price_detrend']= price_data['Value']-z_price[:, 1]
    price_data['rain-SA-d'] = signal.detrend(price_data['rain-SA'])
    price_data['rain-SA-d-lag1'] = signal.detrend(price_data['rain-SA-lag1'])
    price_data['rain-SA-d-lag2'] = signal.detrend(price_data['rain-SA-lag2'])
    price_data['dummy'] = np.where(price_data['rain-SA-d']<0,1,0)
    price_data['dummy1'] = np.where(price_data['rain-SA-d-lag1']<0,1,0)
    y = price_data['price_detrend'].values
    X = price_data[['rain-SA-d','rain-SA-d-lag1','rain-SA-d-lag2','dummy','dummy1']]
    poly = PolynomialFeatures(degree = 2)
    X_poly = poly.fit_transform(X)
    lin_poly = LinearRegression()
    lin_poly.fit(X_poly, y)
    price_SA = lin_poly.predict(poly.fit_transform(X))
    # NAT price uses 2007 rainfall shifted by the ACT-NAT rainfall gap.
    diff_rain = val_rain_ACT_SA_detrend-val_rain_NA_SA_detrend
    price_NAT = lin_poly.predict(poly.fit_transform([[price_data['rain-SA-d'][16]-diff_rain,price_data['rain-SA-d-lag1'][16],price_data['rain-SA-d-lag2'][16],1,0]]))
    price_ACT = lin_poly.predict(poly.fit_transform([[price_data['rain-SA-d'][16],price_data['rain-SA-d-lag1'][16],price_data['rain-SA-d-lag2'][16],1,0]]))
    price_data['prediction']= price_SA
    ### now get the
    #### calculate the error
    # Regression residuals give the spread for the sampled model errors.
    err_L = shortage_detrend- shortage_pred #shortage_detrend_sort - shortage_pred
    err_SA1 = production_SA_detrend - prod_pred
    err_price = price_data['price_detrend'] -price_data['prediction']
    std_L = np.std(err_L)
    std_SA1 = np.std(err_SA1)
    #### get a distribution of potential values for shortage and export for 2007 event
    # One Gaussian error draw per quantity per Monte Carlo iteration.
    N_samples = 1
    error_L = norm.rvs(0, std_L, size=N_samples)
    error_SA_P = norm.rvs(0, std_SA1, size=N_samples)
    error_L_NA = norm.rvs(0, std_L, size=N_samples)
    error_L_NA_nt = norm.rvs(0, std_L, size=N_samples)
    error_SA_P_NA = norm.rvs(0, std_SA1, size=N_samples)
    error_L_ACT = norm.rvs(0, std_L, size=N_samples)
    error_SA_P_ACT = norm.rvs(0, std_SA1, size=N_samples)
    error_price_NAT = norm.rvs(0, np.std(err_price), size=N_samples)
    error_price_NAT_nt = norm.rvs(0, np.std(err_price), size=N_samples)
    error_price_ACT = norm.rvs(0, np.std(err_price), size=N_samples)
    #### export values
    # Export fraction (% of SA production): fixed 2% for ACT, uniform for NAT.
    val_export_act =2.0
    val_export_nat = np.random.uniform(0.5,2.5,N_samples)
    val_export_nat1 = np.random.uniform(0.5,2.5,N_samples)
    ### export SA
    # Model predictions plus sampled errors for each scenario.
    pred_NA_shortage_nt = short_NAT + error_L_NA_nt
    pred_NA_shortage = short_NAT + error_L_NA
    pred_NA_prod = prod_NAT + error_SA_P_NA #### index 27 means 2007
    pred_ACT_shortage = short_ACT + error_L_ACT
    pred_ACT_prod = prod_ACT + error_SA_P_ACT
    pred_NA_prod = retrend_SA_prod(pred_NA_prod)
    pred_ACT_prod = retrend_SA_prod(pred_ACT_prod)
    # Store the shortage anomalies before re-adding the lowess level.
    shor_ano_a.append(pred_ACT_shortage[0])
    shor_ano_n.append(pred_NA_shortage[0])
    shor_ano_nt.append(pred_NA_shortage_nt[0])
    # Add the 2007 lowess level back to the shortage anomalies.
    pred_NA_shortage = pred_NA_shortage +z_detrend[index_2007][1]
    pred_NA_shortage_nt = pred_NA_shortage_nt +z_detrend[index_2007][1]
    pred_ACT_shortage = pred_ACT_shortage+z_detrend[index_2007][1]
    pred_price_NAT = price_NAT+error_price_NAT
    pred_price_ACT = price_ACT+error_price_ACT
    pred_price_NAT_nt = price_NAT+error_price_NAT_nt
    # Add the price lowess level (row 14) back to the price anomalies.
    pred_price_NAT =pred_price_NAT + z_price[14][1]
    pred_price_NAT_nt =pred_price_NAT_nt + z_price[14][1]
    pred_price_ACT =pred_price_ACT + z_price[14][1]
    ### calculate the food security
    # Security index = exportable SA surplus minus Lesotho's shortage;
    # the no-trend variant adds a fixed 116 offset.
    secur_ind_NAT = pred_NA_prod*(val_export_nat/100)-pred_NA_shortage
    secur_ind_ACT_GEV = pred_ACT_prod*(val_export_act/100)-pred_ACT_shortage
    secur_ind_NAT_nt = pred_NA_prod*(val_export_nat1 /100)-pred_NA_shortage_nt+116
    print(secur_ind_ACT_GEV,secur_ind_NAT,secur_ind_NAT_nt)
    ###price of imports
    # Import bill in millions (price per tonne x thousand tonnes / 1e6).
    total_price_import_NAT = pred_price_NAT*pred_NA_shortage*1000/(1e6)
    total_price_import_NAT_nt = pred_price_NAT_nt*(pred_NA_shortage_nt-116)*1000/(1e6)
    total_price_import_ACT = pred_price_ACT*pred_ACT_shortage*1000/(1e6)
    diff_mean_nat = secur_ind_NAT- secur_ind_ACT_GEV
    diff_mean_nat_nt = secur_ind_NAT_nt- secur_ind_ACT_GEV
    abs_mean_nat = secur_ind_NAT
    abs_mean_nat_nt = secur_ind_NAT_nt
    abs_mean_act = secur_ind_ACT_GEV
    # Record this draw's scalar results ([0]: N_samples == 1).
    m_nat_tot.append(diff_mean_nat[0])
    m_nat_nt_tot.append(diff_mean_nat_nt[0])
    a_nat_tot.append(abs_mean_nat[0])
    a_nat_nt_tot.append(abs_mean_nat_nt[0])
    a_act_tot.append(abs_mean_act[0])
    nat_frac.append(val_export_nat[0])
    nat_nt_frac.append(val_export_nat1[0])
    price_a.append(total_price_import_ACT[0])
    price_n.append(total_price_import_NAT[0])
    price_n_nt.append(total_price_import_NAT_nt[0])
    price_a_abs.append(pred_price_ACT[0])
    price_n_abs.append(pred_price_NAT[0])
# Persist one row per Monte Carlo draw for each scenario (NAT, no-trend NAT,
# ACT). NOTE(review): to_csv returns None, so the assigned names are only
# placeholders; the side effect is the CSV written under output_L_SA/.
data_nat = pd.DataFrame({'anomaly':shor_ano_n[:],'diff_mean':m_nat_tot[:],'abs_mean':a_nat_tot[:],'price_abs':price_n_abs[:],'price':price_n[:],'frac':nat_frac[:],'rain_L':rain_L,'rain_SA':rain_SA}).to_csv('output_L_SA/data_nat.csv',index = False)
data_nat_nt = pd.DataFrame({'anomaly':shor_ano_nt[:],'diff_mean':m_nat_nt_tot[:],'abs_mean':a_nat_nt_tot[:],'price_abs':price_n_abs[:],'price':price_n_nt[:],'frac':nat_nt_frac[:],'rain_L':rain_L,'rain_SA':rain_SA}).to_csv('output_L_SA/data_nat_nt.csv',index = False)
data_act= pd.DataFrame({'anomaly':shor_ano_a[:],'abs_mean':a_act_tot[:],'price_abs':price_a_abs[:],'price':price_a[:],'frac':val_export_act,'rain_L':rain_L,'rain_SA':rain_SA}).to_csv('output_L_SA/data_act.csv',index = False)
#plt.gca().spines['left'].set_color('none')
| [
"noreply@github.com"
] | noreply@github.com |
4d9685bae094c34f6844353f599ed8a19c912a5c | d305e9667f18127e4a1d4d65e5370cf60df30102 | /tests/st/ops/gpu/test_unpack_op.py | 9a0d8cfda90f7c500d8e6fae7395c0f17d50f593 | [
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknow... | permissive | imyzx2017/mindspore_pcl | d8e5bd1f80458538d07ef0a8fc447b552bd87420 | f548c9dae106879d1a83377dd06b10d96427fd2d | refs/heads/master | 2023-01-13T22:28:42.064535 | 2020-11-18T11:15:41 | 2020-11-18T11:15:41 | 313,906,414 | 6 | 1 | Apache-2.0 | 2020-11-18T11:25:08 | 2020-11-18T10:57:26 | null | UTF-8 | Python | false | false | 5,063 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations.array_ops as P
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
class UnpackNet(nn.Cell):
    """Graph cell that unpacks a fixed (2, 2, 2, 2, 2) parameter along axis 3.

    The constant input interleaves zero pairs with a running counter, so the
    two unpacked slices are an all-zero (2, 2, 2, 2) tensor and
    np.arange(16) reshaped to (2, 2, 2, 2), in that order.
    """

    def __init__(self, nptype):
        super(UnpackNet, self).__init__()
        # Split the 5-D input into two 4-D tensors along axis 3.
        self.unpack = P.Unpack(axis=3)
        self.data_np = np.array([[[[[0, 0],
                                    [0, 1]],
                                   [[0, 0],
                                    [2, 3]]],
                                  [[[0, 0],
                                    [4, 5]],
                                   [[0, 0],
                                    [6, 7]]]],
                                 [[[[0, 0],
                                    [8, 9]],
                                   [[0, 0],
                                    [10, 11]]],
                                  [[[0, 0],
                                    [12, 13]],
                                   [[0, 0],
                                    [14, 15]]]]]).astype(nptype)
        # Network parameter initialized from the constant tensor above.
        self.x1 = Parameter(initializer(Tensor(self.data_np), [2, 2, 2, 2, 2]), name='x1')

    @ms_function
    def construct(self):
        # Returns a tuple of the two unpacked slices.
        return self.unpack(self.x1)
def unpack(nptype):
    """Graph-mode check: run UnpackNet on GPU and compare each unpacked
    slice against its NumPy reference for the given dtype."""
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
    net = UnpackNet(nptype)
    result = net()
    zeros_ref = np.zeros((2, 2, 2, 2)).astype(nptype)
    counter_ref = np.arange(16).reshape(2, 2, 2, 2).astype(nptype)
    for got, want in zip(result, (zeros_ref, counter_ref)):
        assert (got.asnumpy() == want).all()
def unpack_pynative(nptype):
    """PyNative-mode check: apply P.Unpack(axis=3) directly to a Tensor and
    compare each resulting slice against its NumPy reference."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    raw = np.array([[[[[0, 0],
                       [0, 1]],
                      [[0, 0],
                       [2, 3]]],
                     [[[0, 0],
                       [4, 5]],
                      [[0, 0],
                       [6, 7]]]],
                    [[[[0, 0],
                       [8, 9]],
                      [[0, 0],
                       [10, 11]]],
                     [[[0, 0],
                       [12, 13]],
                      [[0, 0],
                       [14, 15]]]]]).astype(nptype)
    tensor_in = Tensor(raw)
    references = (np.zeros((2, 2, 2, 2)).astype(nptype),
                  np.arange(16).reshape(2, 2, 2, 2).astype(nptype))
    pieces = P.Unpack(axis=3)(tensor_in)
    for got, want in zip(pieces, references):
        assert (got.asnumpy() == want).all()
# Thin pytest wrappers: one per dtype and execution mode, all delegating to
# unpack() (graph mode) or unpack_pynative() (PyNative mode) above.
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_float32():
    unpack(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_float16():
    unpack(np.float16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_int32():
    unpack(np.int32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_int16():
    unpack(np.int16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_uint8():
    unpack(np.uint8)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_bool():
    # NOTE(review): np.bool is deprecated/removed in NumPy >= 1.24; this
    # would need np.bool_ on modern NumPy. Kept as-is for fidelity.
    unpack(np.bool)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_float32():
    unpack_pynative(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_float16():
    unpack_pynative(np.float16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_int32():
    unpack_pynative(np.int32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_int16():
    unpack_pynative(np.int16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_uint8():
    unpack_pynative(np.uint8)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_bool():
    # NOTE(review): same np.bool deprecation caveat as the graph-mode test.
    unpack_pynative(np.bool)
| [
"513344092@qq.com"
] | 513344092@qq.com |
30cb1bd441c9ba73147ce2a690de1ff07a8ab41d | 4144c279f65bf4d0966a3c654f0c1e121339d7c9 | /core/components/game/game_key_press.py | 852c9129c49adb515f8020c2418d33721bd14df5 | [] | no_license | mysty0/pydungeons | 06aa42bf534c48b28399f86179218d2fc554d5ea | 5183b791a4a57843d1f1720b43f203ddb5e76f6f | refs/heads/master | 2022-10-08T22:14:28.600607 | 2019-10-30T17:05:10 | 2019-10-30T17:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | import pygame
from core.game_component import GameComponent
from .game_event_manager import GameEventManagerComponent
from core.events.key_pressed import KeyPressedEvent
class GameKeyPressComponent(GameComponent):
def __init__(self):
super().__init__()
self.exec_priority = 1
def game_tick(self):
em = self.game.get_component(GameEventManagerComponent)
if em is None:
return
for i, k in enumerate(pygame.key.get_pressed()):
if k != 0:
em.trigger_event(KeyPressedEvent(i))
| [
"maplegend@mail.ru"
] | maplegend@mail.ru |
3d8de22492fa0e73db7e2c8f0a596c46fb7797d6 | f3366f982ac6d5419cea207d7c123b306f4928bc | /U10-8/U10_8.py | ce1b866309807d3d2b4265acb6ba4950fd3b9e43 | [] | no_license | MrWifeRespecter/TECHNOLOGY | 7d8199ee348dcaa679910b586e7f7761e764f2b4 | 0d163d64ae24fdd21ef7db7583753eda689f023c | refs/heads/master | 2023-05-12T00:35:21.908681 | 2021-04-27T10:24:16 | 2021-04-27T10:24:16 | 292,079,460 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,732 | py | from math import *
from random import *
def s(x): #bara för att jag inte orkade skriva försök så mycket. Min kod ser mer fancy ut nu dessutom
return(x/försök)
def p(x): #Ibland kan den här skicka iväg antingen 59 eller 61 asterisker. Men det händer inte ofta och det är ett så litet problem så jag skiter i det.
mängd=round(60*((x/försök)))
stapel=("*"*mängd)
return(stapel)
försök=0
frek2=0 #Här är frekvensen som summorna förekommer med
frek3=0
frek4=0
frek5=0
frek6=0
frek7=0
frek8=0
frek9=0
frek10=0
frek11=0
frek12=0
for i in range(1,10001):
x=randrange(1,7)
y=randrange(1,7)
temp_summa=(x+y)
if temp_summa==2:
frek2+=1
if temp_summa==3:
frek3+=1
if temp_summa==4:
frek4+=1
if temp_summa==5:
frek5+=1
if temp_summa==6:
frek6+=1
if temp_summa==7:
frek7+=1
if temp_summa==8:
frek8+=1
if temp_summa==9:
frek9+=1
if temp_summa==10:
frek10+=1
if temp_summa==11:
frek11+=1
if temp_summa==12:
frek12+=1
försök+=1
print("Med vilken frekvens förekom de olika summorna? ")
print(" 2|", s(frek2))
print(" 3|", s(frek3))
print(" 4|", s(frek4))
print(" 5|", s(frek5))
print(" 6|", s(frek6))
print(" 7|", s(frek7))
print(" 8|", s(frek8))
print(" 9|", s(frek9))
print("10|", s(frek10))
print("11|", s(frek11))
print("12|", s(frek12))
print("\n")
print("Stapeldiagrammet skulle se ut såhär: ")
print(" 2|", p(frek2))
print(" 3|", p(frek3))
print(" 4|", p(frek4))
print(" 5|", p(frek5))
print(" 6|", p(frek6))
print(" 7|", p(frek7))
print(" 8|", p(frek8))
print(" 9|", p(frek9))
print("10|", p(frek10))
print("11|", p(frek11))
print("12|", p(frek12)) | [
"viggo034@gmail.com"
] | viggo034@gmail.com |
c125cd6fc695135e666fa60d10742905db16d06e | a0334ac9bc03d5fde52b668529daf5ea88dd4b7b | /15_DJANGO_three/basicforms/basicapp/forms.py | 2bfbe346b09898b044d9cb1973d39df9a46a996b | [] | no_license | m-kashani/Python-and-Django-Full-Stack-Web-Developer-Bootcamp | b9f1cfb2f788d10657d9d4613b3fec6e7a3f19e4 | 2218801cf6efc731908ea858f05c8b56ec3aa793 | refs/heads/master | 2020-05-17T17:51:56.580430 | 2019-04-29T07:27:42 | 2019-04-29T07:27:42 | 183,867,578 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from django import forms
from django.core import validators
class FormName(forms.Form):
name=forms.CharField()
email=forms.EmailField()
verify_email=forms.EmailField(label='enter email again')
text=forms.CharField(widget=forms.Textarea)
def clean(self):
all_clean_data=super().clean()
email=all_clean_data['email']
vmail=all_clean_data['verify_email']
if email !=vmail:
raise forms.ValidationError('Make email and vemail same')
| [
"noreply@github.com"
] | noreply@github.com |
6034daa88e22d195d322297c1c2d0e5fd897e872 | 4106239d770367d28424bc48724f176ad73a4bd5 | /django_basics/settings.py | 88c4326cb4e09d637a52ed455c3657759e3b82d4 | [] | no_license | Hyped-247/django_basics | 323843aaa27263097a24554bd7ea0d2cd7cf32c4 | 6d05a27bd954fee4520c2bfb385f6b833b3346ab | refs/heads/master | 2020-04-16T12:03:59.719641 | 2019-01-13T22:55:00 | 2019-01-13T22:55:00 | 165,563,679 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,154 | py | """
Django settings for django_basics project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(b7o91j-u(l!-=yg++pj$c*dra7o*x+-sf#65$)5%ehz-)q(ow'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_basics.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join (BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_basics.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"mmahjoub@westmont.edu"
] | mmahjoub@westmont.edu |
8759703c894c53be16af893e316195c0e1e4b1d2 | 1e484ace2dbd2883986b6e6eced5fede9fb6f06c | /zh/venv/Scripts/easy_install-script.py | 8a69d059d24124a398abc206532e82a0ac31d972 | [] | no_license | ZiZiJunWu/python_chinese_word | bb96c8a1e880907e2ce8f1f371f6f9e4984b59b2 | 7f6784f6fcefb869cf629cfeb8b3f94c31b3d567 | refs/heads/master | 2020-05-03T02:40:09.706497 | 2019-03-23T16:56:42 | 2019-03-23T16:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | #!G:\PycharmProjects\zh\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"1280053803@qq.com"
] | 1280053803@qq.com |
b2ee8f80c02ee3702fd9e086b4bde77e3d31854d | 6790915cb130d2c7e770f2ff3f06aac3147fc16e | /home/migrations/0004_updateme.py | 6a8a88eca3b3166c46da1381a167fa1fd93df3b6 | [] | no_license | tommyjamess/shopitng | 7a56a302a076df3dc54508adff3bde68a6b26f6b | b0ed0dd08df4c1af84094a596a8e7e5c63f9207e | refs/heads/master | 2023-07-03T16:25:04.950197 | 2021-08-25T15:24:32 | 2021-08-25T15:24:32 | 390,338,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # Generated by Django 3.2.4 on 2021-06-26 07:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0003_auto_20210622_1135'),
]
operations = [
migrations.CreateModel(
name='UpdateMe',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.CharField(blank=True, max_length=50)),
('status', models.BooleanField()),
],
),
]
| [
"meadigun@gmail.com"
] | meadigun@gmail.com |
eca5c7e778194e922ed36993773422734dc629b7 | 08ed5925fad1951a351e86d357d266434c3e671c | /hostscripts/animations/matrix_animation.py | ff05cae97098218d62cbce372a727d29960c948e | [
"MIT"
] | permissive | icve/liv-Ard | 83c5be86d3afb7c1295cd21b1eef5a407a7ff169 | 6930c523322aeb13c6c80d2c19be7a8a1b2c59c2 | refs/heads/master | 2021-01-24T08:21:42.157247 | 2019-08-27T16:35:11 | 2019-08-27T16:35:11 | 93,381,863 | 0 | 0 | null | 2017-07-23T07:09:06 | 2017-06-05T08:23:36 | Python | UTF-8 | Python | false | false | 2,941 | py | from time import localtime
def _get_ring(side=8, ring=0):
seqgen = range(ring, side - ring)
coors = [(ring, x) for x in seqgen] +\
[(x, seqgen[-1]) for x in seqgen[1:]] +\
[(seqgen[-1], x) for x in reversed(seqgen[:-1])] +\
[(x, ring) for x in reversed(seqgen[1:-1])]
return coors
class Led_clock_pointer(object):
def __init__(self, mtxdev, pointertype="sec", ring=0, dsp=None):
self.pt = pointertype
dsp = dsp if dsp else int(4 - ring)
self.linear_display = Linear_display(_get_ring(ring=ring), dsp)
self.mtxdev = mtxdev
self.point_generator = {
"sec": lambda: localtime().tm_sec / 59,
"min": lambda: localtime().tm_min / 59,
"hour": lambda: localtime().tm_hour / 23
}[pointertype]
self.lastcood = None
self.off_queue = []
def update(self):
""" this method 'renders' a frame"""
# handle queue before jumping out
if self.off_queue:
self.mtxdev.setled(*self.off_queue.pop(), 0)
n = self.point_generator()
cood = self.linear_display.get_dot(n)
if cood == self.lastcood:
return
if n == 0:
# self.mtxdev.clear()
# clear Linear display slowly
self.off_queue += reversed(self.linear_display.get_coods()[1:])
self.mtxdev.setled(*cood, 1)
self.lastcood = cood
class Led_clock_flasher:
def __init__(self, mtxdev, coods=None, speed=1):
self.mtxdev = mtxdev
self.speed = speed
# default to the inner 4 dots
self.coods = coods if coods else _get_ring(ring=3)
self.state = None
self.state_generator = lambda: int(localtime().tm_sec * speed) % 2
def update(self):
new_state = self.state_generator()
if new_state != self.state:
self.set_state(new_state)
self.state = new_state
def set_state(self, on):
"""turn on/off all led in coods by flagging 1/0"""
for cood in self.coods:
self.mtxdev.setled(*cood, on)
class Linear_display:
def __init__(self, coods, dsp=0):
self.coods = coods
self.dsp = dsp
def get_idx(self, n):
"""get index base on percentage(0 - 1)"""
clen = len(self.coods)
idx = int(n * (clen - 1)) + self.dsp
safeidx = idx % clen
return safeidx
def get_dot(self, n):
""" get cood of dot base on percentage(0 - 1)"""
return self.coods[self.get_idx(n)]
def get_line(self, n):
"""get list of cood base on percentage(0 - 1)"""
return self.coods[:self.get_idx(n)]
def get_coods(self, use_dsp=True):
""" get content of coods, flag to enable displacing items"""
if not use_dsp or self.dsp == 0:
return self.coods
return self.coods[self.dsp:] + self.coods[:self.dsp]
| [
"ic.ve.cv@gmail.com"
] | ic.ve.cv@gmail.com |
123692f1d83c0d850298be8ebd18dc3df003f4e0 | fb4b70ad38d0fc810cb9ee034c8fb963c079f64b | /easy/Self_Dividing_Numbers.py | 33c51ce1cd72ee84467d7802a1ee8de8713c2bb0 | [] | no_license | ChrisLiu95/Leetcode | 0e14f0a7b7aa557bb2576589da8e73dbeeae8483 | baa3342ebe2600f365b9348455f6342e19866a44 | refs/heads/master | 2021-07-11T12:01:00.249208 | 2018-09-26T21:27:42 | 2018-09-26T21:27:42 | 117,451,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | """
A self-dividing number is a number that is divisible by every digit it contains.
For example, 128 is a self-dividing number because 128 % 1 == 0, 128 % 2 == 0, and 128 % 8 == 0.
Also, a self-dividing number is not allowed to contain the digit zero.
Given a lower and upper number bound, output a list of every possible self dividing number, including the bounds if possible.
Example 1:
Input:
left = 1, right = 22
Output: [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22]
Note:
The boundaries of each input argument are 1 <= left <= right <= 10000.
"""
class Solution(object):
def selfDividingNumbers(self, left, right):
res = []
for num in range(left, right + 1):
flag = True
temp = num
while temp != 0:
if temp % 10 == 0:
flag = False
break
elif num % (temp % 10) != 0:
flag = False
break
temp = temp / 10
if flag:
res.append(num)
return res
| [
"xiangchong95@gmail.com"
] | xiangchong95@gmail.com |
7e408c1b6dd5131c7581e5707518d2f2b4f6ea73 | 398df168b3a29c137e7e512368fbb352d596bef7 | /venv/Scripts/sshtunnel-script.py | 92a7373c99059139cb45e4a1227fde7e3b22c0d2 | [] | no_license | FerasDaoud94/demo | 25bcdda2d1a3e7454fedfd48f05cfcba68479ecd | 0bc92e286773a88d3d6531dc07a0b95903617cc4 | refs/heads/master | 2020-09-07T19:27:03.003102 | 2019-11-11T03:12:14 | 2019-11-11T03:12:14 | 220,891,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | #!C:\Users\hp\PycharmProjects\TestProj\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'sshtunnel==0.1.5','console_scripts','sshtunnel'
__requires__ = 'sshtunnel==0.1.5'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('sshtunnel==0.1.5', 'console_scripts', 'sshtunnel')()
)
| [
"ferasdaoud1994@gmail.com"
] | ferasdaoud1994@gmail.com |
e56077f2118675fa8d6e93167ead9674cff11524 | 49ab56af18fd9ae25090e6b07d59cec24da79504 | /osa12-11_suoritukset/test/test_koodi.py | f544d81826648a0327b87aa1f9181ac592a77cf5 | [] | no_license | TomiSar/ProgrammingMOOC2020 | 13031c7c27d574a55537c1c24cd5c216521c8f9a | 4316b7d38cefa50535cf1ed503260a0d9140f36b | refs/heads/master | 2023-02-15T19:33:42.109417 | 2021-01-16T00:22:50 | 2021-01-16T00:22:50 | 322,437,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,494 | py | import unittest
from unittest.mock import patch
from tmc import points, reflect
from tmc.utils import load, load_module, reload_module, get_stdout, check_source, sanitize
from functools import reduce
import os
import os.path
import textwrap
from random import choice, randint
from datetime import date, datetime, timedelta
exercise = 'src.koodi'
def f(attr: list):
return ",".join(attr)
def s(vastaus):
output = ""
for n in vastaus:
output += n + "\n"
return output
class SuorituksetTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', side_effect=[AssertionError("Syötteen pyytämistä ei odotettu")]):
cls.module = load_module(exercise, 'fi')
def test_00a_paaohjelma_kunnossa(self):
ok, line = check_source(self.module)
message = """Funktioita testaava koodi tulee sijoittaa lohkon
if __name__ == "__main__":
sisälle. Seuraava rivi tulee siirtää:
"""
self.assertTrue(ok, message+line)
@points('12.suoritukset_osa1')
def test_01_funktio_suorittajien_nimet_olemassa(self):
try:
from src.koodi import suorittajien_nimet
except:
self.assertTrue(False, "Ohjelmastasi pitäisi löytyä funktio nimeltä suorittajien_nimet(suoritukset: list)")
@points('12.suoritukset_osa1')
def test_02_suorittajien_nimet_paluuarvon_tyyppi(self):
from src.koodi import suorittajien_nimet
from src.koodi import Suoritus
koodi = """
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Ohjelmoinnin jatkokurssi", 2)
suorittajien_nimet([s1, s2, s3])
"""
try:
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Ohjelmoinnin jatkokurssi", 2)
vastaus = suorittajien_nimet([s1, s2, s3])
except:
self.fail(f"Varmista, että seuraavan koodin suoritus onnistuu\n{koodi}")
m = map(None, [])
taip = str(type(vastaus)).replace("<class '","").replace("'>","")
self.assertTrue(type(vastaus) == type(m) or type(vastaus) == list, f"Funktion suorittajien_nimet(kurssit: list) tulee palauttaa map tai list, nyt palautettu arvo oli tyypiltään {taip}")
for alkio in vastaus:
etaip = str(type("")).replace("<class '","").replace("'>","")
taip = str(type(alkio)).replace("<class '","").replace("'>","")
self.assertTrue(type(alkio) == type(""), f"Kun suoritetaan koodi {koodi}palautettujen alkioiden tulee olla tyypiltään {etaip} nyt niiden tyyppi on {taip}")
@points('12.suoritukset_osa1')
def test_03_suorittajien_nimet_toimii_1(self):
from src.koodi import suorittajien_nimet
from src.koodi import Suoritus
koodi = """
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Ohjelmoinnin jatkokurssi", 2)
suorittajien_nimet([s1, s2, s3])
"""
try:
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Ohjelmoinnin jatkokurssi", 2)
vastaus = suorittajien_nimet([s1, s2, s3])
except:
self.fail(f"Varmista, että seuraavan koodin suoritus onnistuu\n{koodi}")
exp = [ s.opiskelijan_nimi for s in [s1, s2, s3]]
output = ""
vast = []
for n in vastaus:
output += n + "\n"
vast.append(n)
self.assertEquals(sorted(vast), sorted(exp), f"Kun suoritetaan koodi {koodi}pitäisi palauttaa nimet \n{s(exp)}\nfunktio palautti\n{output}")
@points('12.suoritukset_osa1')
def test_04_suorittajien_nimet_map_kaytossa(self):
src_file = os.path.join('src', 'koodi.py')
lines = []
p = False
with open(src_file) as f:
for line in f:
if "def suorittajien_nimet" in line:
p = True
elif p and ('__name__ == "__main__":' in line or "def kurssien_nimet" in line):
p = False
elif p:
lines.append(line)
on = False
for line in lines:
if "map" in line:
on = True
self.assertTrue(on, f"Funktio suorittajien_nimet(suoritukset: list) on toteutettava map-funktion avulla")
@points('12.suoritukset_osa1')
def test_05_suorittajien_nimet_toimii_2(self):
from src.koodi import suorittajien_nimet
from src.koodi import Suoritus
koodi = """
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Ohjelmoinnin jatkokurssi", 2)
s4 = Suoritus("Heikki Helastinen", "Ohjelmoinnin perusteet", 3)
s5 = Suoritus("Lady Gaga", "Ohjelmoinnin perusteet", 5)
s6 = Suoritus("Eila Karkki", "Ohjelmoinnin jatkokurssi", 2)
suorittajien_nimet([s1, s2, s3, s4, s5, s6])
"""
try:
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Ohjelmoinnin jatkokurssi", 2)
s4 = Suoritus("Heikki Helastinen", "Ohjelmoinnin perusteet", 3)
s5 = Suoritus("Lady Gaga", "Ohjelmoinnin perusteet", 5)
s6 = Suoritus("Eila Karkki", "Ohjelmoinnin jatkokurssi", 2)
vastaus = suorittajien_nimet([s1, s2, s3, s4, s5, s6])
except:
self.fail(f"Varmista, että seuraavan koodin suoritus onnistuu\n{koodi}")
exp = [ s.opiskelijan_nimi for s in [s1, s2, s3, s4, s5, s6]]
output = ""
vast = []
for n in vastaus:
output += n + "\n"
vast.append(n)
self.assertEquals(sorted(vast), sorted(exp), f"Kun suoritetaan koodi {koodi}pitäisi palauttaa nimet \n{s(exp)}\nfunktio palautti\n{output}")
@points('12.suoritukset_osa2')
def test_06_funktio_kurssien_nimet_olemassa(self):
try:
from src.koodi import kurssien_nimet
except:
self.assertTrue(False, "Ohjelmastasi pitäisi löytyä funktio nimeltä kurssien_nimet(suoritukset: list)")
@points('12.suoritukset_osa2')
def test_07_kurssien_nimet_paluuarvon_tyyppi(self):
from src.koodi import kurssien_nimet
from src.koodi import Suoritus
koodi = """
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Ohjelmoinnin jatkokurssi", 2)
kurssien_nimet([s1, s2, s3])
"""
try:
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Ohjelmoinnin jatkokurssi", 2)
vastaus = kurssien_nimet([s1, s2, s3])
except:
self.fail(f"Varmista, että seuraavan koodin suoritus onnistuu\n{koodi}")
m = map(None, [])
taip = str(type(vastaus)).replace("<class '","").replace("'>","")
self.assertTrue(type(vastaus) == type(m) or type(vastaus) == list or type(vastaus) == set, f"Funktion kurssien_nimet(kurssit: list) tulee palauttaa map tai list, nyt palautettu arvo oli tyypiltään {taip}")
for alkio in vastaus:
etaip = str(type("")).replace("<class '","").replace("'>","")
taip = str(type(alkio)).replace("<class '","").replace("'>","")
self.assertTrue(type(alkio) == type(""), f"Kun suoritetaan koodi {koodi}palautettujen alkioiden tulee olla tyypiltään {etaip} nyt niiden tyyppi on {taip}")
@points('12.suoritukset_osa2')
def test_08_kurssien_nimet_toimii_1(self):
from src.koodi import kurssien_nimet
from src.koodi import Suoritus
koodi = """
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Ohjelmoinnin jatkokurssi", 2)
kurssien_nimet([s1, s2, s3])
"""
try:
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Ohjelmoinnin jatkokurssi", 2)
vastaus = kurssien_nimet([s1, s2, s3])
except:
self.fail(f"Varmista, että seuraavan koodin suoritus onnistuu\n{koodi}")
m = map(None, [])
taip = str(type(vastaus)).replace("<class '","").replace("'>","")
self.assertTrue(type(vastaus) == type(m) or type(vastaus) == list or type(vastaus) == set, f"Funktion kurssien_nimet(kurssit: list) tulee palauttaa map tai list, nyt palautettu arvo oli tyypiltään {taip}")
vastaus = list(vastaus)
for alkio in vastaus:
etaip = str(type("")).replace("<class '","").replace("'>","")
taip = str(type(alkio)).replace("<class '","").replace("'>","")
self.assertTrue(type(alkio) == type(""), f"Kun suoritetaan koodi {koodi}palautettujen alkioiden tulee olla tyypiltään {etaip} nyt niiden tyyppi on {taip}")
exp = sorted(set( s.kurssi for s in [s1, s2, s3]))
output = ""
vast = []
for n in vastaus:
output += n + "\n"
vast.append(n)
self.assertTrue(sorted(vast) == sorted(exp), f"Kun suoritetaan koodi {koodi}pitäisi palauttaa kurssit \n{s(exp)}\nfunktio palautti\n{output}")
@points('12.suoritukset_osa2')
def test_09_kurssin_nimet_map_kaytossa(self):
src_file = os.path.join('src', 'koodi.py')
lines = []
p = False
with open(src_file) as f:
for line in f:
if "def kurssien_nimet" in line:
p = True
elif p and ('__name__ == "__main__":' in line or "def suorittajien_nimet" in line):
p = False
elif p:
lines.append(line)
on = False
for line in lines:
if "map" in line:
on = True
self.assertTrue(on, f"Funktio kurssien_nimet(suoritukset: list) on toteutettava map-funktion avulla")
@points('12.suoritukset_osa2')
def test_10_kurssien_nimet_toimii_2(self):
from src.koodi import kurssien_nimet
from src.koodi import Suoritus
koodi = """
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Tietorakenteet", 2)
s4 = Suoritus("Heikki Helastinen", "Full stack -websovelluskehitys", 3)
s5 = Suoritus("Lady Gaga", "Ohjelmoinnin jatkokurssi", 5)
s6 = Suoritus("Eila Karkki", "Tietoliikenne 1", 2)
kurssien_nimet([s1, s2, s3, s4, s5, s6])
"""
try:
s1 = Suoritus("Pekka Python", "Ohjelmoinnin perusteet", 3)
s2 = Suoritus("Olivia Ohjelmoija", "Ohjelmoinnin perusteet", 5)
s3 = Suoritus("Pekka Python", "Tietorakenteet", 2)
s4 = Suoritus("Heikki Helastinen", "Full stack -websovelluskehitys", 3)
s5 = Suoritus("Lady Gaga", "Ohjelmoinnin jatkokurssi", 5)
s6 = Suoritus("Eila Karkki", "Tietoliikenne 1", 2)
vastaus = kurssien_nimet([s1, s2, s3, s4, s5, s6])
except:
self.fail(f"Varmista, että seuraavan koodin suoritus onnistuu\n{koodi}")
exp = sorted(set( s.kurssi for s in [s1, s2, s3, s4, s5, s6]))
output = ""
vast = []
for n in vastaus:
output += n + "\n"
vast.append(n)
self.assertEquals(sorted(vast), sorted(exp), f"Kun suoritetaan koodi {koodi}pitäisi palauttaa kurssit\n{s(exp)}\nfunktio palautti\n{output}")
if __name__ == '__main__':
unittest.main()
| [
"tomppa.sarjamo@gmail.com"
] | tomppa.sarjamo@gmail.com |
f408f0c7692e6b3353b95f90ce51424a9631acfa | 617f4b4a7c12e019e31c8a652fbda6044631dde1 | /venv/Scripts/pip3.8-script.py | 842cd74962252675a3cde8c100d1994327b31ec9 | [] | no_license | sharifahmeeed/opencv_4_videoshow | 7cbe0a41c4d318ac5f1a1bd61847fb4b8e674eb0 | 9d2b076de449121cd9adda04920be0e24a35b21b | refs/heads/master | 2023-07-17T10:35:28.705475 | 2021-09-07T05:30:51 | 2021-09-07T05:30:51 | 403,853,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | #!C:\Users\SharifAhmed\PycharmProjects\opencv_1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
| [
"sharifahmeeed@gmail.com"
] | sharifahmeeed@gmail.com |
59fd525d369e613a5aa523d38e228234a238fefc | 433947a5e18c57628d86ee9182fdba44f5ee6748 | /checkScore.py | 0e3565ae233cb85d0f5b392f778df6394d66406f | [
"MIT"
] | permissive | maturban/cs595-f13 | e4210eed78a3fbdd30a8d56ae66f417620a82461 | ba4f396bfb2412712c9d90d5015f1717e2725477 | refs/heads/master | 2021-01-16T20:35:55.697415 | 2013-12-06T05:17:00 | 2013-12-06T05:17:00 | 12,801,710 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,161 | py | #!/usr/bin/python
# Mohamed Aturban
import re
import sys
import signal
import urllib2
from bs4 import BeautifulSoup
from time import localtime, strftime, sleep
# Ctrl+C handler
def signal_handler(signal, frame):
print '\n\n See you soon, goodby ... \n'
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Check arguments
if len(sys.argv) != 4:
print '\n ** Please enter correct arguments: '\
'$checkScore <Team> <Freq. in Sec.> <Week#> \n'
sys.exit(0)
uri = "http://sports.yahoo.com/college-football/scoreboard/"\
"?week="+sys.argv[3]+"&conf="
print "\n\nWeek [%s], every %s second(s) the following uri"\
" will be checked for scores:\n '%s'\n"%(sys.argv[3],sys.argv[2],uri)
while 1:
# read the HTML page
redditFile = urllib2.urlopen(uri)
redditHtml = redditFile.read()
redditFile.close()
# parsing using BeautifulSoup
soup = BeautifulSoup(redditHtml)
i = 0
list1 = []
ls2 = []
# filter it
for row in soup.findAll('a', attrs={"data-ylk": "lt:s;sec:"\
"mod-sch;slk:game;itc:0;ltxt:;tar:sports.yahoo.com;"}):
if row.get_text().encode('ascii') == " ":
continue
i = i + 1
# Extract trams and scores
if row.get_text().encode('ascii')[-1:] == ";":
ls2.append(row.get_text().encode('ascii')[:-1])
else:
ls2.append(row.get_text().encode('ascii'))
if i == 3:
list1.append(ls2)
i = 0
ls2 = []
# create final list
final_list = []
for row in list1:
if row[1] == "@":
temp_list = [row[0], "@" , "@", row[2]]
else:
temp_list = [row[0],row[1].split()[0] , row[1].split()[2],row[2]]
final_list.append(temp_list)
flag = 0
# Search for the team in the final list
for row in final_list:
if row[0] == sys.argv[1] or row[3] == sys.argv[1]:
flag = 1
time_str = strftime("%Y-%m-%d %H:%M:%S", localtime())
if row[1] == "@":
print "\n At %s \n This game is scheduled but has not been"\
" played yet : [%s] ? - ? [%s] " %(time_str, row[0], row[3])
else:
print "\n At %s \n Score is: [%s] %s - %s [%s] " %(time_str,\
row[0], row[1] , row[2] , row[3])
if flag == 0:
print "\n No game scheduled for '%s' " %(sys.argv[1])
# Delay for second(s)
sleep(float(sys.argv[2]))
| [
"maturban@cs.odu.edu"
] | maturban@cs.odu.edu |
1b06adbfe627e4844b06a78194d193083b1f3a66 | 8d336d773169d502724f59ccaf7d767c08f755cf | /Prediction/views.py | 48a1528348bb17153c85a5b70a2477ec471c1739 | [] | no_license | jawharjoe/Venter_CMS_old | 14120b5a2c882f566fa884b6c6d8ee7cb3f297dd | df24d1af3577c25592a8416508c44b5597144283 | refs/heads/master | 2022-12-12T14:18:08.530860 | 2018-12-03T05:33:50 | 2018-12-03T05:33:50 | 160,137,687 | 0 | 1 | null | 2022-12-08T02:27:44 | 2018-12-03T05:34:13 | JavaScript | UTF-8 | Python | false | false | 8,161 | py | """
Author: Meet Shah, Shivam Sharma
This view will render a simple html form if the request is GET. If request is POST then will collect the
uploaded csv file and save it in appropriate user account.
"""
from django.shortcuts import redirect
from django.contrib.auth.models import Group
from django.shortcuts import render
from .forms import upload_file_form
from django.conf import settings
from .manipulate_csv import EditCsv
from django.http import HttpResponse
import os
from Prediction import upload_to_google_drive
def upload_file(request):
    """Render the upload form and, on POST, run prediction on the CSV.

    Flow:
      * unauthenticated users just get the upload page;
      * users with no Group assigned get an error page (the group doubles
        as the "company" used to select prediction/category lists);
      * a valid POSTed CSV is saved, parsed by EditCsv, and its predicted
        categories are rendered for review.

    NOTE(review): request.FILES['file'] is read before form.is_valid(), so a
    POST without a file raises MultiValueDictKeyError — confirm the form
    enforces the field upstream.
    NOTE(review): a POST with an invalid form falls through every branch and
    returns None (HTTP 500 in Django) — probably should re-render the form.
    """
    if not request.user.is_authenticated:
        # If not authenticated, redirect to upload_file.html
        return render(request, 'Prediction/upload_file.html')
    else:
        # Get the group of the user
        query_set = Group.objects.filter(user=request.user)
        query_set_size = query_set.count()
        error_dict = {'error': "Please contact admin to add you in group"}
        if query_set_size == 0:
            # If the group is not assigned, display error message
            return render(request, 'Prediction/error_message.html', error_dict)
        else:
            # The group name doubles as the company; stored in the session so
            # later views (category selection, download) can reuse it.
            company = str(query_set.all()[0])
            request.session['company'] = company
            # This post method is from clicking on 'Submit' button while
            # uploading the csv file.
            if request.method == 'POST':
                # Getting the data after all the validations
                form = upload_file_form(request.POST, request.FILES)
                user_name = request.user.username
                file_name = str(request.FILES['file'].name)
                if form.is_valid():
                    # Precautionary: ensures the per-user folder tree exists
                    # and persists the upload (signals.py normally does this).
                    handle_uploaded_file(request.FILES['file'], user_name,
                                         file_name)
                    # EditCsv wraps all CSV manipulation for this upload.
                    csv = EditCsv(file_name, user_name, company)
                    # Header check is company-specific (ICMC_HEADERS vs
                    # SPEAKUP_HEADERS in settings).
                    header_flag, category_list = csv.check_csvfile_header()
                    if header_flag:
                        # dict_list: list of per-row dicts (structure in
                        # EditCsv.py); rows: count of remaining csv rows.
                        dict_list, rows = csv.read_file()
                        context = {'dict_list': dict_list, 'category_list': category_list, 'rows': rows}
                        request.session['Rows'] = rows
                        request.session['filename'] = file_name
                        # Send the parsed data to the frontend for review.
                        return render(request, 'Prediction/predict_categories.html',
                                      context)
                    else:
                        # Headers did not match: delete the input file and
                        # show the upload form again with an error.
                        csv.delete()
                        form = upload_file_form()
                        return render(request, 'Prediction/upload_file.html',
                                      {'form': form, 'Error': "Please submit CSV file with valid headers !!!"})
            else:
                # If the request is not POST, display the form to submit
                form = upload_file_form()
                return render(request, 'Prediction/upload_file.html', {'form': form})
def handle_user_selected_data(request):
    """Persist the categories the user confirmed on the review page.

    Reads one multi-select ('select_category<i>[]') plus one free-text
    field ('other_category<i>') per row, writes the result via EditCsv,
    optionally uploads the output to Google Drive, then redirects to the
    download view.

    NOTE(review): the local name `tuple` shadows the builtin within this
    function; harmless here but worth renaming.
    """
    if not request.user.is_authenticated:
        # Authentication security check
        return redirect(settings.LOGIN_REDIRECT_URL)
    else:
        rows = request.session['Rows']
        correct_category = []
        company = request.session['company']
        if request.method == 'POST':
            file_name = request.session['filename']
            user_name = request.user.username
            for i in range(rows):
                # getlist because the select tag allows multiple selections.
                selected_category = request.POST.getlist('select_category' + str(i) + '[]')
                if request.POST['other_category' + str(i)]:
                    # Pair the selections with the free-text "other" entry:
                    # ([selected...], "other text")
                    tuple = (selected_category, request.POST['other_category' + str(i)])
                    correct_category.append(tuple)
                else:
                    # No "other" text: store just the selection list.
                    correct_category.append(selected_category)
            csv = EditCsv(file_name, user_name, company)
            csv.write_file(correct_category)
            if request.POST['radio'] != "no":
                # User opted to push the results (and diff file) to Drive.
                path_folder = request.user.username + "/CSV/output/"
                path_file = 'MEDIA/' + request.user.username + "/CSV/output/" + request.session['filename']
                path_file_diff = 'MEDIA/' + request.user.username + "/CSV/output/Difference of " + request.session[
                    'filename']
                upload_to_google_drive.upload_to_drive(path_folder,
                                                       'results of ' + request.session['filename'],
                                                       "Difference of " + request.session['filename'],
                                                       path_file,
                                                       path_file_diff)
        return redirect("/download")
def file_download(request):
    """Stream the user's processed CSV back as a download attachment.

    Reads MEDIA_ROOT/<username>/CSV/output/<session filename> fully into
    memory; fine for small result files, heavy for very large ones.
    (Technique per https://stackoverflow.com/questions/36392510/)
    """
    if not request.user.is_authenticated:
        return redirect(settings.LOGIN_REDIRECT_URL)

    result_name = request.session['filename']
    result_path = os.path.join(settings.MEDIA_ROOT, request.user.username,
                               "CSV", "output", result_name)
    with open(result_path, 'rb') as result_file:
        payload = result_file.read()

    response = HttpResponse(payload)
    response['Content-Type'] = 'application/force-download'
    response['Content-Disposition'] = 'attachment;filename=results of ' + result_name
    return response
def handle_uploaded_file(f, username, filename):
    """Persist an uploaded file under MEDIA_ROOT/<username>/CSV/input/.

    Precautionary fallback in case signals.py does not create the per-user
    directory tree; also pre-creates the matching output directory.

    Args:
        f: Django UploadedFile; written in chunks to keep memory bounded.
        username: owner of the upload, used as the directory name.
        filename: target file name inside the input directory.
    """
    data_directory_root = settings.MEDIA_ROOT
    path_input = os.path.join(data_directory_root, username, "CSV", "input")
    path_output = os.path.join(data_directory_root, username, "CSV", "output")
    # exist_ok avoids the check-then-create race of the former
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(path_input, exist_ok=True)
    os.makedirs(path_output, exist_ok=True)

    path = os.path.join(path_input, filename)
    with open(path, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
| [
"jmohd359@gmail.com"
] | jmohd359@gmail.com |
fd91edb1d6fc2d1478993e3ee2f37f4722d90ae6 | 349e9b785f3d26a994441ca9958be9a6f7db3f1b | /client.py | f2bf4e734fd97f4499d1a6358a582f2bb82257b8 | [] | no_license | ivanlukomskiy/MzClient | 5b8ea7a8cde80c8a052b3dd23f2cf4e1c71fed97 | a610245d7cc89099cd39092e44faf6bfc8b86508 | refs/heads/master | 2020-04-22T01:58:06.688989 | 2019-02-10T21:58:01 | 2019-02-10T21:58:01 | 170,031,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | import json
import time
from threading import Thread
import urllib3
from inputs import get_gamepad
# Controller settings
LEFT_THRESHOLD = -3000
RIGHT_THRESHOLD = 3000
LEFT_MAX = -32768
RIGHT_MAX = 32767
INPUT_CODE = 'ABS_RX'
# Rest client settings
REST_CLIENT_MIN_DELAY = 0.2
API_URI = 'http://localhost:8000'
HEADERS = {'Connection': 'close'}
PATH = "{}/api/servo/y/velocity".format(API_URI)
http = urllib3.PoolManager()
def position_to_percents(value, left_threshold=-3000, right_threshold=3000,
                         left_max=-32768, right_max=32767):
    """Map a raw gamepad axis reading to a signed percentage.

    Readings strictly inside the (left_threshold, right_threshold) dead
    zone map to 0 so stick jitter does not move the motor.  Deflections
    scale linearly to -100..0 (left, left_max is negative) and 0..100
    (right).

    The defaults duplicate the module-level LEFT_THRESHOLD /
    RIGHT_THRESHOLD / LEFT_MAX / RIGHT_MAX settings so the common
    single-argument call is unchanged; pass overrides for controllers
    with a different raw range.
    """
    if left_threshold < value < right_threshold:
        return 0
    if value < 0:
        # Dividing by the negative left_max keeps the result negative.
        return -value * 100 / left_max
    return value * 100 / right_max
class MzClient:
    """Bridges gamepad input to the motor REST API using two threads:
    one polls the gamepad, the other pushes velocity changes over HTTP."""

    # Latest axis position (percent) seen from the gamepad.
    currentValue = 0
    # Last value actually sent to the driver.
    driverValue = 0

    def events_handling_loop(self):
        """Poll the gamepad forever, tracking the configured axis."""
        while 1:
            events = get_gamepad()
            for event in events:
                if event.code != INPUT_CODE:
                    continue
                self.currentValue = position_to_percents(event.state)

    def rest_client_loop(self):
        """Push velocity updates whenever the stick position changed."""
        while 1:
            # Throttle updates; was a literal 0.2, now the named setting.
            time.sleep(REST_CLIENT_MIN_DELAY)
            if self.driverValue == self.currentValue:
                continue
            self.driverValue = self.currentValue
            encoded_data = json.dumps({"value": self.driverValue}).encode('utf-8')
            start = time.time()
            r = http.request('PUT', PATH, body=encoded_data)
            val = json.loads(r.data.decode('utf-8'))['value']
            end = time.time()
            print("Velocity updated: {0:.2f}, took {1:.4f} sec".format(val, (end - start)))

    def startup(self):
        """Start both worker loops on daemonless background threads."""
        Thread(target=self.events_handling_loop).start()
        # BUG FIX: the original passed target=self.rest_client_loop() —
        # calling the loop in the main thread (never returning) instead of
        # handing the callable to the Thread.
        Thread(target=self.rest_client_loop).start()
# Start the client only when run as a script, not when imported.
if __name__ == "__main__":
    client = MzClient()
    client.startup()
| [
"ilukomskiy@sbdagroup.com"
] | ilukomskiy@sbdagroup.com |
8ef7930362e2bcc35f6de8fe13f6c54c51214967 | dd998c7abbaa1db0af23b2d0f5e5fd6ab9737c20 | /apps/operation/apps.py | 1a5d95bc9581d0801275317e5e70ba7d07100919 | [] | no_license | seymourgao/xuexionline | 5364737023e6c81b899ead193c581e9811f5f205 | 6d9ffaab76582e35be131a5ee281aec2f837cb82 | refs/heads/master | 2020-04-22T14:10:53.344111 | 2019-02-13T03:35:43 | 2019-02-13T03:35:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | #*coding=utf-8
from django.apps import AppConfig
class OperationConfig(AppConfig):
name = 'operation'
verbose_name = "用户操作" | [
"gxlibra@gmail.com"
] | gxlibra@gmail.com |
22fc2fece41e824ec59281e1165ad60501b135c6 | e466ec58c03eb781890420756719bce659963bd8 | /page/__init__.py | a4504d2f88b8c35cd4834aed6cd1fd874c9fbe32 | [] | no_license | CuiDan1/day03_project002 | 0654fe26c164e160d9fe2c08ef73abeb9c8c7afe | 4429ee47e826d030816dedb8df027f1d78ca3e8f | refs/heads/master | 2020-08-27T03:09:04.783160 | 2019-10-24T07:48:15 | 2019-10-24T07:48:15 | 217,228,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | """爱客登录定位信息"""
from selenium.webdriver.common.by import By
# Appium capabilities for launching the Aike app's login screen.
app_package = "com.vcooline.aike"
app_activity = '.umanager.LoginActivity'
# Locators: each is a (By.ID, resource-id) tuple usable with find_element.
login_username = By.ID, 'com.vcooline.aike:id/etxt_username'
login_password = By.ID, 'com.vcooline.aike:id/etxt_pwd'
login_btn = By.ID, 'com.vcooline.aike:id/btn_login'
"15003125752@163.com"
] | 15003125752@163.com |
f4cefebba561f7e3df125832dd71bfd53f6a98c2 | 64a72c2af381fdfb40cef2583d0dfc80566938d3 | /baxter-ridgeback-kinetic_ws/devel/lib/python2.7/dist-packages/puma_motor_msgs/msg/_Status.py | 504bd856a88b84f3ea3de84393e3e2a8c3928045 | [] | no_license | thibs-sigma/ridgeback-baxter | a3daa2b8425a84c3d75acaec134848215be6f11d | 0bf8ab0df8807ab886644975d63865e5f163dc5a | refs/heads/master | 2020-04-28T16:42:13.902308 | 2019-03-13T14:47:02 | 2019-03-13T14:47:02 | 175,420,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | /home/thib/baxter-ridgeback-kinetic_ws/devel/.private/puma_motor_msgs/lib/python2.7/dist-packages/puma_motor_msgs/msg/_Status.py | [
"thib.hilt31@gmail.com"
] | thib.hilt31@gmail.com |
ef48116a01eb0432df734850331a5bce5d3b179d | 4ef34e4e23087c354a169fbf8e5e368ad388fca9 | /test.py | 6ee49765c5d1752dca427d00a393f151834de9c7 | [] | no_license | asawho/makerlab-arcade | 6a8ab9851a780ec2db6c0b107e582b12d267df3e | 0556ac803b9bc64bb5f6ebef715c3dc25880b5f1 | refs/heads/master | 2020-09-06T18:43:28.469224 | 2019-11-08T17:07:59 | 2019-11-08T17:07:59 | 220,512,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | #!/usr/bin/env python3
from pynput import keyboard
def on_press(key):
    # Log every key press; returning None keeps the listener running.
    print('Key {} pressed.'.format(key))
def on_release(key):
    """Log key releases; returning False stops the pynput listener (Esc)."""
    print('Key {} released.'.format(key))
    if str(key) != 'Key.esc':
        return None
    print('Exiting...')
    return False
# Block the main thread dispatching keyboard events until a callback
# returns False (on_release does so for Esc).
with keyboard.Listener(
    on_press = on_press,
    on_release = on_release) as listener:
    listener.join()
| [
"asawho@gmail.com"
] | asawho@gmail.com |
edfc9553907b03130a283867636ecc6b1029c1e4 | f08d0b5d0ce94292493111be42eaf6db051c8eb3 | /venv/bin/pyreverse | a644b70b622ad524abba1c54062c1c21cab2daca | [] | no_license | draxlus/CMPT-370_SoftwareDevProject | 999ac7ddd470b40d2df8f338a51f2a661b747922 | f2205456ba5ff3d1cb7d4d65cd65becfabcf8c2c | refs/heads/main | 2023-04-19T07:52:45.986842 | 2021-05-06T19:18:43 | 2021-05-06T19:18:43 | 365,009,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | #!/Users/sherozeajmal/Desktop/cmpt370-project/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
# Console-script shim installed into the virtualenv: strip setuptools'
# "-script.py"/".exe" suffix from argv[0], then delegate to pylint's
# pyreverse entry point.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pyreverse())
| [
"siddhantagrawal777@gmail.com"
] | siddhantagrawal777@gmail.com | |
7a529d56ccc005bfccfb9d8c19c6f483390fffa9 | 46bef3a57cb663991387e02f3cc6c0282bd17496 | /ie/si23tinyyolov2/tflite/Tensor.py | 0cc948eaa14ee73dcd9f9a7202d91d57e2d163ea | [] | no_license | k5iogura/vinosyp | 36964f4c51b9d695d46e19d64a49156eaaac0042 | 1ef35532c6ba392761f73504ed787c074781c400 | refs/heads/master | 2020-04-08T04:27:57.905968 | 2019-09-30T07:34:18 | 2019-09-30T07:34:18 | 159,017,659 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,271 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
class Tensor(object):
    """FlatBuffers-generated accessor for the tflite `Tensor` table.

    NOTE: this file is produced by the FlatBuffers compiler ("do not
    modify"); regenerate from the schema rather than editing by hand.
    Field vtable offsets: 4=shape, 6=type, 8=buffer, 10=name,
    12=quantization.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsTensor(cls, buf, offset):
        # The root table position is stored as a uoffset at `offset`.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Tensor()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def TensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"TFL3" is the TFLite flatbuffer file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # Tensor
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Tensor: j-th dimension of the shape vector (0 if absent).
    def Shape(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # Tensor: shape vector as a numpy int32 array (0 if absent).
    def ShapeAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # Tensor: number of dimensions (0 if the field is absent).
    def ShapeLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Tensor: element type enum value (defaults to 0).
    def Type(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # Tensor: index into the model's buffer table (defaults to 0).
    def Buffer(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
        return 0

    # Tensor: tensor name string, or None when absent.
    def Name(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # Tensor: nested QuantizationParameters table, or None when absent.
    def Quantization(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from .QuantizationParameters import QuantizationParameters
            obj = QuantizationParameters()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
# Generated builder helpers: when serializing, call TensorStart, the
# TensorAdd* setters (slot indices 0-4 match the accessor offsets above),
# then TensorEnd on a flatbuffers.Builder.
def TensorStart(builder): builder.StartObject(5)
def TensorAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
def TensorStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def TensorAddType(builder, type): builder.PrependInt8Slot(1, type, 0)
def TensorAddBuffer(builder, buffer): builder.PrependUint32Slot(2, buffer, 0)
def TensorAddName(builder, name): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def TensorAddQuantization(builder, quantization): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(quantization), 0)
def TensorEnd(builder): return builder.EndObject()
| [
"k5i.ogura.v40@gmail.com"
] | k5i.ogura.v40@gmail.com |
e295a394c5093b40b5ef8d9c312b38e6497a13ba | d30aec224cebbc8a92fb05741e690ef1f75d75e9 | /week10/CodingBat/Logic-2/round_sum.py | beb654679a53c72ad783c3a457d69e459bd0cdfe | [] | no_license | bzzitsme/webdev2019 | ce050bcff78e3848c0cabebf27df582f299394d0 | 6671e3c4e5ab4fa6b94fe51dd2974d830d15f543 | refs/heads/master | 2022-04-30T06:05:31.011700 | 2022-03-15T07:31:57 | 2022-03-15T07:45:42 | 167,218,267 | 1 | 0 | null | 2021-05-09T09:47:58 | 2019-01-23T16:49:26 | HTML | UTF-8 | Python | false | false | 173 | py | def round_sum(a, b, c):
return round10(a) + round10(b) + round10(c)
def round10(num):
    """Round num to the nearest multiple of 10 (halves round away from 0)."""
    remainder = num % 10
    bump = 10 if remainder >= 5 else 0
    return num - remainder + bump
"temirlan.dyussyumbayev"
] | temirlan.dyussyumbayev |
c54e99a0862974e1abc8b7eaf5a168c002dff248 | a5a4cee972e487512275c34f308251e6cc38c2fa | /dev/potential/EamPotential/dev_EamPotential.py | 1587069d5e39deda89368cb54c938837b9a44bfc | [
"MIT"
] | permissive | eragasa/pypospack | 4f54983b33dcd2dce5b602bc243ea8ef22fee86b | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | refs/heads/master | 2021-06-16T09:24:11.633693 | 2019-12-06T16:54:02 | 2019-12-06T16:54:02 | 99,282,824 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | import pypospack.potential as potential
def _describe_potential(pot):
    """Print the public attributes of an EamPotential instance."""
    print('pot.potential_type == {}'.format(pot.potential_type))
    print('pot.symbols == {}'.format(pot.symbols))
    print('pot.param_names == {}'.format(pot.param_names))
    print('pot.is_charge == {}'.format(pot.is_charge))
    print('pot.param == {}'.format(pot.param))


# Exercise single-element and binary-alloy construction; the duplicated
# print block of the original is now one helper driven by a loop, with the
# same separator between the two reports.
for _i, symbols in enumerate([['Ni'], ['Ni', 'Al']]):
    if _i:
        print(80*'-')
    pot = potential.EamPotential(symbols=symbols)
    _describe_potential(pot)
| [
"eragasa@ufl.edu"
] | eragasa@ufl.edu |
a4edf3b458e618945adfa9cdeb83ec205d304adf | 5d8d0015ecdb52228945d47c3d25a7ac269d2bc6 | /flaskr/db.py | ee79c5b1b5cf4cbdc726266c5672f4e0aca29ad4 | [
"Zlib"
] | permissive | MergeCommits/354-project-backend | 4bb6d0b9232b526ef5ed5fbf73174ea578a1284b | 97614a1585b66a5d51f5df8ecd778bf3520b830a | refs/heads/main | 2022-11-05T12:13:05.527252 | 2020-06-23T04:26:37 | 2020-06-23T04:26:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from flask import current_app, g
from flask.cli import with_appcontext
from contextlib import contextmanager
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
def new_session():
    """Return a fresh SQLAlchemy session, lazily creating the engine.

    The engine and sessionmaker are cached on Flask's `g` object.
    NOTE(review): `g` is per-app-context, so a new engine (and connection
    pool) is built for every request context — consider caching at the
    app level instead.
    """
    if 'db_engine' not in g:
        g.db_engine = create_engine(current_app.config['DATABASE_URL'], echo=True)
        g.db_sessionmaker = sessionmaker(bind=g.db_engine)

    return g.db_sessionmaker()
def close_db(e=None):
    """Dispose of the per-context engine, if one was created.

    Registered via app.teardown_appcontext, which always passes the
    (possibly None) exception to the callback — the original zero-argument
    signature would raise TypeError on every teardown.

    Args:
        e: exception that ended the app context, or None; unused.
    """
    db_engine = g.pop('db_engine', None)
    # Drop the cached sessionmaker too so nothing keeps the engine alive.
    g.pop('db_sessionmaker', None)

    if db_engine is not None:
        # SQLAlchemy engines are released with dispose(); they have no close().
        db_engine.dispose()
def init_app(app):
    """Register database cleanup to run when each app context tears down."""
    app.teardown_appcontext(close_db)
@contextmanager
def session_scope():
    """Provide a transactional scope around a series of operations.

    Commits on success, rolls back on any error (re-raising it), and
    always closes the session.
    """
    session = new_session()
    try:
        yield session
        session.commit()
    except BaseException:
        # BaseException preserves the original bare-except behavior
        # (rollback even on KeyboardInterrupt/GeneratorExit) while
        # satisfying linters that forbid a bare `except:`.
        session.rollback()
        raise
    finally:
        session.close()
| [
"guillaume.rochefort.mathieu@gmail.com"
] | guillaume.rochefort.mathieu@gmail.com |
f7fb9482be8574be7d00cdcdac9637ddbef5319f | 2d49b1be783091c064dfda11f481e9d9bdd45d5b | /not-validated-prototype_lst/scripts/unit-tests/unit-tests.py | 74092e4ee32923356f22f5df00244ae6328cf2f4 | [] | no_license | xyt556/espa-land-surface-temperature | 8d93829fd307eba6aa656be86c244697c9abbea5 | 4e12f8a967c7c7641bc351cade01694c3c09726f | refs/heads/master | 2021-01-21T15:03:49.239107 | 2017-06-05T16:06:09 | 2017-06-05T16:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,155 | py | '''
FILE: unit-tests.py
PURPOSE: Provides unit testing for this directory.
PROJECT: Land Satellites Data Systems Science Research and Development
(LSRD) at the USGS EROS
LICENSE: NASA Open Source Agreement 1.3
HISTORY:
Date Reason
---------------- --------------------------------------------------------
Sep/2015 Initial implementation
'''
import os
import sys
import shutil
import glob
import filecmp
import unittest
# Add the parent directory where the modules to test are located
sys.path.insert(0, '..')
from extract_auxiliary_narr_data import AuxNARRGribProcessor
from lst_environment import Environment
class LSRD_ValidationFramework(unittest.TestCase):
    """Base TestCase that locates validation data via LSRD_VALIDATION_DIR.

    Subclasses must set self.name before delegating to __init__; it names
    the subdirectory holding their validation fixtures.
    """

    def __init__(self, *args, **kwargs):
        super(LSRD_ValidationFramework, self).__init__(*args, **kwargs)

        if not self.name:
            raise Exception('self.name must be defined')

        # Verify the environment
        self.lsrd_validation_dir = os.environ.get('LSRD_VALIDATION_DIR')
        if self.lsrd_validation_dir is None:
            raise Exception('Missing environment variable LSRD_VALIDATION_DIR')

    def assertFilesEqual(self, file_1, file_2):
        '''Assert that both files exist and have identical contents.'''
        self.assertTrue(os.path.exists(file_1),
                        '{0} Does not exist'.format(file_1))
        self.assertTrue(os.path.exists(file_2),
                        '{0} Does not exist'.format(file_2))

        # filecmp.cmp is shallow by default: equal os.stat signatures pass
        # without a byte-level comparison.
        self.assertTrue(filecmp.cmp(file_1, file_2))
class AuxNARRGribProcessor_TestCase(LSRD_ValidationFramework):
    '''Tests for Grib file processing.'''

    def __init__(self, *args, **kwargs):
        self.name = 'AuxNARRGribProcessor_TestCase'
        super(AuxNARRGribProcessor_TestCase, self).__init__(*args, **kwargs)

        # Validation data is presumed to be available if the directory exists
        self.validation_path = os.path.join(self.lsrd_validation_dir,
                                            self.name)
        if not os.path.isdir(self.validation_path):
            raise Exception('Missing validation data for [{0}]'
                            .format(self.name))

        # Output directories the extraction is expected to produce.
        self.directories = ['HGT_1', 'HGT_2',
                            'SPFH_1', 'SPFH_2',
                            'TMP_1', 'TMP_2']

    def setUp(self):
        '''Run the extraction once; outputs land in self.directories.'''
        self.input_xml = os.path.join(self.validation_path,
                                      'LT50420342011119PAC01.xml')

        # Specify the XML metadata file defining the data to process
        self.processor = AuxNARRGribProcessor(self.input_xml)

        # Process the associated AUX data
        self.processor.extract_aux_data()

    def tearDown(self):
        '''Remove any extraction output directories.'''
        for directory in self.directories:
            if os.path.isdir(directory):
                shutil.rmtree(directory)

    def test_process_grib_data(self):
        '''Compare each extracted file against its validation copy.'''
        for directory in self.directories:
            self.assertEqual(True, os.path.isdir(directory))

            # Start with the local files
            files = glob.glob(os.path.join(directory, '*'))

            # Add the validation files
            validation_directory = os.path.join(self.validation_path,
                                                directory)
            files.extend(glob.glob(os.path.join(validation_directory, '*')))

            # We only want the filenames; union of both sides catches files
            # missing from either directory (assertFilesEqual then fails).
            files = [os.path.basename(x) for x in files]

            # Make a unique list of the filenames
            files = sorted(list(set(files)))

            # Process through each file
            for filename in files:
                local_file = os.path.join(directory, filename)
                validation_file = os.path.join(validation_directory, filename)

                self.assertFilesEqual(validation_file, local_file)
class Environment_TestCase(LSRD_ValidationFramework):
    '''Tests Environment Class'''

    def __init__(self, *args, **kwargs):
        self.name = 'Environment_TestCase'
        super(Environment_TestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        '''Point every variable Environment reads at a known value.

        NOTE(review): os.environ is mutated without restoration, so these
        values leak into other tests run in the same process.
        '''
        os.environ['LST_DATA_DIR'] = '/usr/local'
        os.environ['LST_AUX_DIR'] = '/usr/local'
        os.environ['ASTER_GED_SERVER_NAME'] = 'ASTER_GED_SERVER_NAME'

        self.environment = Environment()

    def test_LST_DATA_DIR(self):
        '''Test the LST_DATA_DIR environment variable'''
        self.assertEqual('/usr/local',
                         self.environment.get_lst_data_directory())

    def test_LST_AUX_DIR(self):
        '''Test the LST_AUX_DIR environment variable'''
        self.assertEqual('/usr/local',
                         self.environment.get_lst_aux_directory())

    def test_ASTER_GED_SERVER_NAME(self):
        '''Test the ASTER_GED_SERVER_NAME environment variable'''
        self.assertEqual('ASTER_GED_SERVER_NAME',
                         self.environment.get_aster_ged_server_name())
# Run every TestCase in this module when executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"rdilley@usgs.gov"
] | rdilley@usgs.gov |
731c94417bf0b8c3ef693e98b8c14512e389ed0c | 115f11050032c09bd561d8e1121d1be0932542f3 | /address_standardizer/settings.py | 10438bb00ed9cd974a2df3f66bf4f5ff93595eda | [
"MIT"
] | permissive | bitjockey42/address_standardizer | 7c619e567c1ea48d3309a9c62ed9093ffc6a2361 | cab196a09664893fb1867a35a9737fdc38771865 | refs/heads/master | 2023-04-26T12:03:40.555573 | 2021-05-05T18:47:03 | 2021-05-05T18:47:03 | 362,680,117 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | import os
# Network binding for the address standardizer service.
HOST = os.getenv("HOST", "0.0.0.0")
PORT = os.getenv("PORT", "8080")
# Environment variables are strings: the original os.getenv("DEBUG", False)
# produced a *truthy* "false"/"0" whenever DEBUG was set at all.  Parse the
# common boolean spellings instead; unset still yields False.
DEBUG = os.getenv("DEBUG", "").strip().lower() in ("1", "true", "yes", "on")
| [
"github@ajulian.me"
] | github@ajulian.me |
c5726f75c757c38f8cbd21289f63e73ea05370c2 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /140_gui/pyqt_pyside/examples/PyQt_PySide_book/004_Main components/001_Inscription/171_setScaledContents - toClass.py | 338077f7a2d487be688c7007a97764ffd712864a | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 522 | py | from PySide import QtCore, QtGui
import sys
class SampleWindow(QtGui.QWidget):
    """Demo widget: a framed QLabel whose pixmap scales with the window."""

    def __init__(self):
        super(SampleWindow, self).__init__()
        # BUG FIX: the original called window.setWindowTitle()/resize()/
        # setLayout() on an undefined global `window`, raising NameError;
        # inside __init__ the widget being configured is `self`.
        self.setWindowTitle("Класс QLabel")
        self.resize(300, 150)
        label = QtGui.QLabel()
        label.setText("Текст надписи")
        label.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Plain)
        label.setPixmap(QtGui.QPixmap("foto.png"))
        label.setAutoFillBackground(True)
        # Scale the pixmap to fill the label as the window resizes.
        label.setScaledContents(True)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(label)
        self.setLayout(vbox)
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
510328e777222074176c906f8c7df287ecbd3b2a | 677dacd01aaa5207f95c3502865fcfc20f37ffb2 | /quik-fix.py | 79e9d06fea7c5733bb1688534b80709988e082f3 | [] | no_license | fehernandez12/QuikFix | 4b25f08c1f9f6c7d68a6d87aedcbb465ba282483 | 7860bbb29b46be326137d5943a9507a0352737c4 | refs/heads/master | 2020-12-23T07:45:47.957668 | 2020-01-30T21:56:28 | 2020-01-30T21:56:28 | 237,087,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,234 | py | import discord
import time
import os
import sys
import json
from discord.ext import commands
import sqlite3
conn = sqlite3.connect(os.path.join(sys.path[0], 'QuikFix.db'))
db_cursor = conn.cursor()
bot = commands.Bot('.')
@bot.event
async def on_ready():
    # Identity banner printed once the bot has connected and logged in.
    print('Logged in as: ')
    print(bot.user.name)
    print(bot.user.id)
    print('------------')
@bot.event
async def on_message(message):
    """Auto-register every message author in the discord_user table.

    Overriding on_message suppresses discord.py's default command
    dispatch, so bot.process_commands must be called explicitly or the
    .say/.register1/.snowspam commands never fire.
    """
    user_id = message.author.id
    # Parameterized query instead of % string interpolation.
    db_cursor.execute('SELECT * FROM discord_user WHERE userid = ?',
                      (user_id,))
    if not db_cursor.fetchone():
        # Column order mirrors the table schema; the four zeros and the
        # 1000 are presumably default stats/balance — TODO confirm against
        # the discord_user schema, which is not visible here.
        user_data = (message.author.discriminator,
                     message.author.id,
                     message.author.name,
                     0,
                     0,
                     0,
                     0,
                     1000)
        db_cursor.execute(
            'INSERT INTO discord_user VALUES(NULL, ?, ?, ?, ?, ?, ?, ?, ?, NULL)',
            user_data)
        conn.commit()
        print("User {} has been added to the database.".format(message.author.name))
    # BUG FIX: without this, overriding on_message swallowed all commands.
    await bot.process_commands(message)
@bot.command()
async def register1(ctx, message):
    """Mark the invoking user as playing the game (plays_game = 1).

    NOTE(review): as a discord.py command, `message` is parsed from the
    invocation text as a string, so `message.author` will raise
    AttributeError; `ctx.author` is probably what was intended — confirm.
    NOTE(review): the UPDATE runs only when the SELECT finds *no* row, in
    which case it matches nothing; the condition looks inverted — confirm
    intent before relying on this command.
    """
    user_id = message.author.id
    # Parameterized queries instead of % string interpolation.
    db_cursor.execute('SELECT plays_game FROM discord_user WHERE userid = ?',
                      (user_id,))
    if not db_cursor.fetchone():
        db_cursor.execute('UPDATE discord_user SET plays_game = 1 WHERE userid = ?',
                          (user_id,))
        conn.commit()
@bot.command()
async def say(ctx, *, arg):
    # Echo command: repeats the remainder of the invocation verbatim.
    await ctx.send(arg)
@bot.command()
async def snowspam(ctx):
    # Sends 200 mention messages, 5 seconds apart, then one final ping.
    # NOTE(review): time.sleep() inside a coroutine blocks the entire
    # asyncio event loop (the bot is unresponsive for ~17 minutes);
    # asyncio.sleep() should be awaited instead.  Mass pinging also risks
    # Discord rate limiting / ToS action.
    count = 0
    await ctx.send('Starting mass pinging in 3...')
    time.sleep(1)
    await ctx.send('2...')
    time.sleep(1)
    await ctx.send('1...')
    while count < 200:
        count += 1
        remaining = str(200 - count)
        await ctx.send('<@244239956170637323> is gay\nRemaining pings: {}'.format(remaining))
        time.sleep(5)
    await ctx.send('Ok, just one more...')
    time.sleep(1)
    await ctx.send('Hi <@244239956170637323>')
# SECURITY: the bot token is hard-coded and committed to source control.
# It should be considered compromised: revoke it and load a fresh token
# from an environment variable or an untracked config file.
bot.run('NjcxMTMzMjQzMjEwNDY1Mjkz.XjL-Jg.cMFzc_2Ayx05wu1UiAtY4aLYRFI')
"noreply@github.com"
] | noreply@github.com |
53c9aed9f7a93c6fe201f4664a845b48f15db2f9 | a81c07a5663d967c432a61d0b4a09de5187be87b | /chrome/installer/mac/signing/pipeline.py | 898c0d4542bb3474d654e71a1e4fd5a1355e3897 | [
"LGPL-2.0-or-later",
"MPL-2.0",
"GPL-2.0-only",
"BSD-3-Clause"
] | permissive | junxuezheng/chromium | c401dec07f19878501801c9e9205a703e8643031 | 381ce9d478b684e0df5d149f59350e3bc634dad3 | refs/heads/master | 2023-02-28T17:07:31.342118 | 2019-09-03T01:42:42 | 2019-09-03T01:42:42 | 205,967,014 | 2 | 0 | BSD-3-Clause | 2019-09-03T01:48:23 | 2019-09-03T01:48:23 | null | UTF-8 | Python | false | false | 14,899 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The pipeline module orchestrates the entire signing process, which includes:
1. Customizing build products for release channels.
2. Code signing the application bundle and all of its nested code.
3. Producing a packaged DMG.
4. Signing and packaging the installer tools.
"""
import os.path
from . import commands, model, modification, notarize, signing
def _customize_and_sign_chrome(paths, dist_config, dest_dir, signed_frameworks):
    """Does channel customization and signing of a Chrome distribution. The
    resulting app bundle is moved into |dest_dir|/|dist_config.app_dir|.

    Args:
        paths: A |model.Paths| object.
        dist_config: A |config.CodeSignConfig| for the |model.Distribution|.
        dest_dir: The directory into which the product will be placed when
            the operations are completed.
        signed_frameworks: A dict that will store paths and change counts of
            already-signed inner frameworks keyed by bundle ID. Paths are used
            to recycle already-signed frameworks instead of re-signing them.
            Change counts are used to verify equivalence of frameworks when
            recycling them. Callers can pass an empty dict on the first call,
            and reuse the same dict for subsequent calls. This function will
            produce and consume entries in the dict. If this sharing is
            undesired, pass None instead of a dict.
    """
    # Copy the app to sign into the work dir.
    commands.copy_files(
        os.path.join(paths.input, dist_config.base_config.app_dir), paths.work)

    # Customize the app bundle.
    modification.customize_distribution(paths, dist_config.distribution,
                                        dist_config)

    work_dir_framework_path = os.path.join(paths.work,
                                           dist_config.framework_dir)
    if signed_frameworks is not None and dist_config.base_bundle_id in signed_frameworks:
        # If the inner framework has already been modified and signed for this
        # bundle ID, recycle the existing signed copy without signing a new
        # copy. This ensures that bit-for-bit identical input will result in
        # bit-for-bit identical signatures not affected by differences in, for
        # example, the signature's timestamp. All variants of a product sharing
        # the same bundle ID are assumed to have bit-for-bit identical
        # frameworks.
        #
        # This is significant because of how binary diff updates work. Binary
        # diffs are built between two successive versions on the basis of their
        # inner frameworks being bit-for-bit identical without regard to any
        # customizations applied only to the outer app. In order for these to
        # apply to all installations regardless of the presence or specific
        # values of any app-level customizations, all inner frameworks for a
        # single version and base bundle ID must always remain bit-for-bit
        # identical, including their signatures.
        (signed_framework_path, signed_framework_change_count
        ) = signed_frameworks[dist_config.base_bundle_id]
        # Overwrite the customized (unsigned) framework with the recycled
        # signed copy, counting how many files differ.
        actual_framework_change_count = commands.copy_dir_overwrite_and_count_changes(
            os.path.join(dest_dir, signed_framework_path),
            work_dir_framework_path,
            dry_run=False)
        # The recycled framework must differ from this variant's framework by
        # exactly as many files as it differed when it was first signed;
        # anything else means the frameworks were not bit-for-bit equivalent.
        if actual_framework_change_count != signed_framework_change_count:
            raise ValueError(
                'While customizing and signing {} ({}), actual_framework_change_count {} != signed_framework_change_count {}'
                .format(dist_config.base_bundle_id,
                        dist_config.packaging_basename,
                        actual_framework_change_count,
                        signed_framework_change_count))
        signing.sign_chrome(paths, dist_config, sign_framework=False)
    else:
        # First signing for this bundle ID: snapshot the modified-but-unsigned
        # framework so the signing delta (change count) can be measured.
        unsigned_framework_path = os.path.join(paths.work,
                                               'modified_unsigned_framework')
        commands.copy_dir_overwrite_and_count_changes(
            work_dir_framework_path, unsigned_framework_path, dry_run=False)
        signing.sign_chrome(paths, dist_config, sign_framework=True)
        # dry_run=True: only count how many files signing changed.
        actual_framework_change_count = commands.copy_dir_overwrite_and_count_changes(
            work_dir_framework_path, unsigned_framework_path, dry_run=True)
        if signed_frameworks is not None:
            # Record the signed framework so later variants can recycle it.
            dest_dir_framework_path = os.path.join(dest_dir,
                                                   dist_config.framework_dir)
            signed_frameworks[dist_config.base_bundle_id] = (
                dest_dir_framework_path, actual_framework_change_count)

    # Move the finished, signed app bundle into the destination directory.
    app_path = os.path.join(paths.work, dist_config.app_dir)
    commands.make_dir(dest_dir)
    commands.move_file(app_path, os.path.join(dest_dir, dist_config.app_dir))
def _staple_chrome(paths, dist_config):
    """Staples the notarization tickets onto the Chrome app bundle.

    Args:
        paths: A |model.Paths| object.
        dist_config: A |config.CodeSignConfig| for the customized product.
    """
    parts = signing.get_parts(dist_config)
    # Restrict stapling to the signed, bundled executables.
    # TODO(https://crbug.com/979725): Reinstate .xpc bundle stapling once
    # the signing environment is on a macOS release that supports
    # Xcode 10.2 or newer.
    stapleable = [
        part.path for part in parts.values() if part.path.endswith('.app')
    ]
    # Staple deeper (more nested) bundles before shallower ones; a reverse
    # lexicographic sort of the paths guarantees that ordering.
    for part_path in sorted(stapleable, reverse=True):
        notarize.staple(os.path.join(paths.work, part_path))
def _package_and_sign_dmg(paths, dist_config):
    """Packages, signs, and verifies a DMG for a signed build product.

    Args:
        paths: A |model.Paths| object.
        dist_config: A |config.CodeSignConfig| for the |dist|.

    Returns:
        The path to the signed DMG file.
    """
    dist = dist_config.distribution
    dmg_path = _package_dmg(paths, dist, dist_config)

    # The DMG identifier is the DMG name without the .dmg suffix. When a
    # brand code is in use, substitute the actual brand code for the name
    # fragment so the association between brand codes and their meanings is
    # not leaked.
    basename = dist_config.packaging_basename
    identifier = (basename.replace(dist.packaging_name_fragment,
                                   dist.branding_code)
                  if dist.branding_code else basename)

    product = model.CodeSignedProduct(
        dmg_path, identifier, sign_with_identifier=True)
    signing.sign_part(paths, dist_config, product)
    signing.verify_part(paths, product)
    return dmg_path
def _package_dmg(paths, dist, config):
    """Packages a Chrome application bundle into a DMG.

    Args:
        paths: A |model.Paths| object.
        dist: The |model.Distribution| for which the product was customized.
        config: The |config.CodeSignConfig| object.

    Returns:
        A path to the produced DMG file.
    """
    packaging_dir = paths.packaging_dir(config)

    # Channel-customized builds ship channel-specific Finder resources
    # (.DS_Store layout and volume icon); stable uses the unadorned names.
    if dist.channel_customize:
        dsstore_file = 'chrome_{}_dmg_dsstore'.format(dist.channel)
        icon_file = 'chrome_{}_dmg_icon.icns'.format(dist.channel)
    else:
        dsstore_file = 'chrome_dmg_dsstore'
        icon_file = 'chrome_dmg_icon.icns'

    dmg_path = os.path.join(paths.output,
                            '{}.dmg'.format(config.packaging_basename))
    app_path = os.path.join(paths.work, config.app_dir)

    # A locally-created empty directory is more trustworthy than /var/empty.
    empty_dir = os.path.join(paths.work, 'empty')
    commands.make_dir(empty_dir)

    # Make the disk image. Don't include any customized name fragments in
    # --volname because the .DS_Store expects the volume name to be constant.
    # Don't put a name on the /Applications symbolic link because the same disk
    # image is used for all languages.
    # yapf: disable
    commands.run_command([
        os.path.join(packaging_dir, 'pkg-dmg'),
        '--verbosity', '0',
        '--tempdir', paths.work,
        '--source', empty_dir,
        '--target', dmg_path,
        '--format', 'UDBZ',
        '--volname', config.app_product,
        '--icon', os.path.join(packaging_dir, icon_file),
        '--copy', '{}:/'.format(app_path),
        '--copy',
        '{}/keystone_install.sh:/.keystone_install'.format(packaging_dir),
        '--mkdir', '.background',
        '--copy',
        '{}/chrome_dmg_background.png:/.background/background.png'.format(
            packaging_dir),
        '--copy', '{}/{}:/.DS_Store'.format(packaging_dir, dsstore_file),
        '--symlink', '/Applications:/ ',
    ])
    # yapf: enable
    return dmg_path
def _package_installer_tools(paths, config):
    """Signs and packages all the installer tools, which are not shipped to end-
    users.

    Args:
        paths: A |model.Paths| object.
        config: The |config.CodeSignConfig| object.
    """
    DIFF_TOOLS = 'diff_tools'

    tools_to_sign = signing.get_installer_tools(config)
    # Helper scripts that ride along in the zip but are not code-signed.
    other_tools = (
        'dirdiffer.sh',
        'dirpatcher.sh',
        'dmgdiffer.sh',
        'keystone_install.sh',
        'pkg-dmg',
    )

    # NOTE: `paths` is deliberately rebound to the temporary work directory
    # for the duration of the `with` block.
    with commands.WorkDirectory(paths) as paths:
        diff_tools_dir = os.path.join(paths.work, DIFF_TOOLS)
        commands.make_dir(diff_tools_dir)

        # Copy each signable tool into the work dir, retarget its part path
        # at the copy, and sign it there.
        for part in tools_to_sign.values():
            commands.copy_files(
                os.path.join(paths.input, part.path), diff_tools_dir)
            part.path = os.path.join(DIFF_TOOLS, os.path.basename(part.path))
            signing.sign_part(paths, config, part)

        # Verify in a second pass, after every tool has been signed.
        for part in tools_to_sign.values():
            signing.verify_part(paths, part)

        for tool in other_tools:
            commands.copy_files(
                os.path.join(paths.packaging_dir(config), tool), diff_tools_dir)

        # Ship everything as a single maximally-compressed zip in the output.
        zip_file = os.path.join(paths.output, DIFF_TOOLS + '.zip')
        commands.run_command(['zip', '-9ry', zip_file, DIFF_TOOLS],
                             cwd=paths.work)
def sign_all(orig_paths, config, disable_packaging=False, do_notarization=True):
    """For each distribution in |config|, performs customization, signing, and
    DMG packaging and places the resulting signed DMG in |orig_paths.output|.

    The |paths.input| must contain the products to customize and sign.

    Args:
        orig_paths: A |model.Paths| object.
        config: The |config.CodeSignConfig| object.
        disable_packaging: If True, packaging is skipped and the signed app
            bundle is copied to |paths.output|. If False, distributions that
            request it are packaged into a DMG, which will also be signed.
        do_notarization: If True, the signed application bundle will be sent for
            notarization by Apple. The resulting notarization ticket will then
            be stapled. If packaging is enabled, the stapled application
            will be packaged in the DMG and then the DMG itself will be
            notarized and stapled.
    """
    with commands.WorkDirectory(orig_paths) as notary_paths:
        # First, sign all the distributions and optionally submit the
        # notarization requests.
        uuids_to_config = {}
        # Shared across distributions so identical inner frameworks are
        # signed only once (see _customize_and_sign_chrome).
        signed_frameworks = {}
        for dist in config.distributions:
            with commands.WorkDirectory(orig_paths) as paths:
                dist_config = dist.to_config(config)
                do_packaging = (dist.package_as_dmg or
                                dist.package_as_pkg) and not disable_packaging

                # If not packaging and not notarizing, then simply drop the
                # signed bundle in the output directory when done signing.
                if not do_packaging and not do_notarization:
                    dest_dir = paths.output
                else:
                    dest_dir = notary_paths.work

                dest_dir = os.path.join(dest_dir,
                                        dist_config.packaging_basename)
                _customize_and_sign_chrome(paths, dist_config, dest_dir,
                                           signed_frameworks)

                # If the build products are to be notarized, ZIP the app bundle
                # and submit it for notarization.
                if do_notarization:
                    zip_file = os.path.join(
                        notary_paths.work,
                        dist_config.packaging_basename + '.zip')
                    commands.run_command([
                        'zip', '--recurse-paths', '--symlinks', '--quiet',
                        zip_file, dist_config.app_dir
                    ],
                                         cwd=dest_dir)
                    uuid = notarize.submit(zip_file, dist_config)
                    uuids_to_config[uuid] = dist_config

        # Wait for app notarization results to come back, stapling as they do.
        if do_notarization:
            for result in notarize.wait_for_results(uuids_to_config.keys(),
                                                    config):
                dist_config = uuids_to_config[result]
                dest_dir = os.path.join(notary_paths.work,
                                        dist_config.packaging_basename)
                _staple_chrome(notary_paths.replace_work(dest_dir), dist_config)

        # After all apps are optionally notarized, package as required.
        if not disable_packaging:
            uuids_to_package_path = {}
            for dist in config.distributions:
                dist_config = dist.to_config(config)

                if dist.package_as_dmg:
                    # Point |paths.work| at the directory where the stapled
                    # (or just signed) app for this distribution was placed.
                    paths = orig_paths.replace_work(
                        os.path.join(notary_paths.work,
                                     dist_config.packaging_basename))
                    dmg_path = _package_and_sign_dmg(paths, dist_config)

                    if do_notarization:
                        uuid = notarize.submit(dmg_path, dist_config)
                        uuids_to_package_path[uuid] = dmg_path

                if dist.package_as_pkg:
                    # TODO(avi): Do packaging as a pkg here.
                    pass

            # Wait for packaging notarization results to come back, stapling as
            # they do.
            if do_notarization:
                for result in notarize.wait_for_results(
                        uuids_to_package_path.keys(), config):
                    package_path = uuids_to_package_path[result]
                    notarize.staple(package_path)

    _package_installer_tools(orig_paths, config)
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
a6343b9d83e42c7fc1cccf50d1d7355d6d923be4 | f53a990d400ac3838c2dc06ee17c851c8383063e | /pfile/python_client.py | 2c52d84d7fa5fb51f1e9c984a73df33a30b19880 | [] | no_license | syy1023/c_learning | d9b6e525181bf3d9bbed5c85a11d05e76888fc6c | 1af806f5c8bbb9cd750af5a69766d6937d8418b0 | refs/heads/master | 2021-09-10T07:02:40.378141 | 2018-03-22T02:32:57 | 2018-03-22T02:32:57 | 113,152,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | #coding=utf-8
import socket
# Minimal TCP client. NOTE(review): Python 2 syntax -- `print` is used as a
# statement below.
s=socket.socket()
# Connect to a server running on this same machine; port 4849 must match the
# companion server script.
host=socket.gethostname()
port=4849
s.connect((host, port))
# Read and display up to 1 KiB of whatever greeting the server sends.
print s.recv(1024)
s.close()
| [
"2665572581@qq.com"
] | 2665572581@qq.com |
fe60ae9957984e07a9ff679fa218dbd4dd291e63 | 6d1d92d22d94c17645f2accec6361054848f3256 | /ipactory/wsgi.py | e161e43e65ea11aa6ebb1d1c604ab35325b0420a | [] | no_license | jg1021/ipac | 7c7e440fff610c3f7e752956f2dda4d617358b5e | 098211d98a3de1e06a5b5be3a7241e52f16bc96a | refs/heads/master | 2022-12-10T14:19:16.455931 | 2020-09-14T03:40:56 | 2020-09-14T03:40:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for ipactory project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Tell Django which settings module to load (only if not already set) before
# the WSGI application object is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ipactory.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
| [
"0607juny@gmail.com"
] | 0607juny@gmail.com |
2e6ecb54b480a398f319df68538b50b978a06dc3 | f34d3948b707e461151ee33296a61fb23a6d3f44 | /month01/day11/day11/day10_exercise/exercise01.py | 2661ccd6399fb82f85eed30d55de03d907cdb447 | [] | no_license | xiao-a-jian/python-study | f9c4e3ee7a2f9ae83bec6afa7c7b5434e8243ed8 | c8e8071277bcea8463bf6f2e8cd9e30ae0f1ddf3 | refs/heads/master | 2022-06-09T17:44:41.804228 | 2020-05-05T07:48:07 | 2020-05-05T07:48:07 | 256,927,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # 定义函数, 删除列表中相同元素(只保留一个)
# list01 = [6, 54, 65, 677, 6, 65, 6, 65]
# 更节省内存
# def delete_duplicates(list_target):
# for r in range(len(list_target) - 1, 0, -1):
# for c in range(r):
# if list_target[r] == list_target[c]:
# del list_target[r]
# break
#
# # 测试
# list01 = [6, 54, 65, 677, 6, 65, 6, 65]
# delete_all(list01)
# print(list01)
# 更简单
def delete_duplicates(list_target):
    """Return the distinct elements of *list_target* as a set (one of each)."""
    unique = set()
    for element in list_target:
        unique.add(element)
    return unique
# 测试
list01 = [6, 54, 65, 677, 6, 65, 6, 65]
list01 = delete_duplicates(list01)
print(list01)
| [
"1261247299@qq.com"
] | 1261247299@qq.com |
03b65e6c63a0b4d0b29232e8bff50158051677c9 | db21dffaa21da3bd953567c2a61aab267468e7f0 | /Code Up/[4031] 합과 차.py | cb725139249936b5ca4ee9b79be65923fdc142eb | [] | no_license | BoHye0202/CodingTest | 14a51ab3729154fbeae42029e0b32f872def7535 | 5fae962cdb41935367f17c6317c80ac2fe3cd8b1 | refs/heads/main | 2023-04-26T03:22:54.044215 | 2021-05-26T11:29:19 | 2021-05-26T11:29:19 | 356,518,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | n = int(input())
m = int(input())
print((n+m)//2)
print(n-((n+m)//2))
| [
"0202cjstk@naver.com"
] | 0202cjstk@naver.com |
b0bd240d43ac3b1a59a797702c347075e3592f73 | 7152b9b0295c6b9961bc875fbc581fc9271b4490 | /Day4/wholesale.py | 1673dc265d36b4681aec9d8939eb0752e24d396b | [] | no_license | Vinay-1312/Coder-sWeek-ML | f0c95abe5ac198fbbde48851be2f497fd3efea9c | 94b82c76a62c4766e2b0760173880a56e7a8999f | refs/heads/master | 2022-10-21T09:19:03.629840 | 2020-06-11T08:57:58 | 2020-06-11T08:57:58 | 268,483,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,061 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 12:58:24 2020
@author: dell
"""
import matplotlib.pyplot as plt
import pandas as pd
data=pd.read_csv("Wholesale.csv")
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
data=data.drop(["Channel","Region"],axis=1).values
data=sc.fit_transform(data)
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
p=PCA(n_components=2,whiten=True)
x=p.fit_transform(data)
c=[]
#kmeans
#elblow mehtod
for i in range(1,11):
k=KMeans(n_clusters=i,init="k-means++",random_state=0)
k.fit(x)
c.append(k.inertia_)
plt.title("elbow")
plt.xlabel("numbero of clusters")
plt.plot(range(1,11),c)
plt.show()
k=KMeans(n_clusters=5,init="k-means++",random_state=0)
pred=k.fit_predict(x)
plt.scatter(x[pred==0,0],x[pred==0,1],s=80,color="yellow",label="cluster1")
plt.scatter(x[pred==1,0],x[pred==1,1],s=80,color="blue",label="cluster2")
plt.scatter(x[pred==2,0],x[pred==2,1],s=80,color="red",label="cluster3")
plt.scatter(x[pred==3,0],x[pred==3,1],s=80,color="green",label="cluster4")
plt.scatter(x[pred==4,0],x[pred==4,1],s=80,color="black",label="cluster5")
plt.scatter(k.cluster_centers_[:,0],k.cluster_centers_[:,1],s=200,label="centroids")
plt.xlabel("x1")
plt.title("kmeans")
plt.ylabel("x2")
plt.show()
for i in range(5):
print(pred[i])
#hierachichal
import scipy.cluster.hierarchy as sch
dendrogram=sch.dendrogram(sch.linkage(x,method="ward"))
plt.title("dendrogram")
plt.xlabel("number of observation")
plt.ylabel("euclidian distance")
plt.show()
from sklearn.cluster import AgglomerativeClustering
ac=AgglomerativeClustering(n_clusters=3)
pred=ac.fit_predict(x)
plt.scatter(x[pred==0,0],x[pred==0,1],s=80,color="yellow",label="cluster1")
plt.scatter(x[pred==1,0],x[pred==1,1],s=80,color="blue",label="cluster2")
plt.scatter(x[pred==2,0],x[pred==2,1],s=80,color="red",label="cluster3")
plt.xlabel("x1")
plt.title("hierachical")
plt.ylabel("x2")
plt.show()
for i in range(5):
print(pred[i])
| [
"noreply@github.com"
] | noreply@github.com |
d29da2fa6b389a1e61c922b0468ca492e288956d | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/securitycenter/v1p1beta1/resources.py | 204b6a0c852d1fd7a975618ac6a38fa929b91cb3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 2,453 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://securitycenter.googleapis.com/v1p1beta1/'
DOCS_URL = 'https://console.cloud.google.com/apis/api/securitycenter.googleapis.com/overview'
class Collections(enum.Enum):
  """Collections for all supported apis.

  Each member's value is the positional tuple consumed by __init__ below:
  (collection_name, path, flat_paths, params, enable_uri_parsing).
  """

  ORGANIZATIONS = (
      'organizations',
      'organizations/{organizationsId}',
      {},
      ['organizationsId'],
      True
  )
  ORGANIZATIONS_ASSETS = (
      'organizations.assets',
      'organizations/{organizationsId}/assets/{assetsId}',
      {},
      ['organizationsId', 'assetsId'],
      True
  )
  ORGANIZATIONS_NOTIFICATIONCONFIGS = (
      'organizations.notificationConfigs',
      '{+name}',
      {
          '':
              'organizations/{organizationsId}/notificationConfigs/'
              '{notificationConfigsId}',
      },
      ['name'],
      True
  )
  ORGANIZATIONS_OPERATIONS = (
      'organizations.operations',
      '{+name}',
      {
          '':
              'organizations/{organizationsId}/operations/{operationsId}',
      },
      ['name'],
      True
  )
  ORGANIZATIONS_SOURCES = (
      'organizations.sources',
      '{+name}',
      {
          '':
              'organizations/{organizationsId}/sources/{sourcesId}',
      },
      ['name'],
      True
  )
  ORGANIZATIONS_SOURCES_FINDINGS = (
      'organizations.sources.findings',
      'organizations/{organizationsId}/sources/{sourcesId}/findings/'
      '{findingId}',
      {},
      ['organizationsId', 'sourcesId', 'findingId'],
      True
  )

  def __init__(self, collection_name, path, flat_paths, params,
               enable_uri_parsing):
    # collection_name: dotted name of the API collection.
    # path: default URI template, relative to BASE_URL.
    # flat_paths: named alternative ("flat") URI templates.
    # params: ordered path-parameter names used by the template.
    # enable_uri_parsing: whether URIs may be parsed back into references.
    self.collection_name = collection_name
    self.path = path
    self.flat_paths = flat_paths
    self.params = params
    self.enable_uri_parsing = enable_uri_parsing
| [
"gcloud@google.com"
] | gcloud@google.com |
4cd9fac0659f565ca93a4ac5eb56440c5998707d | b77565a023a88480bb3330b18be929a19775f5dc | /정승호/키로거/solution.py | 570bd7078f7eb72449816e49fd2e0b55166a2674 | [] | no_license | Jeoungseungho/python-coding-study | 5af34bff429e24a93f6af4b0473d793ea2b791ee | 431e02d12d0834c71f423471701a2182f66a3776 | refs/heads/master | 2023-08-11T07:38:09.122123 | 2021-10-06T06:32:44 | 2021-10-06T06:32:44 | 283,200,892 | 20 | 12 | null | 2021-10-06T05:22:50 | 2020-07-28T12:07:21 | Python | UTF-8 | Python | false | false | 591 | py | import sys
input = sys.stdin.readline
N = int(input())
for _ in range(N):
pass_word = input().rstrip()
left_stack = []
right_stack = []
for word in pass_word:
if word == '<':
if left_stack:
right_stack.append(left_stack.pop())
elif word == '>':
if right_stack:
left_stack.append(right_stack.pop())
elif word == '-':
if left_stack:
left_stack.pop()
else: left_stack.append(word)
left_stack.extend(reversed(right_stack))
print(''.join(left_stack))
| [
"platoon07@khu.ac.kr"
] | platoon07@khu.ac.kr |
ff5e90060294b08c257176521da92b63632eebf5 | f8f24808ee8317a87669bb27d5e4014afbf1d58f | /BM25/venv/Scripts/rst2html4.py | d4f6f449197853895d9988ab28c5a5fc55386e89 | [] | no_license | c-tel/Information-Retrieval | 4bef96bc8d7bf28f588d820d258dca4a5e6a25d9 | 502befe34f58cf6920283bf568f71dc5bab605d8 | refs/heads/master | 2020-03-18T01:40:58.846629 | 2018-05-20T13:59:44 | 2018-05-20T13:59:44 | 134,154,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | #!C:\Users\Админ\PycharmProjects\BM25\venv\Scripts\python.exe
# $Id: rst2html4.py 7994 2016-12-10 17:41:45Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing (X)HTML.
The output conforms to XHTML 1.0 transitional
and almost to HTML 4.01 transitional (except for closing empty tags).
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html4', description=description)
| [
"rorex555@gmail.com"
] | rorex555@gmail.com |
e4deaeba08a07b68f75717301ee3af72f73931c2 | 3a6f8cd866f8ad9d815152d3422706c2948bfd0e | /MainApp/models.py | c44c49a78d6cdae97e47d0a1b0c999dda6e48e8b | [] | no_license | DmitryCheremisov/My_Django | 8365a76aea0b2b1ccde19870f957d3899a6256c9 | 5c6ce0ffc1dd04b34c73029184e81baf928ada4c | refs/heads/master | 2023-01-19T21:10:55.340066 | 2020-11-26T18:19:37 | 2020-11-26T18:19:37 | 316,294,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from django.db import models
# Create your models here.
class Item(models.Model):
    """A stock item: a named, branded product with a quantity and a color."""

    name = models.CharField(max_length=100)
    brand = models.CharField(max_length=100)
    count = models.PositiveIntegerField()
    # Optional in forms (blank=True). NOTE(review): default=None without
    # null=True means saving the default will fail on most backends --
    # confirm whether null=True was intended.
    color = models.CharField(max_length=30, default=None, blank=True)

    def __str__(self):
        return f"Item: {self.name} {self.brand} count: {self.count}"
"x-carver@yandex.ru"
] | x-carver@yandex.ru |
7814ae09297328a8ce2e3978ead518dd2fdf7c5a | 3873e47ae77036a17d29ac53504a2609acb836eb | /dippy/core/models/message.py | 6d3e4d6c50bdbe6ae93b2fc7941106f4cf34d0f6 | [
"MIT"
] | permissive | dontbanmeplz/dippy.core | 117f1be6edd2988d9d8429a9ded6182205919afb | 3811f0aa8bc0ae9f9a25c15c4cc054630b3e9710 | refs/heads/main | 2023-04-26T17:31:17.543348 | 2021-05-22T17:17:42 | 2021-05-22T17:17:42 | 369,865,912 | 0 | 0 | MIT | 2021-05-22T17:16:57 | 2021-05-22T17:16:56 | null | UTF-8 | Python | false | false | 2,551 | py | from __future__ import annotations
from datetime import datetime
from dippy.core.enums import (
AllowedMention,
InteractionResponseType,
MessageActivityType,
MessageType,
)
from dippy.core.models.attachment import AttachmentModel
from dippy.core.models.channel import ChannelMentionModel
from dippy.core.models.embed import EmbedModel
from dippy.core.models.member import MemberModel
from dippy.core.models.model import DippyCoreModel, DippyCoreCacheableModel
from dippy.core.models.reaction import ReactionModel
from dippy.core.models.sticker import StickerModel
from dippy.core.models.user import UserModel
from dippy.core.snowflake import Snowflake
from pydantic import Field
from typing import Optional, Union
class AllowedMentions(DippyCoreModel):
    """Controls which mention types in a message are allowed to ping."""
    parse: list[AllowedMention] = Field(default_factory=list)
    # Explicit role/user allow-lists are capped at 100 entries each.
    roles: list[Snowflake] = Field(default_factory=list, max_items=100)
    users: list[Snowflake] = Field(default_factory=list, max_items=100)
    # Whether the author of the replied-to message should be pinged.
    replied_user: bool = Field(default=False)
class MessageActivityModel(DippyCoreModel):
    """Activity payload attached to a message (e.g. Rich Presence invites)."""
    type: MessageActivityType
    party_id: Optional[str]
class MessageReferenceModel(DippyCoreModel):
    """Pointer to another message, as used by replies and crossposts."""
    message_id: Optional[Snowflake]
    channel_id: Optional[Snowflake]
    guild_id: Optional[Snowflake]
    # When True, replying to a deleted message errors instead of silently
    # dropping the reference.
    fail_if_not_exists: Optional[bool] = Field(default=True)
class MessageInteractionModel(DippyCoreModel):
    """Metadata about the interaction a message was sent in response to."""
    id: Snowflake
    type: InteractionResponseType
    name: str
    user: UserModel
class MessageModel(DippyCoreCacheableModel):
    """A message with its author, content, mentions, and attachments.

    Most fields are Optional because gateway events often carry only partial
    message payloads.
    """
    id: Optional[Snowflake]
    channel_id: Snowflake
    guild_id: Optional[Snowflake]
    author: Optional[UserModel]
    member: Optional[MemberModel]
    content: Optional[str]
    timestamp: Optional[datetime]
    edited_timestamp: Optional[datetime]
    tts: Optional[bool]
    mention_everyone: Optional[bool]
    mentions: Optional[list[UserModel]]
    mention_roles: Optional[list[Snowflake]]
    mention_channels: Optional[list[ChannelMentionModel]]
    attachments: Optional[list[AttachmentModel]]
    embeds: Optional[list[EmbedModel]]
    reactions: Optional[list[ReactionModel]]
    nonce: Optional[Union[int, str]]
    pinned: Optional[bool]
    webhook_id: Optional[Snowflake]
    type: Optional[MessageType]
    activity: Optional[MessageActivityModel]
    message_reference: Optional[MessageReferenceModel]
    flags: Optional[int]
    stickers: Optional[list[StickerModel]]
    # Self-referential: resolved by update_forward_refs() below.
    referenced_message: Optional[MessageModel]
    interaction: Optional[MessageInteractionModel]


# Resolve the forward reference MessageModel makes to itself.
MessageModel.update_forward_refs()
| [
"hi@zech.codes"
] | hi@zech.codes |
96ae58733b73ddbe8b3fe24003c3a503f66c39de | a4d0868cb236d8fc6bbc6cfc5c19c5ba06828120 | /main.py | 85dd6f8991202e9f97216f2003c533c4d52b9e55 | [] | no_license | GunterFritz/cykit | 99312fbe7d457842b4c1f47a57c04d026f58e1d9 | c331b7c4c5677556e27060d06760d6d842d3ceea | refs/heads/master | 2020-05-31T14:40:03.794810 | 2017-11-30T20:46:18 | 2017-11-30T20:46:18 | 94,035,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | import sys
from gui.draw import MyView
from PyQt5.QtWidgets import QApplication
if __name__ == '__main__':
app = QApplication(sys.argv)
view = MyView()
view.show()
sys.exit(app.exec_())
| [
"gunter.fritz@web.de"
] | gunter.fritz@web.de |
2ecc7dfe6f8b957a7e99998acd15cba59cc29ea2 | 9dc32feb50562293a59917c56188e2036b21b341 | /sel.py | d99de448d50d0d12268688605d8c663e04d3c1ec | [] | no_license | JJDing-Louis/Chrome-Selenium-Python- | 6180dead8eb6d944f5e1d47719cbbe595e30e93c | b379320c7d06579928d57adc1a75624dd5922d9a | refs/heads/master | 2022-06-21T02:46:36.409149 | 2020-05-09T04:15:11 | 2020-05-09T04:15:16 | 262,457,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | from selenium import webdriver
browser = webdriver.Chrome()
use=input("帳號:")
psw=input("密碼")
browser.get('https://www.eyny.com/member.php?mod=logging&action=login')
a=browser.find_element_by_name('username')
a.send_keys(use)
a=browser.find_element_by_name('password')
a.send_keys(psw)
a.submit()
| [
"xujpvuxu2332@gmail.com"
] | xujpvuxu2332@gmail.com |
a519ff280a0a25e5d2ad8a78358df252d8eb37df | 81f171fcf6831a9267072a6348b12c517d9166e1 | /Megatron-DeepSpeed/megatron/mpu/tests/test_cross_entropy.py | 8155e3645c67aefb416087080b14598e42ff0fe5 | [
"Apache-2.0",
"MIT"
] | permissive | MikeGu721/EasyLLM | ae8c6e650e6710d8a6556a53485b05d5708e45dc | 4f18766e3baed3694d7e887999881a5c6b72a24a | refs/heads/main | 2023-07-05T04:32:35.739390 | 2023-07-04T07:03:22 | 2023-07-04T07:03:22 | 625,133,639 | 47 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,074 | py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from commons import set_random_seed
from commons import IdentityLayer
from commons import print_separator
from commons import initialize_distributed
from mpu.cross_entropy import vocab_parallel_cross_entropy
import mpu
import torch.nn.functional as F
import torch
import random
import sys
from deepspeed.accelerator import get_accelerator
sys.path.append("../..")
def torch_cross_entropy(batch_size, seq_length, vocab_size,
                        logits_scale, seed):
    """Reference loss and weight gradient via plain torch.nn.functional."""
    set_random_seed(seed)
    device = get_accelerator().device_name()
    layer = IdentityLayer((batch_size, seq_length, vocab_size),
                          scale=logits_scale).to(device)
    logits = layer()
    labels = get_accelerator().LongTensor(
        size=(batch_size, seq_length)).random_(0, vocab_size)
    flat_logits = logits.view(-1, logits.size()[-1])
    per_token = F.cross_entropy(flat_logits, labels.view(-1),
                                reduction='none')
    loss = per_token.view_as(labels).mean()
    loss.backward()
    return loss, layer.weight.grad
def mpu_cross_entropy(batch_size, seq_length, vocab_size,
                      logits_scale, seed):
    """Loss and weight gradient via the model-parallel cross-entropy path."""
    set_random_seed(seed)
    layer = IdentityLayer((batch_size, seq_length, vocab_size),
                          scale=logits_scale).to(get_accelerator().device_name())
    full_logits = layer()
    # Shard the vocab dimension across the tensor-model-parallel group.
    logits_shard = mpu.scatter_to_tensor_model_parallel_region(full_logits)
    labels = get_accelerator().LongTensor(
        size=(batch_size, seq_length)).random_(0, vocab_size)
    loss = vocab_parallel_cross_entropy(logits_shard, labels).mean()
    loss.backward()
    return loss, layer.weight.grad
def test_cross_entropy(tensor_model_parallel_size):
    """Checks that the vocab-parallel cross entropy matches plain torch.

    Runs both implementations with identical seeds and asserts that loss and
    gradient agree to within 1e-6 for the given model-parallel size.
    """
    if torch.distributed.get_rank() == 0:
        print('> testing cross entropy with model parallel size {} ...'.
              format(tensor_model_parallel_size))

    mpu.initialize_model_parallel(tensor_model_parallel_size)
    # The requested size may be clamped by the actual world size.
    tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()

    batch_size = 13
    seq_length = 17
    vocab_size_per_partition = 11
    logits_scale = 1000.0
    vocab_size = vocab_size_per_partition * tensor_model_parallel_size
    seed = 1234

    loss_torch, grad_torch = torch_cross_entropy(batch_size, seq_length,
                                                 vocab_size, logits_scale,
                                                 seed)
    loss_mpu, grad_mpu = mpu_cross_entropy(batch_size, seq_length,
                                           vocab_size, logits_scale,
                                           seed)

    # sub_ mutates the torch-side tensors in place; they are not reused
    # afterwards, so this is safe.
    error = loss_torch.sub_(loss_mpu).abs().max()
    print(' max error in loss on global rank {}: {}'.format(
        torch.distributed.get_rank(), error))
    assert error < 1.0e-6

    error = grad_torch.sub_(grad_mpu).abs().max()
    print(' max error in grad on global rank {}: {}'.format(
        torch.distributed.get_rank(), error))
    assert error < 1.0e-6

    # Reset groups
    mpu.destroy_tensor_model_parallel()

    torch.distributed.barrier()
    if torch.distributed.get_rank() == 0:
        print('>> passed the test :-)')
if __name__ == '__main__':
    initialize_distributed()
    world_size = torch.distributed.get_world_size()

    # Exercise every power-of-two model-parallel size that fits the job.
    tensor_model_parallel_size = 1
    while tensor_model_parallel_size <= world_size:
        print_separator('test cross entropy')
        test_cross_entropy(tensor_model_parallel_size)
        tensor_model_parallel_size *= 2
"437914030@qq.com"
] | 437914030@qq.com |
ddc0b9c1fe3b67c0a5d6fd069262be7facd56757 | 532c5b6bd09b85e337cf568f6535299a773e15a5 | /Livid_DS1_v2/DS1.py | 08a31086ecd0094b54e892baf9419f88a5949836 | [
"MIT"
] | permissive | thomasf/LiveRemoteScripts | ac796744c9694887f7a3c5ac3570630870c09054 | 23bb89fc62fce97fc7e46f6a94623ada1f255294 | refs/heads/master | 2021-01-18T09:27:51.838549 | 2016-03-14T05:12:08 | 2016-03-14T05:12:08 | 24,001,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,395 | py | # by amounra 0216 : http://www.aumhaa.com
# written against Live 9.6 release on 021516
from __future__ import absolute_import, print_function
import Live
import math
import sys
from re import *
from itertools import imap, chain, starmap
from ableton.v2.base import inject, listens, listens_group
from ableton.v2.control_surface import ControlSurface, ControlElement, Layer, Skin, PrioritizedResource, Component, ClipCreator, DeviceBankRegistry
from ableton.v2.control_surface.elements import ComboElement, ButtonMatrixElement, DoublePressElement, MultiElement, DisplayDataSource, SysexElement
from ableton.v2.control_surface.components import M4LInterfaceComponent, ClipSlotComponent, SceneComponent, SessionComponent, TransportComponent, BackgroundComponent, ViewControlComponent, SessionRingComponent, SessionRecordingComponent, SessionNavigationComponent, MixerComponent, PlayableComponent
from ableton.v2.control_surface.components.mixer import simple_track_assigner
from ableton.v2.control_surface.control import control_color
from ableton.v2.control_surface.mode import AddLayerMode, ModesComponent, DelayMode
from ableton.v2.control_surface.elements.physical_display import PhysicalDisplayElement
from ableton.v2.control_surface.components.session_recording import *
from ableton.v2.control_surface.control import PlayableControl, ButtonControl, control_matrix
from aumhaa.v2.base import initialize_debug
from aumhaa.v2.control_surface import SendLividSysexMode, MomentaryBehaviour, ExcludingMomentaryBehaviour, DelayedExcludingMomentaryBehaviour, ShiftedBehaviour, LatchingShiftedBehaviour, FlashingBehaviour
from aumhaa.v2.control_surface.mod_devices import *
from aumhaa.v2.control_surface.mod import *
from aumhaa.v2.control_surface.elements import MonoEncoderElement, MonoBridgeElement, generate_strip_string
from aumhaa.v2.control_surface.elements.mono_button import *
from aumhaa.v2.control_surface.components import MonoDeviceComponent, DeviceNavigator, TranslationComponent, MonoMixerComponent
from aumhaa.v2.control_surface.components.device import DeviceComponent
from aumhaa.v2.control_surface.components.mono_instrument import *
from aumhaa.v2.livid import LividControlSurface, LividSettings, LividRGB
from aumhaa.v2.control_surface.components.fixed_length_recorder import FixedLengthSessionRecordingComponent
from aumhaa.v2.control_surface.components.device import DeviceComponent
from .Map import *
ENCODER_SPEED = [0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 127, 1, 26, 0, 127, 1, 127, 1]
MIDI_NOTE_TYPE = 0
MIDI_CC_TYPE = 1
MIDI_PB_TYPE = 2
MIDI_MSG_TYPES = (MIDI_NOTE_TYPE, MIDI_CC_TYPE, MIDI_PB_TYPE)
MIDI_NOTE_ON_STATUS = 144
MIDI_NOTE_OFF_STATUS = 128
MIDI_CC_STATUS = 176
MIDI_PB_STATUS = 224
def is_device(device):
    """Return True when `device` is a non-None, named Live.Device.Device.

    `device is not None` replaces the original `not device is None` (same
    meaning, idiomatic per PEP 8 / flagged by linters as E714) and
    short-circuits so None never reaches the isinstance/hasattr checks.
    """
    return (device is not None and
            isinstance(device, Live.Device.Device) and
            hasattr(device, 'name'))
def make_pad_translations(chan):
    """Build the 4x4 pad translation table for MIDI channel `chan`.

    Returns a 16-tuple of (column, row, note, channel) entries: pad x maps to
    column x % 4, row x // 4, and MIDI note x + 16 on `chan`.
    """
    # x // 4 replaces the original int(x / 4): identical for these
    # non-negative ints and avoids the float round-trip.
    return tuple((x % 4, x // 4, x + 16, chan) for x in range(16))
def return_empty():
    """Return a fresh, empty list (used as a default factory/callback)."""
    return list()
debug = initialize_debug()
class DS1SessionComponent(SessionComponent):
    """SessionComponent whose scene-launch buttons arrive as a single-row
    button matrix (one column per scene) instead of individual buttons."""

    def set_scene_launch_buttons(self, buttons):
        # The matrix must be exactly one row wide enough for every scene in
        # the session ring.
        assert(not buttons or buttons.width() == self._session_ring.num_scenes and buttons.height() == 1)
        if buttons:
            for button, (x, _) in buttons.iterbuttons():
                scene = self.scene(x)
                debug('setting scene launch for button:', button, 'scene:', scene)
                scene.set_launch_button(button)
        else:
            # No matrix supplied: detach the launch button from every scene.
            for x in xrange(self._session_ring.num_scenes):
                scene = self.scene(x)
                scene.set_launch_button(None)
class DS1SessionNavigationComponent(SessionNavigationComponent):
    """Adds track-bank navigation driven by a single endless encoder."""

    def set_track_select_dial(self, dial):
        # Re-target the value listener below at the given dial element.
        self._on_track_select_dial_value.subject = dial

    @listens('value')
    def _on_track_select_dial_value(self, value):
        # The encoder sends 127 for a left turn and other values (typically 1)
        # for a right turn; only bank when movement in that direction is
        # possible.
        self._can_bank_left() and self._bank_left() if value == 127 else self._can_bank_right() and self._bank_right()
class DS1TransportComponent(TransportComponent):
    # Ties the stop button's color to the play toggle state rather than to
    # an independent stop state.

    def _update_stop_button_color(self):
        self._stop_button.color = 'Transport.StopOn' if self._play_toggle.is_toggled else 'Transport.StopOff'
class DS1(LividControlSurface):
    # Control-surface script for the Livid DS1: builds the control elements
    # (faders, dials, encoders, buttons), wires them into mixer / session /
    # transport / device / recording components, and defines the main modes.

    _sysex_id = 16
    _model_name = 'DS1'

    def __init__(self, c_instance):
        super(DS1, self).__init__(c_instance)
        self._skin = Skin(DS1Colors)
        # Component construction must happen inside the guard so listeners
        # and control ownership are registered with the control surface.
        with self.component_guard():
            self._define_sysex()
            self._setup_controls()
            self._setup_background()
            self._setup_m4l_interface()
            self._setup_session_control()
            self._setup_mixer_control()
            self._setup_transport_control()
            self._setup_device_control()
            self._setup_session_recording_component()
            self._setup_main_modes()

    def _initialize_script(self):
        # Called once the script is live: enable modes and select the default.
        super(DS1, self)._initialize_script()
        self._main_modes.set_enabled(True)
        self._main_modes.selected_mode = 'Main'

    def _initialize_hardware(self):
        # Push device-level settings to the controller over sysex.
        super(DS1, self)._initialize_hardware()
        self.local_control_off.enter_mode()
        self.encoder_absolute_mode.enter_mode()
        self.encoder_speed_sysex.enter_mode()

    def _define_sysex(self):
        # Sysex "modes" that transmit a settings message when entered.
        self.encoder_speed_sysex = SendLividSysexMode(livid_settings = self._livid_settings, call = 'set_encoder_mapping', message = ENCODER_SPEED)
        self.encoder_absolute_mode = SendLividSysexMode(livid_settings = self._livid_settings, call = 'set_encoder_encosion_mode', message = [2])
        self.local_control_off = SendLividSysexMode(livid_settings = self._livid_settings, call = 'set_local_control', message = [0])

    def _setup_controls(self):
        # Instantiate every physical control element, then group them into
        # matrices used by the component layers below.
        is_momentary = True
        optimized = True
        resource = PrioritizedResource
        self._fader = [MonoEncoderElement(mapping_feedback_delay = -1, msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = DS1_FADERS[index], name = 'Fader_' + str(index), num = index, script = self, optimized_send_midi = optimized, resource_type = resource) for index in range(8)]
        self._dial = [[MonoEncoderElement(mapping_feedback_delay = -1, msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = DS1_DIALS[x][y], name = 'Dial_' + str(x) + '_' + str(y), num = x + (y*5), script = self, optimized_send_midi = optimized, resource_type = resource) for x in range(8)] for y in range(5)]
        self._side_dial = [MonoEncoderElement(mapping_feedback_delay = -1, msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = DS1_SIDE_DIALS[x], name = 'Side_Dial_' + str(x), num = x, script = self, optimized_send_midi = optimized, resource_type = resource) for x in range(4)]
        self._encoder = [MonoEncoderElement(mapping_feedback_delay = -1, msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = DS1_ENCODERS[x], name = 'Encoder_' + str(x), num = x, script = self, optimized_send_midi = optimized, resource_type = resource) for x in range(4)]
        self._encoder_button = [MonoButtonElement(is_momentary = is_momentary, msg_type = MIDI_NOTE_TYPE, channel = CHANNEL, identifier = DS1_ENCODER_BUTTONS[index], name = 'EncoderButton_' + str(index), script = self, skin = self._skin, optimized_send_midi = optimized, resource_type = resource) for index in range(4)]
        self._master_fader = MonoEncoderElement(msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = DS1_MASTER, name = 'MasterFader', num = 0, script = self, optimized_send_midi = optimized, resource_type = resource)
        self._button = [MonoButtonElement(is_momentary = is_momentary, msg_type = MIDI_NOTE_TYPE, channel = CHANNEL, identifier = DS1_BUTTONS[index], name = 'Button_' + str(index), script = self, skin = self._skin, optimized_send_midi = optimized, resource_type = resource) for index in range(16)]
        self._grid = [[MonoButtonElement(is_momentary = is_momentary, msg_type = MIDI_NOTE_TYPE, channel = CHANNEL, identifier = DS1_GRID[x][y], name = 'Button_' + str(x) + '_' + str(y), script = self, skin = self._skin, optimized_send_midi = optimized, resource_type = resource) for x in range(3)] for y in range(3)]
        # Dummy dials (CC 120+) give the selected-strip layer extra targets.
        self._dummy = [MonoEncoderElement(msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = 120+x, name = 'Dummy_Dial_' + str(x), num = x, script = self, optimized_send_midi = optimized, resource_type = resource) for x in range(5)]
        self._fader_matrix = ButtonMatrixElement(name = 'FaderMatrix', rows = [self._fader])
        self._top_buttons = ButtonMatrixElement(name = 'TopButtonMatrix', rows = [self._button[:8]])
        self._bottom_buttons = ButtonMatrixElement(name = 'BottomButtonMatrix', rows = [self._button[8:]])
        self._dial_matrix = ButtonMatrixElement(name = 'DialMatrix', rows = self._dial)
        self._side_dial_matrix = ButtonMatrixElement(name = 'SideDialMatrix', rows = [self._side_dial])
        self._encoder_matrix = ButtonMatrixElement(name = 'EncoderMatrix', rows = [self._encoder])
        self._encoder_button_matrix = ButtonMatrixElement(name = 'EncoderButtonMatrix', rows = [self._encoder_button])
        self._grid_matrix = ButtonMatrixElement(name = 'GridMatrix', rows = self._grid)
        # Note encoder[1] is excluded: it is reserved for track selection.
        self._selected_parameter_controls = ButtonMatrixElement(name = 'SelectedParameterControls', rows = [self._dummy + self._encoder[:1] + self._encoder[2:]])

    def _setup_background(self):
        # Low-priority layer that "owns" every control so unassigned controls
        # are grabbed and kept dark instead of dangling.
        self._background = BackgroundComponent(name = 'Background')
        self._background.layer = Layer(priority = 0, fader_matrix = self._fader_matrix,
                                       top_buttons = self._top_buttons,
                                       bottom_buttons = self._bottom_buttons,
                                       dial_matrix = self._dial_matrix,
                                       side_dial_matrix = self._side_dial_matrix,
                                       encoder_button_matrix = self._encoder_button_matrix,
                                       grid_matrix = self._grid_matrix)
        self._background.set_enabled(True)

    def _setup_autoarm(self):
        # NOTE(review): not called from __init__ — auto-arm appears unused.
        self._auto_arm = AutoArmComponent(name='Auto_Arm')
        self._auto_arm.can_auto_arm_track = self._can_auto_arm_track

    def _tracks_to_use(self):
        # The session ring spans visible tracks plus return tracks.
        return self.song.visible_tracks + self.song.return_tracks

    def _setup_session_control(self):
        self._session_ring = SessionRingComponent(num_tracks = 8, num_scenes = 1, tracks_to_use = self._tracks_to_use)
        self._session_ring.set_enabled(True)
        self._session_navigation = DS1SessionNavigationComponent(name = 'SessionNavigation', session_ring = self._session_ring)
        self._session_navigation._vertical_banking.scroll_up_button.color = 'Session.NavigationButtonOn'
        self._session_navigation._vertical_banking.scroll_down_button.color = 'Session.NavigationButtonOn'
        self._session_navigation._horizontal_banking.scroll_up_button.color = 'Session.NavigationButtonOn'
        self._session_navigation._horizontal_banking.scroll_down_button.color = 'Session.NavigationButtonOn'
        # Track banking: encoder 1 while its push-button (modifier) is held.
        self._session_navigation.layer = Layer(priority = 4, track_select_dial = ComboElement(control = self._encoder[1], modifier = [self._encoder_button[1]] ), up_button = self._grid[0][1], down_button = self._grid[0][2])
        self._session_navigation.set_enabled(False)
        self._session = DS1SessionComponent(session_ring = self._session_ring, auto_name = True)
        hasattr(self._session, '_enable_skinning') and self._session._enable_skinning()
        self._session.layer = Layer(priority = 4, scene_launch_buttons = self._grid_matrix.submatrix[1:2, 1:2])
        # Clip launch / stop buttons are only mapped while 'Clips' mode is on.
        self._session.clips_layer = AddLayerMode(self._session, Layer(priority = 4, clip_launch_buttons = self._top_buttons, stop_track_clip_buttons = self._bottom_buttons))
        self._session.set_enabled(False)

    def _setup_mixer_control(self):
        self._mixer = MonoMixerComponent(name = 'Mixer', num_returns = 2, tracks_provider = self._session_ring, track_assigner = simple_track_assigner, invert_mute_feedback = True, auto_name = True, enable_skinning = True)
        self._mixer.master_strip().set_volume_control(self._master_fader)
        self._mixer.set_prehear_volume_control(self._side_dial[3])
        self._mixer.layer = Layer(volume_controls = self._fader_matrix, track_select_dial = self._encoder[1])
        self._strip = [self._mixer.channel_strip(index) for index in range(8)]
        # One dial column per channel strip.
        for index in range(8):
            self._strip[index].layer = Layer(priority = 4, parameter_controls = self._dial_matrix.submatrix[index:index+1, :])
        self._mixer.selected_strip().layer = Layer(priority = 4, parameter_controls = self._selected_parameter_controls)
        self._mixer.master_strip().layer = Layer(priority = 4, parameter_controls = self._side_dial_matrix.submatrix[:3, :])
        # Mode-dependent button rows: Main = solo/mute, Select = arm/select.
        self._mixer.main_layer = AddLayerMode(self._mixer, Layer(priority = 4, solo_buttons = self._bottom_buttons, mute_buttons = self._top_buttons))
        self._mixer.select_layer = AddLayerMode(self._mixer, Layer(priority = 4, arm_buttons = self._bottom_buttons, track_select_buttons = self._top_buttons))
        self.song.view.selected_track = self._mixer.channel_strip(0)._track
        self._mixer.set_enabled(False)

    def _setup_transport_control(self):
        self._transport = DS1TransportComponent()
        self._transport.name = 'Transport'
        self._transport._record_toggle.view_transform = lambda value: 'Transport.RecordOn' if value else 'Transport.RecordOff'
        self._transport.layer = Layer(priority = 4, stop_button = self._grid[1][0], play_button = self._grid[0][0], record_button = self._grid[2][0])
        self._transport.set_enabled(True)

    def _setup_device_control(self):
        self._device = DeviceComponent(name = 'Device_Component', device_provider = self._device_provider, device_bank_registry = DeviceBankRegistry())
        self._device_navigator = DeviceNavigator(self._device_provider, self._mixer, self)
        self._device_navigator.name = 'Device_Navigator'

    def _setup_session_recording_component(self):
        self._clip_creator = ClipCreator()
        self._clip_creator.name = 'ClipCreator'
        self._recorder = SessionRecordingComponent(self._clip_creator, ViewControlComponent())
        self._recorder.set_enabled(True)
        self._recorder.layer = Layer(priority = 4, automation_button = self._grid[1][2], record_button = self._grid[2][1],)

    def _setup_m4l_interface(self):
        # Expose control grab/release hooks to Max-for-Live devices.
        self._m4l_interface = M4LInterfaceComponent(controls=self.controls, component_guard=self.component_guard, priority = 10)
        self._m4l_interface.name = "M4LInterface"
        self.get_control_names = self._m4l_interface.get_control_names
        self.get_control = self._m4l_interface.get_control
        self.grab_control = self._m4l_interface.grab_control
        self.release_control = self._m4l_interface.release_control

    def _setup_translations(self):
        # NOTE(review): not called from __init__ — translations appear unused.
        controls = []
        for control in self.controls:
            controls.append(control)
        self._translations = TranslationComponent(controls, 10)
        self._translations.name = 'TranslationComponent'
        self._translations.set_enabled(False)

    def _setup_main_modes(self):
        # Three top-level modes cycled by grid[2][2]; each bundles the mixer
        # plus the button layers that row should control in that mode.
        self._main_modes = ModesComponent(name = 'MainModes')
        self._main_modes.add_mode('Main', [self._mixer, self._mixer.main_layer, self._session, self._session_navigation], cycle_mode_button_color = 'ModeButtons.Main')
        self._main_modes.add_mode('Select', [self._mixer, self._mixer.select_layer, self._session, self._session_navigation], cycle_mode_button_color = 'ModeButtons.Select')
        self._main_modes.add_mode('Clips', [self._mixer, self._session, self._session.clips_layer, self._session_navigation], cycle_mode_button_color = 'ModeButtons.Clips')
        self._main_modes.layer = Layer(priority = 4, cycle_mode_button = self._grid[2][2])
        self._main_modes.selected_mode = 'Main'
        self._main_modes.set_enabled(False)

    def _can_auto_arm_track(self, track):
        # A track qualifies for auto-arm when its input routing is the
        # catch-all input or comes from this controller.
        routing = track.current_input_routing
        return routing == 'Ext: All Ins' or routing == 'All Ins' or routing.startswith('DS1 Input')
#self._main_modes.selected_mode in ['Sends', 'Device'] and
# a | [
"aumhaa@gmail.com"
] | aumhaa@gmail.com |
5e835b0217af9cacfda1300a020affd8d3af6e9d | eebced51fe5452c4bdd3332c4c8c85caa73f753c | /temp.py | 9876ec2057147f22db73156f5d516c51ed319e1a | [] | no_license | sobanjawaid26/GFG-algo-ds-practice-with-test-cases | bd0dbc8f6b117ace135799d259d2821e1330d8df | fe5c0355050291fc186df5367d1865c830b57ef8 | refs/heads/master | 2023-07-12T17:18:11.568612 | 2021-09-01T15:09:01 | 2021-09-01T15:09:01 | 401,565,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,882 | py | # Python program for flattening a Linked List
class Node():
    """Node of a two-dimensional linked list."""

    def __init__(self, data):
        self.data = data
        # Horizontal and vertical successors start out detached.
        self.right = self.down = None
class LinkedList():
    """A 2-D linked list: top row linked via ``right``, each column a
    sorted sub-list linked via ``down``. ``flatten`` merges everything
    into one sorted ``down``-linked list."""

    def __init__(self):
        # head of list
        self.head = None

    def push(self, head_ref, data):
        """Prepend *data* to the ``down``-linked list *head_ref*.

        Returns the new head so the caller can re-link it.
        """
        new_node = Node(data)
        # Link the old head *below* the new node (these are vertical lists).
        new_node.down = head_ref
        return new_node

    def printList(self):
        """Print the ``down``-linked list starting at self.head on one line."""
        temp = self.head
        while (temp != None):
            print(temp.data, end=" ")
            temp = temp.down
        print()

    def merge(self, a, b):
        """Merge two sorted ``down``-linked lists into one sorted list.

        Iterative (the original recursed once per node, which raises
        RecursionError for lists of ~1000 nodes). Tie-breaking and the
        clearing of ``right`` pointers match the recursive version.
        """
        # An empty side means the other list is already the answer.
        if a == None:
            return b
        if b == None:
            return a
        # Choose the head, then splice the remaining nodes iteratively.
        if a.data < b.data:
            result = a
            a = a.down
        else:
            result = b
            b = b.down
        result.right = None
        tail = result
        while a != None and b != None:
            if a.data < b.data:
                tail.down = a
                a = a.down
            else:
                tail.down = b
                b = b.down
            tail = tail.down
            tail.right = None
        # Append whichever list still has nodes left.
        tail.down = a if a != None else b
        return result

    def flatten(self, root):
        """Flatten the whole structure into a single sorted ``down`` list."""
        # Base Case: nothing to merge.
        if (root == None or root.right == None):
            return root
        # Collect the column heads, then fold them right-to-left — the same
        # merge order as the original recursive implementation.
        heads = []
        node = root
        while node != None:
            heads.append(node)
            node = node.right
        result = heads[-1]
        for h in reversed(heads[:-1]):
            result = self.merge(h, result)
        return result
# Driver program to test above functions
L = LinkedList()
'''
Let us create the following linked list
5 -> 10 -> 19 -> 28
| | | |
V V V V
7 20 22 35
| | |
V V V
8 50 40
| |
V V
30 45
'''
# Build the first column (5 -> 7 -> 8 -> 30); push prepends, so insert
# bottom-up.
L.head = L.push(L.head, 30);
L.head = L.push(L.head, 8);
L.head = L.push(L.head, 7);
L.head = L.push(L.head, 5);
# Second column hangs off head.right (10 -> 20).
L.head.right = L.push(L.head.right, 20);
L.head.right = L.push(L.head.right, 10);
# Third column (19 -> 22 -> 50).
L.head.right.right = L.push(L.head.right.right, 50);
L.head.right.right = L.push(L.head.right.right, 22);
L.head.right.right = L.push(L.head.right.right, 19);
# Fourth column (20 -> 35 -> 40 -> 45).
L.head.right.right.right = L.push(L.head.right.right.right, 45);
L.head.right.right.right = L.push(L.head.right.right.right, 40);
L.head.right.right.right = L.push(L.head.right.right.right, 35);
L.head.right.right.right = L.push(L.head.right.right.right, 20);
# flatten the list
L.head = L.flatten(L.head);
L.printList()
# This code is contributed by maheshwaripiyush9
| [
"sobanjawaid26@gmail.com"
] | sobanjawaid26@gmail.com |
2d0ec65add986b7f658df04cd49b363a813c07f9 | e330d9e94780fb83af4d7f0a3bbc7258e921bfdf | /my_app/migrations/0001_initial.py | acecd4a71f7df1d8ce39cb065c9f2a8d48255608 | [] | no_license | Kacper-Hernacki/buy-my-stuff | 9d96c467b45bdafed50db1a046405938d99e2a10 | 9ce287bd9752bdf0c5266fbfe81f2762c2b103d9 | refs/heads/master | 2023-04-24T03:58:25.814950 | 2020-06-04T12:43:26 | 2020-06-04T12:43:26 | 269,356,720 | 0 | 0 | null | 2021-04-20T20:02:50 | 2020-06-04T12:42:47 | Python | UTF-8 | Python | false | false | 554 | py | # Generated by Django 3.0.5 on 2020-05-05 16:57
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Search',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('search', models.CharField(max_length=500)),
('created', models.DateTimeField(auto_now=True)),
],
),
]
| [
"hernackikacper@gmail.com"
] | hernackikacper@gmail.com |
65b1a3b59c48e5efeb1e7f5cdd75370358d4b584 | aebc347ff9a8ad739111f13aa8d4cf9d48a1e4bd | /data/170818/170818_125942_normal_trap_sweep/0027_normal_trap_sweep_E5071C.py | 348f43e7772521a0bba15513497252a31eeb63b7 | [] | no_license | geyang/170422_EonHe_M018V6 | f01a60d3b8a911ba815a0fcc0bf1b6e2aa8f5f17 | ce189e22f99942e46fce84a0dca714888e44bc69 | refs/heads/master | 2021-06-22T21:01:23.257239 | 2017-08-25T09:48:17 | 2017-08-25T09:48:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,742 | py | from data_cache import dataCacheProxy
from time import sleep, time, strftime
from setup_instruments import fridge, seekat, yoko1, nwa, filament
from resonance_fitting import fit_res_gerwin
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import os
from shutil import copyfile
import quicktimetrace_4D as fastsweep
# Name of this script (copied next to the data for provenance) and the
# experiment label used in the data directory name.
this_script = r"0027_normal_trap_sweep_E5071C.py"
expt = 'normal_trap_sweep'
# Trap-guard DC bias (V) held constant during the sweep.
Vtg_bias = 0.00
# Whether to (re)program the AWG waveforms and re-fit f0 before sweeping.
do_reload_waveforms = True
do_check_f0 = True
# Reference time for the elapsed-time column in the data cache.
t0 = time()
# The start and stop points of the fast sweep
# Guess for the chemical potential used by f_mu below (units per f_mu fit).
mu_guess = -0.265
def f_mu(mu):
    """Linear calibration mapping chemical-potential guess *mu* to a voltage."""
    intercept = -1.755
    slope = -7.0
    return intercept + slope * mu
# Fixed offset (V) subtracted from every resonator-guard set point.
x = 0.150
# Trap-electrode park / stop / return voltages for the fast sweep (V).
Vtrap_parks = 0.180
Vtrap_stops = 0.170
Vtrap_backs = 0.360
# Resonator-guard voltages derived from the trap voltages via the f_mu
# calibration (the -0.120 shift and 1/1.15 lever arm come from the device
# calibration — TODO confirm against the sample's lever-arm measurement).
Vrg_starts = -1/1.15 * (Vtrap_parks - (f_mu(mu_guess)-0.120)) - x
Vrg_stops = -1/1.15 * (Vtrap_stops -(f_mu(mu_guess)-0.120)) - x
Vrg_backs = -1/1.15 * (Vtrap_backs -(f_mu(mu_guess)-0.120)) - x
# Sample counts for the four segments of the AWG sweep waveform.
N1 = 50
N2 = 25
N3 = 275
N4 = 50
# if np.any(Vtrap_stops == Vtrap_parks):
#     raise ValueError("Stop value for Vtrap cannot be equal to Start value for Vtrap!")
if do_reload_waveforms:
    # Load waveform into memory of the BNCAWG
    # bnc1 drives the resonator guard, bnc2 drives the trap; each gets its
    # own (idle, start, stop, back) voltage tuple.
    for bnc, ch_voltage_params in zip([fastsweep.bnc1, fastsweep.bnc2],
                                      [(0.00, Vrg_starts, Vrg_stops, Vrg_backs), (Vtrap_parks, Vtrap_parks, Vtrap_stops, Vtrap_backs)]):
        bnc.set_output(False)
        fastsweep.setup_waveforms(bnc, ch_voltage_params, (N1, N2, N3, N4), sweep_time=100E-3)
        bnc.set_output(True)
# This is for the get_voltages function:
# Direction flags telling get_voltages which end of each AWG waveform is
# the idle (parked) value.
bnc1_sweeps_up = Vrg_starts > 0
bnc2_sweeps_up = Vtrap_stops < Vtrap_parks
print bnc1_sweeps_up, bnc2_sweeps_up
if __name__ == "__main__":
today = strftime("%y%m%d")
now = strftime("%H%M%S")
expt_path = os.path.join(r'C:\Users\slab\Desktop\Gerwin\data', today, "%s_%s_%s" % (today, now, expt))
print "Saving data in %s" % expt_path
if not os.path.isdir(expt_path):
os.makedirs(expt_path)
sleep(1)
try:
nwa.read_data()
except:
pass
copyfile(os.path.join(r"C:\Users\slab\Desktop\Gerwin\experiment", this_script),
os.path.join(expt_path, this_script))
dataCache = dataCacheProxy(file_path=os.path.join(expt_path, os.path.split(expt_path)[1] + ".h5"))
prefix = "electron_loading"
fridgeParams = {'wait_for_temp': 0.080,
'min_temp_wait_time': 60}
filamentParams = {"amplitude": 4.2,
"offset": -0.5,
"frequency": 113e3,
"duration": 40e-3}
pulseParams = {"delay": .00,
"pulses": 200}
# for yoko in [yoko1]:
# yoko.set_mode('VOLT')
# yoko.set_voltage_limit(10)
# yoko.set_output(True)
    def set_voltages(res, trap, res_guard, trap_guard, pinch=None, verbose=True):
        """Set the DC electrode voltages; None leaves an electrode untouched.

        res / trap_guard / pinch go through the Seekat DAC; trap and
        res_guard re-bound the BNC AWG sweep waveforms instead (keeping a
        0.10 V sweep window around the requested idle value). Every call
        appends a timestamped snapshot of all five electrodes to the
        'voltage_log' dataset.
        """
        if res is not None:
            seekat.set_voltage(1, res, verbose=verbose)
        if trap is not None:
            # Trap is driven by bnc2; keep the waveform's idle end at `trap`.
            if bnc2_sweeps_up:
                fastsweep.change_sweep_bounds(fastsweep.bnc2, trap+0.10, trap)
            else:
                fastsweep.change_sweep_bounds(fastsweep.bnc2, trap, trap-0.10)
        if res_guard is not None:
            # Resonator guard is driven by bnc1.
            if bnc1_sweeps_up:
                fastsweep.change_sweep_bounds(fastsweep.bnc1, res_guard+0.10, res_guard)
            else:
                fastsweep.change_sweep_bounds(fastsweep.bnc1, res_guard, res_guard-0.10)
        if trap_guard is not None:
            seekat.set_voltage(4, trap_guard, verbose=verbose)
        if pinch is not None:
            seekat.set_voltage(5, pinch, verbose=verbose)
        # Log the actual readback values, not the requested ones.
        dataCache.post("voltage_log", np.array([time(),
                                                seekat.get_voltage(1),
                                                fastsweep.get_idle_value(fastsweep.bnc2, sweep_up=bnc2_sweeps_up),
                                                fastsweep.get_idle_value(fastsweep.bnc1, sweep_up=bnc1_sweeps_up),
                                                seekat.get_voltage(4),
                                                seekat.get_voltage(5)]))
    def get_voltages(active_electrodes=[np.nan]*5):
        """Return [res, trap, res_guard, trap_guard, pinch] voltages.

        Entries of *active_electrodes* that are NaN are filled in by querying
        the hardware; non-NaN entries are passed through unchanged (lets the
        caller skip slow queries for electrodes it just set).
        NOTE(review): the mutable default list is mutated in place (ret is
        the same object), so callers relying on the default share state —
        in practice every NaN slot is overwritten each call.
        """
        ret = active_electrodes
        for k in np.where(np.isnan(active_electrodes))[0]:
            if k == 1:
                # Trap electrode
                ret[1] = fastsweep.get_idle_value(fastsweep.bnc2, sweep_up=bnc2_sweeps_up)
            elif k == 2:
                # Resonator guard electrode
                ret[2] = fastsweep.get_idle_value(fastsweep.bnc1, sweep_up=bnc1_sweeps_up)
            else:
                # Remaining electrodes live on Seekat channels k+1.
                ret[k] = seekat.get_voltage(k+1)
        return ret
filament.setup_driver(**filamentParams)
filament.set_timeout(10000)
print filament.get_id()
    def unload():
        """Dump all electrons by cycling every electrode to -4 V and back."""
        print "********************"
        print "UNLOADING ELECTRONS!"
        print "********************"
        # Five full down/up voltage cycles on all four biased electrodes.
        for k in range(5):
            print "\tStep %d"%(k+1)
            for volts in [-1, -2, -3, -4, -3, -2, -1]:
                set_voltages(volts, volts, volts, volts, verbose=False)
                sleep(0.5)
    def unload_trap(start=-3.0, stop=-5.0):
        """Clear only the trap electrode, then restore the original voltages."""
        print "********************"
        print "UNLOADING TRAP ONLY!"
        print "********************"
        # Remember the current bias point so it can be restored afterwards.
        res_init, trap_init, res_guard_init, trap_guard_init, pinch = get_voltages()
        # Ramp trap from start down to stop and back up, in 1 V steps.
        vs = list(np.arange(start, stop, -1)) +\
             list(np.arange(stop, start, +1))
        for k in range(5):
            print "\tStep %d"%(k+1)
            for volts in vs:
                set_voltages(res_init, volts, res_guard_init, trap_guard_init, verbose=False)
                sleep(0.5)
        # Restore the pre-unload bias point (pinch is left untouched).
        set_voltages(res_init, trap_init, res_guard_init, trap_guard_init)
    def take_trace_and_save(averages, active_electrodes=[np.nan]*5):
        """Record one NWA trace plus temperature/voltage metadata.

        Saves everything into the data cache and returns
        (temperature, frequency points, magnitudes).
        NOTE(review): mutable default argument — get_voltages mutates the
        list it is given, so the default is shared between calls.
        """
        temperature = fridge.get_mc_temperature()
        dataCache.post('temperature', temperature)
        Vres, Vtrap, Vrg, Vtg, Vpinch = get_voltages(active_electrodes)
        dataCache.post('Vres', Vres)
        dataCache.post('Vtrap', Vtrap)
        dataCache.post('Vrg', Vrg)
        dataCache.post('Vtg', Vtg)
        dataCache.post('Vpinch', Vpinch)
        # Averaged sweep only when averaging is actually requested.
        if averages > 1:
            fpts, mags, phases = nwa.take_one_averaged_trace()
        else:
            fpts, mags, phases = nwa.take_one()
        dataCache.post('fpts', fpts)
        dataCache.post('mags', mags)
        dataCache.post('phases', phases)
        # Elapsed time since script start, for time-series plots.
        dataCache.post('time', time() - t0)
        return temperature, fpts, mags
    def unload_with_filament():
        """Bias electrodes negative, wait for a cold fridge, fire the filament."""
        # First loading to get rid of most electrons!
        # NOTE(review): original indentation was lost in transit — presumably
        # only the bias/settle lines are guarded by load_electrons; confirm.
        if load_electrons:
            set_voltages(-3.0, -3.0, 0.0, 0.0)
            sleep(2.0)
        temperature = fridge.get_mc_temperature()
        print "Waiting for consistent electron loading temperature of < 550 mK...."
        while temperature > 0.550:
            temperature = fridge.get_mc_temperature()
            sleep(2)
            print '.',
        filament.fire_filament(100, 0.01)
        print "Fired filament!"
        # Give the fridge time to recover after the filament pulse.
        sleep(10.0)
    def load_resonator_not_trap():
        """Fire the filament with the resonator attractive and the trap repulsive,
        so electrons load onto the resonator only."""
        print "\n"
        print "********************"
        print "LOADING ELECTRONS..."
        print "********************"
        # +2 V on the resonator attracts electrons; -3 V on the trap repels.
        set_voltages(2.0, -3.0, 0.0, 0.0)
        sleep(2.0)
        temperature = fridge.get_mc_temperature()
        print "Waiting for consistent electron loading temperature of < 550 mK...."
        while temperature > 0.550:
            temperature = fridge.get_mc_temperature()
            sleep(2)
            print '.',
        filament.fire_filament(57, 0.01)
        print "Fired filament!"
        # Settling time after the load pulse.
        sleep(15.0)
    def conditional_load(target_deltaf=7.0E6, target_Q=9000):
        """
        Fires the filament until a minimum resonance frequency difference has been satisfied
        and a Q > 9000 has been satisfied.
        :param target_deltaf: Positive frequency difference in Hz
        :return: (f0, Q) of the accepted fit
        """
        # Sentinels so the first loop iteration always loads.
        abs_deltaf = 1e9
        Q = 0
        # Set both the Q and deltaf threshold to something low if you want it to continue after the first load
        while not (Q > target_Q and abs_deltaf > target_deltaf):
            unload_with_filament()
            load_resonator_not_trap()
            # Measurement bias point for the post-load fit.
            set_voltages(0.6, -2.0, None, None)
            sleep(2.0)
            if calibration_averages > 1:
                fpts, mags, phases = nwa.take_one_averaged_trace()
            else:
                fpts, mags, phases = nwa.take_one()
            f0, Q = fit_res_gerwin(fpts, mags, span=3E6)
            # 6.40511 GHz is the bare-resonator reference frequency.
            if np.abs(f0-6.40511e9) - target_deltaf > 1E6:
                # Overshoot by more than 1 MHz: force another load cycle.
                abs_deltaf = 0
                print "Fit result after loading: delta f = %.2f MHz (too high) and Q = %.0f" % (np.abs(f0-6.40511e9)/1E6, Q)
            else:
                abs_deltaf = np.abs(f0-6.40511e9)
                print "Fit result after loading: delta f = %.2f MHz and Q = %.0f" % (abs_deltaf/1E6, Q)
        # Block until the mixing chamber has cooled back below 550 mK.
        not_settled = True
        stable_temp = 0.550
        # print "Waiting for temperature to stabilize to %.0f mK..." % (stable_temp * 1E3)
        while not_settled:
            temperature = fridge.get_mc_temperature()
            if temperature <= stable_temp:
                not_settled = False
        return f0, Q
nwa.set_measure('S21')
calibration_power = -40
calibration_averages = 25
calibration_sweep_points = 401
calibration_ifbw = 10E3
nwa.set_trigger_source('BUS')
nwa.set_format('SLOG')
nwa_calibration_config = {'start' : 6.385E9,
'stop': 6.407E9,
'sweep_points': calibration_sweep_points,
'power': calibration_power,
'averages': calibration_averages,
'ifbw': calibration_ifbw}
nwa.configure(**nwa_calibration_config)
nwa.set_trigger_continuous(True)
fastsweep.setup_calibration_trace(calibration_averages, calibration_sweep_points)
nwa.set_electrical_delay(68E-9)
nwa.set_phase_offset(180.0)
dataCache.set_dict('nwa_calibration_config', nwa_calibration_config)
#dataCache.set_dict('nwa_sweep_config', nwa_sweep_config)
nwa.auto_scale()
# Define the sweep here
v1 = np.arange(0.600, 0.800, 0.050).tolist() + [0.800]
v2 = np.arange(-2.0, 0.0, 0.25).tolist() + np.arange(0.000, 1.0, 0.005).tolist()
Vress = v1 + list(0.80 * np.ones(len(v2)))
Vtraps = np.array(list(-2.0 * np.ones(len(v1))) + v2)
Vresguards = np.zeros(len(v1) + len(v2))
fig = plt.figure(figsize=(8., 12.))
plt.subplot(311)
plt.plot(Vress, 'o', ms=3, color="#23aaff", markeredgecolor="none", label="Resonator")
plt.plot(Vtraps, 'o', ms=3, color="#f4b642", markeredgecolor="none", label='Trap')
plt.plot(Vresguards, 'o', ms=3, color="lawngreen", markeredgecolor="none", label='Res guard')
plt.ylabel("Voltage")
plt.xlim(0, len(Vress))
plt.legend(loc=0, prop={'size' : 8})
if calibration_averages > 1:
fpts, mags, phases = nwa.take_one_averaged_trace()
else:
fpts, mags, phases = nwa.take_one()
plt.subplot(312)
current_vres, current_vtrap, current_vrg, current_vtg, pinch = get_voltages()
plt.text(np.min(fpts) + 0.10*(np.max(fpts)-np.min(fpts)),
np.min(mags) + 0.85*(np.max(mags) - np.min(mags)),
"res, trap, rg, tg = (%.2fV, %.2fV, %.2fV, %.2fV)" % (current_vres, current_vtrap, current_vrg, current_vtg))
plt.plot(fpts, mags)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Magnitude (dB)')
plt.xlim(np.min(fpts), np.max(fpts))
plt.subplot(313)
plt.plot(fpts, phases)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (deg)')
plt.xlim(np.min(fpts), np.max(fpts))
fig.savefig(os.path.join(expt_path, "pre_electron_loading.png"), dpi=200)
# plt.show()
nwa.set_format('MLOG')
nwa.auto_scale()
nwa.set_trigger_source('INT')
nwa.set_trigger_source('BUS')
nwa.set_format('SLOG')
nwa.set_average_state(True)
f0, Q = fit_res_gerwin(fpts, mags, span=2E6)
target_deltaf = 7.00E6
change_readout_freq = True
target_Q = 9200
print "delta f = %.2f MHz and Q = %.0f" % (np.abs(f0 - 6.40511E9) / 1E6, Q)
if do_check_f0 and (not((target_deltaf-0.15E6) < np.abs(f0-6.40511E9) < (target_deltaf+0.05E6)) or Q < target_Q):
unload()
load_electrons = True
change_readout_freq = True
if load_electrons:
# Unload and then load once
f0, Q = conditional_load(target_deltaf=target_deltaf, target_Q=target_Q)
Q_pre_meas = 0
while Q_pre_meas < target_Q:
# Try to adjust the electron density on the resonator:
tries = 0
dataCache.post("f0_pre_meas", f0)
dataCache.post("Q_pre_meas", Q)
abs_deltaf = np.abs(f0 - 6.40511e9)
while (abs_deltaf > target_deltaf) and (tries < 15):
tries += 1
if (abs_deltaf - target_deltaf) < 0.30E6 and tries < 5:
unload_voltage = -0.15
#The first unload shows a really strong decrease.
else:
unload_voltage = -0.25
for i, poo in enumerate([unload_voltage, 0.6]):
set_voltages(poo, None, None, None)
sleep(2.0)
if poo == 0.6:
if calibration_averages > 1:
fpts, mags, phases = nwa.take_one_averaged_trace()
else:
fpts, mags, phases = nwa.take_one()
f0, Q = fit_res_gerwin(fpts, mags, span=3E6)
dataCache.post("f0_pre_meas", f0)
dataCache.post("Q_pre_meas", Q)
abs_deltaf = np.abs(f0 - 6.40511e9)
print "\t%d. delta f = %.2f MHz and Q = %.0f" % (i, abs_deltaf / 1E6, Q)
# Force another reload if f0 doesn't fall in between the following values
Q_pre_meas = Q if (target_deltaf-0.20E6) < np.abs(f0-6.40511E9) < (target_deltaf+0.00E6) else target_Q-1000
# If after adjusting the density the Q falls below 9000, start over
if Q < target_Q:
print "Retrying load, Q < %.0f after adjusting electron density..." % (target_Q)
f0, Q = conditional_load(target_deltaf=target_deltaf, target_Q=target_Q)
# sleep(300)
fridge_temp = fridge.get_mc_temperature()
while fridge_temp > 0.550:
sleep(10.0)
fridge_temp = fridge.get_mc_temperature()
if calibration_averages > 1:
fpts, mags, phases = nwa.take_one_averaged_trace()
else:
fpts, mags, phases = nwa.take_one()
else:
change_readout_freq = True
print "Target deltaf and target Q already satisfied. Starting sweep right away!"
nwa.configure(**nwa_calibration_config)
set_voltages(Vress[0], Vtraps[0], Vresguards[0], Vtg_bias, pinch=-1.00)
f0, Q = fit_res_gerwin(fpts, mags, span=2E6)
if change_readout_freq or np.abs(current_readout_freq - f0) > 150E3:
nwa.set_center_frequency(f0+0.25E6)
print "Drive frequency set to new value: Delta f = %.3f MHz"%((f0-6.40511E9)/1E6)
else:
nwa.set_center_frequency(f0+0.25E6)
print "Drive frequency set to new value: Delta f = %.3f MHz" % ((f0 - 6.40511E9) / 1E6)
p1, p2, p3, constant_Vtrapguard, constant_Vpinch = get_voltages()
# Actual sweep
nwa.set_span(1.5E6)
for k, voltages in tqdm(enumerate(zip(Vress, Vtraps, Vresguards))):
Vres, Vtrap, Vresguard = voltages[0], voltages[1], voltages[2]
print Vres, Vtrap, Vresguard
set_voltages(Vres, Vtrap, Vresguard, None)
active_electrodes = [np.nan]*5
active_electrodes[0] = Vres if Vress[k] == Vress[k-1] else np.nan
active_electrodes[1] = Vtrap if Vtraps[k] == Vtraps[k-1] else np.nan
active_electrodes[2] = Vresguard if Vresguards[k] == Vresguards[k - 1] else np.nan
active_electrodes[3] = constant_Vtrapguard
active_electrodes[4] = constant_Vpinch
T, F, M = take_trace_and_save(calibration_averages, active_electrodes=active_electrodes)
if k == (np.argmin(np.diff(Vress))+1):
print "Adjusting NWA center frequency..."
f0, Q = fit_res_gerwin(F, M, span=2E6)
nwa.set_center_frequency(f0+0.25E6)
nwa.set_format('MLOG')
nwa.auto_scale()
nwa.set_trigger_source('INT')
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
451aae1af4c66971218e6fb7fdfbf086be63787e | c486035ace9953873d717b1b87f5b7ecf828af7d | /hostapd.py | 9be1924f09d824741fdea2b7396e521491e2d805 | [] | no_license | nickd0/python-wifi-connect | 65256c1402406a0dd809abb4419764be6b3231e9 | 750661909cca839db959ee17bfc1bb0511b55294 | refs/heads/master | 2021-04-12T03:00:08.400436 | 2018-03-14T19:25:05 | 2018-03-14T19:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | import config
import random
import time
import os
import subprocess as sp
CONF_FILE = '/tmp/hostapd.conf'
process = None
MAX_ATTEMPTS = 5
attempts = 0
def start():
    """Write the hostapd config file and launch the daemon.

    On a failed launch (non-zero returncode ~2 s after spawn) the function
    retries via restart(), up to MAX_ATTEMPTS times.
    """
    global process
    global attempts
    # Optionally suffix the SSID with the device name (or a random number)
    # so multiple units don't broadcast identical SSIDs.
    if config.hostapd.ssid_randomize:
        addon = os.environ['RESIN_DEVICE_NAME_AT_INIT'] if 'RESIN_DEVICE_NAME_AT_INIT' in os.environ else str(random.randint(1, 1000))
        ssid = config.hostapd.ssid + '_' + addon
    else:
        ssid = config.hostapd.ssid
    conf_file = """
interface={}
ssid={}
hw_mode=g
channel=6
auth_algs=1
wmm_enabled=0
""".format(config.hostapd.iface, ssid)
    with open(CONF_FILE, 'w+') as f:
        f.write(conf_file)
    # NOTE(review): stdout/stderr are piped but never read — a chatty
    # hostapd could fill the pipe buffer; confirm output volume is small.
    process = sp.Popen(['hostapd', CONF_FILE], stdout=sp.PIPE, stderr=sp.PIPE)
    time.sleep(2)
    process.poll()
    if process.returncode:
        if attempts <= MAX_ATTEMPTS:
            # BUG FIX: count the attempt *before* recursing via restart().
            # The original incremented after restart() returned, so the
            # guard never advanced and the retry recursion was unbounded.
            attempts += 1
            restart()
        else:
            print("HOSTAPD: max attempts reached")
def stop():
    # Terminate the running hostapd child.
    # NOTE(review): assumes start() already ran and `process` is a Popen —
    # calling this first would raise AttributeError on None; confirm callers.
    process.kill()
def restart():
    # Tear down and relaunch hostapd (used by start() after a failed launch).
    stop()
    time.sleep(1)
    start()
| [
"nick@farmshelf.com"
] | nick@farmshelf.com |
4c2e0128f87a1e1cd437f60867570b90acb4259e | 714a22e87e5ae6a2b670a10437409100015f171b | /meshzoo/__init__.py | 2e6201faacd3e0de9e0015493737a24f245fd3a2 | [
"MIT"
] | permissive | krober10nd/meshzoo | ce3aa71a8a87a0749df78c6939e7d893a05f91d1 | 5e8b04d81ee5c23887e3d0244273b3d90b2eba9a | refs/heads/master | 2021-02-17T00:04:36.319498 | 2020-02-24T15:52:48 | 2020-02-24T15:52:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | from meshzoo.__about__ import __author__, __author_email__, __version__, __website__
from .cube import cube
from .helpers import create_edges, plot2d, show2d
from .hexagon import hexagon
from .moebius import moebius
from .rectangle import rectangle
from .simple_arrow import simple_arrow
from .simple_shell import simple_shell
from .sphere import icosa_sphere, octa_sphere, tetra_sphere, uv_sphere
from .triangle import triangle
from .tube import tube
# Explicit public API: package metadata, mesh generators, then helpers.
__all__ = [
    "__version__",
    "__author__",
    "__author_email__",
    "__website__",
    #
    "cube",
    "hexagon",
    "moebius",
    "rectangle",
    "simple_arrow",
    "simple_shell",
    "uv_sphere",
    "icosa_sphere",
    "octa_sphere",
    "tetra_sphere",
    "triangle",
    "tube",
    #
    "show2d",
    "plot2d",
    "create_edges",
]
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com |
89855498cc5ffedc6599b095d035b074719742e2 | 0bed1250a4805866f871d037c1bce3e5c8757101 | /MATH PROB/factorsum.py | 49ad0da6391d712e6d693b28e7c0123975692580 | [] | no_license | Srinjana/CC_practice | 13018f5fd09f8a058e7b634a8626668a0058929a | 01793556c1c73e6c4196a0444e8840b5a0e2ab24 | refs/heads/main | 2023-08-02T05:42:49.016104 | 2021-09-20T15:39:24 | 2021-09-20T15:39:24 | 358,312,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | # for a given number from a list of numbers find the factors and add the factors . If the sum of factors is present in the original list, sort the factors in acsending order
# and print them. If sum not in the original list, print -1.
# Author @Srinjana
def findFactorSum(n):
factor = [1]
for i in range (2 ,n+1):
if i%n ==0:
factor.append(i)
return sum(factor)
inplist = list(map(int, input().strip().split(",")))
flag = 0
for i in inplist:
if findFactorSum(i) in inplist:
flag = 1
print(i)
if flag==0:
print(-1)
| [
"srinjanap.official@gmail.com"
] | srinjanap.official@gmail.com |
e1a7eb2527297240c2ea0b1a5148168e97a4f329 | 0041d087b66a665f3c6a267253729026c068944d | /board/admin.py | 2b1d1e75e5009790dfb7c065637ca2db882e8e4c | [] | no_license | Qonrad/tusvri | e0128636de2cca222510c2e5dad5febb45285395 | d8feebf71d514f704d36b31db8637a2796ac932a | refs/heads/master | 2020-03-13T16:56:06.770463 | 2018-04-26T20:39:55 | 2018-04-26T20:39:55 | 126,738,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from django.contrib import admin
# Register your models here.
from .models import Position
admin.site.register(Position) | [
"Conrad@Conrads-MacBook-Pro.local"
] | Conrad@Conrads-MacBook-Pro.local |
219a94a8e0ca3282c8ea199474456ed64cd0b8e2 | 20594b7389f31d960ba16acfe8c4d4787b6c5e7b | /tf_simple_test/keras_mnist/keras_mnist.py | 2cf8dc7ace831e27706286d1ecbb2a816cffe317 | [] | no_license | meton-robean/DeepLearning_proj | 8dee59c54ab06ad0083b74d2e828f84927437232 | 87c1e56fa8e8b9782b9a75f83d0530b9739ad0ef | refs/heads/master | 2018-10-31T05:10:57.078234 | 2018-09-16T02:36:55 | 2018-09-16T02:36:55 | 117,234,457 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,681 | py |
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import keras
import os
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 1
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
model.save_weights('mnist_keras.h5')
print('save model successfully....')
if os.path.exists('mnist_keras.h5'):
from keras.models import model_from_json
model.load_weights('mnist_keras.h5')
json_string = model.to_json()
print(json_string)
print('using existed model file to test...')
score = model.evaluate(x_test, y_test, verbose=0)
else:
print('using training model to test...')
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1]) | [
"meton-robean@qq.com"
] | meton-robean@qq.com |
fdd456ebce30a832b7ebf9ab1907fccf0c09a7d2 | 85ba0cb18acc854d08397bb21c9a3fab143cf938 | /common/__init__.py | cc96600fdf61e299cea76619a4855af4a4f01477 | [] | no_license | WustChuiChui/research | 5eb2006a4febfd8db5a470cd64288fdb990bbc6b | 69156370cc78d90984b7768f87c83b3486ec86b3 | refs/heads/master | 2020-04-18T06:42:13.351765 | 2019-05-30T10:39:52 | 2019-05-30T10:39:52 | 167,333,065 | 17 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | #form FCLayer import *
| [
"wangjia8@xiaomi.com"
] | wangjia8@xiaomi.com |
3db27f60c4eb7ce5a20739d242ecf35db354cf90 | c329057d1561b8ffde0cf26677bb932b4c044826 | /py32.py | 25f958ea6850f7c2c14aa2456d3b6012da3874a1 | [] | no_license | kimotot/pe | b3611662110ca8a07b410a8e3d90c412c9decbd3 | 8d12cc64b0f9ad5156e2b1aed0245726acb9a404 | refs/heads/master | 2021-01-19T11:53:17.683814 | 2017-04-03T05:05:09 | 2017-04-03T05:05:09 | 82,271,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | # coding:UTF-8
import copy
import time
def permutations(origin):
''' 与えられたリスト要素の順列を求める関数
引数はリストなど、イテーラブルなもの
戻値は全ての順列を要素としてリストにしたもの
再帰呼び出し関数'''
if len(origin) == 0:
return [[]]
else:
ans = []
for index,header in enumerate(origin):
new_orign = copy.deepcopy(origin)
del new_orign[index]
for cuder in permutations(new_orign):
cuder.insert(0,header)
ans.append(copy.deepcopy(cuder))
return ans
def permutationsIt(origin):
''' 与えられたリスト要素の順列を求める関数
引数はリストなど、イテーラブルなもの
戻値は全ての順列を要素としてリストにしたもの
再帰呼び出し関数'''
if len(origin) == 0:
yield []
else:
for index, header in enumerate(origin):
new_orign = copy.deepcopy(origin)
del new_orign[index]
for cuder in permutationsIt(new_orign):
cuder.insert(0, header)
yield cuder
def pandegi14(alist):
'''1から9の数字列が、1X4のパンデジタルであるか判定する関数'''
x = alist[0]
y = alist[1]*1000 + alist[2]*100 + alist[3]*10 + alist[4]
z = alist[5]*1000 + alist[6]*100 + alist[7]*10 + alist[8]
if x * y == z:
return True,z
else:
return False,0
def pandegi23(alist):
'''1から9の数字列が、2X3のパンデジタルであるか判定する関数'''
x = alist[0]*10 + alist[1]
y = alist[2]*100 + alist[3]*10 + alist[4]
z = alist[5]*1000 + alist[6]*100 + alist[7]*10 + alist[8]
if x * y == z:
return True,z
else:
return False,0
if __name__ == "__main__":
start = time.time()
s = set()
for n in permutationsIt([1,2,3,4,5,6,7,8,9]):
b,z = pandegi14(n)
if b:
print(14,n)
s.add(z)
b,z = pandegi23(n)
if b:
print(23,n)
s.add(z)
print("総和={0}".format(sum(s)))
elapsed_time = time.time() - start
print("処理時間={0:.4f}".format(elapsed_time))
| [
"god4bid@hear.to"
] | god4bid@hear.to |
a07685e2f447328a9c1b2cf82dd3f85c207276ee | cbf31804ad705886979195b36f5c44301ab58785 | /middleware.py | 8802512e20ff5dd4bb135786e0573e2b63e8f514 | [
"MIT"
] | permissive | nxexox/django-get-domain | f9a21fdfb6245bca2e8cb40e16486ee3a4f304be | a3d493f37ad06d0a915ffe359eaebedaee3d096a | refs/heads/master | 2020-12-02T06:37:53.591841 | 2017-07-11T08:02:18 | 2017-07-11T08:02:18 | 96,866,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | # -*- coding: utf-8 -*-
"""
Middleware for django.http.request.HttpRequest
"""
from django.utils.deprecation import MiddlewareMixin
from django.urls import reverse_lazy
def get_domain(request, reverse_url=None, *args, **kwargs):
"""
Create domain from django.http.request.HttpRequest.
Optional add internal link from reverse_lazy
:param request: Object Request
:param reverse_url: Name urls, from project.urls.
:param args: Optional position arguments for reverse_lazy
:param kwargs: Optional names arguments for reverse_lazy
:type request: django.http.request.HttpRequest
:type reverse_url: str
:type args: list
:type kwargs: dict
:return: Created link
:rtype: str
:raises:
"""
url = "{}://{}".format(request.scheme, request.get_host())
if reverse_url:
url += str(reverse_lazy(reverse_url, args=args, kwargs=kwargs))
return url
class RequestGetDomainMiddleware(MiddlewareMixin):
"""
Additional in django.http.request.HttpRequest function on get validate domain from request.
"""
def process_request(self, request):
request.get_domain = get_domain
| [
"nxexox@gmail.com"
] | nxexox@gmail.com |
7cf52d60ac8b25592fe1eeea129de67bb092c07b | b894d9056a3679930ad3a09a21984a9586cc6719 | /housepwned/pipelines.py | 5fed3687a0431a04531b6327f0f4aabcb0de2adb | [] | no_license | benjumanji/housepwned | 2d2daa786054531adbe692192c81cad5f4f16be5 | 887f34875d285aafbfa38594cd5d39a262d8ac0b | refs/heads/master | 2020-07-04T05:39:45.878024 | 2014-07-23T20:51:45 | 2014-07-23T20:51:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class HousepwnedPipeline(object):
def process_item(self, item, spider):
return item
| [
"edwards.benj@gmail.com"
] | edwards.benj@gmail.com |
6bc9b11127952bf70d28399bc2102d5fabac2edd | 38fc847f91a3dba5c407f1779513c7dccf04ab12 | /string_dispose_new.py | 126c09831b8fd22c8c549592ceb806a0c3fcf471 | [] | no_license | shanhx2000/Hack_cur | 867173e1550559eacd9755b40a97ab74a31f0a4b | 2ceee838022baff84201142620d9f815c7ca1768 | refs/heads/master | 2020-06-19T10:03:00.430855 | 2019-07-14T02:57:41 | 2019-07-14T02:57:41 | 196,668,960 | 0 | 0 | null | 2019-07-13T17:01:08 | 2019-07-13T02:26:57 | HTML | UTF-8 | Python | false | false | 2,981 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
#with open (file) as f:
# for line in f:
# do something...
import os
import re
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
dictionary = {}
force_stopwords = ['、','(',')',',','。',':','“','”',
'\n\u3000','\u3000','的','‘','’',
'a','in','also','below','am','is','are','have',
'the','of',',',' ','and','this','to','be',
'that','it','was','by']
stopwords=[]
def process(textori):
#textori = input()
output_wr = ""
for line in textori:
for word in line:
if word.isalpha() or word == ' ':
output_wr = output_wr + word.lower()
elif word == '\n' or word == '_':
output_wr = output_wr + ' '
else:
output_wr = output_wr + ' '
text = output_wr
fredist = nltk.FreqDist(text.split(' ')) # 获取单文件词频
print(fredist)
f_s_w = open('stopwords.txt','r')
#print(f_s_w)
for line in f_s_w:
#print(line)
stopwords.append(line[:len(line)-1])
#print(stopwords)
for localkey in fredist.keys(): # 所有词频合并。 如果存在词频相加,否则添加
if (localkey in stopwords) or (localkey in force_stopwords): # 检查是否为停用词
# print('-->停用词:', localkey)
continue
if localkey in dictionary.keys(): # 检查当前词频是否在字典中存在
# print('--> 重复值:', localkey, dictionary[localkey]+fredist[localkey],fredist[localkey])
dictionary[localkey] = dictionary[localkey] + fredist[localkey] # 如果存在,将词频累加,并更新字典值
else: # 如果字典中不存在
dictionary[localkey] = fredist[localkey] # 将当前词频添加到字典中
# print('--> 新增值:', localkey, dictionary[localkey])
words = []
for word in dictionary:
tt = ()
tmp_list = [word,dictionary[word]]
tt = tuple(tmp_list)
if word not in stopwords:
words.append(tt)
#print(words)
tmp = sorted(words,key=lambda x:x[1],reverse=True)
#print(tmp)
output_str = []
for item in tmp:
output_str.append(item[0])
return output_str
#write_to_file(tmp,cur_dir+'/data/result/'+nam[0]+'_result.txt')
#print(nam)
'''
'''
#uf.close()
'''
'''
# print('===================================================')
# print(sorted(dictionary.items(), key = lambda x:x[1])) # 根据词频字典值排序,并打印
def write_to_file(words, file='results.txt'):
f = open(file, 'w')
for item in words:
# for field in item:
f.write(str(item[0])+' ')
f.write(str(item[1]))#+','
f.write('\n')
f.close()
| [
"coned@outlook.com"
] | coned@outlook.com |
48329fba254e4b07d3988292bb905c7739573dfe | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544841/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_217/run_cfg.py | 589bc3616bc9bbb96cd7a0726131bdbacc21691c | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544841/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1973.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1974.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1975.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1976.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1977.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
a292d226c79e5613f782f0ea465e9a03c06b0e6d | de725b742e69f38318c04cd44ac970e7135857a5 | /assets/forms.py | 0173d7e2fd8182e88243ee75191332c9c8f1868c | [] | no_license | haochenxiao666/itelftool | e5c0811b48e01d0eeff13d15d33b89960091960a | 8558dce6d97e7443c95513aa1389910c3902043f | refs/heads/master | 2020-04-14T22:55:46.732111 | 2018-10-18T09:00:44 | 2018-10-18T09:00:44 | 164,183,750 | 1 | 0 | null | 2019-01-05T05:05:32 | 2019-01-05T05:05:31 | null | UTF-8 | Python | false | false | 4,458 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.forms.widgets import *
from .models import Asset, IDC, HostGroup, Cabinet
'''
class AssetForm(forms.ModelForm):
class Meta:
model = Asset
exclude = ("id",)
widgets = {
'hostname': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;', 'placeholder': u'必填项'}),
'ip': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;', 'placeholder': u'必填项'}),
'other_ip': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'group': Select(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'asset_no': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'asset_type': Select(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'status': Select(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'os': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'vendor': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'up_time': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'cpu_model': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'cpu_num': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'memory': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'disk': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'sn': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'idc': Select(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'position': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;', 'placeholder': u'物理机写位置,虚机写宿主'}),
'memo': Textarea(attrs={'rows': 4, 'cols': 15, 'class': 'form-control', 'style': 'width:530px;'}),
}
'''
class IdcForm(forms.ModelForm):
# def clean(self):
# cleaned_data = super(IdcForm, self).clean()
# value = cleaned_data.get('ids')
# try:
# Idc.objects.get(name=value)
# self._errors['ids'] = self.error_class(["%s的信息已经存在" % value])
# except Idc.DoesNotExist:
# pass
# return cleaned_data
class Meta:
model = IDC
exclude = ("id",)
widgets = {
'ids': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
'name': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
'address': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
'tel': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
'contact': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
'contact_phone': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
'ip_range': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
'jigui': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
'bandwidth': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
}
class GroupForm(forms.ModelForm):
def clean(self):
cleaned_data = super(GroupForm, self).clean()
value = cleaned_data.get('name')
try:
Cabinet.objects.get(name=value)
self._errors['name'] = self.error_class(["%s的信息已经存在" % value])
except Cabinet.DoesNotExist:
pass
return cleaned_data
class Meta:
model = HostGroup
exclude = ("id", )
widgets = {
'name': TextInput(attrs={'class': 'form-control', 'style': 'width:450px;'}),
'desc': Textarea(attrs={'rows': 4, 'cols': 15, 'class': 'form-control', 'style': 'width:450px;'}),
}
class CabinetForm(forms.ModelForm):
class Meta:
model = Cabinet
exclude = ("id", )
widgets = {
'name': TextInput(attrs={'class': 'form-control', 'style': 'width:450px;'}),
'idc': Select(attrs={'class': 'form-control', 'style': 'width:450px;'}),
'desc': Textarea(attrs={'rows': 4, 'cols': 15, 'class': 'form-control', 'style': 'width:450px;'}),
}
| [
"420521738@qq.com"
] | 420521738@qq.com |
49b38f161f8835d372100339e64dd31ffa5f1974 | 2754b704fc1eacf70e0d99f301a7ed31e5376b34 | /Spoton/search/migrations/0005_auto__add_job.py | 80da31315014da5e053095a49c4100211ed98458 | [] | no_license | 2bethere/SPTest | 4e36745232917fe879eb9fe4204f2440941a98b3 | 0d25e92513c7e40a97e3c0fa55c8a3d46a402e50 | refs/heads/master | 2021-01-01T05:41:22.103400 | 2014-08-01T00:09:28 | 2014-08-01T00:09:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,455 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Job'
db.create_table(u'search_job', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['search.Site'])),
('start_url', self.gf('django.db.models.fields.CharField')(max_length=2000)),
('pagelimit', self.gf('django.db.models.fields.IntegerField')(default=100)),
))
db.send_create_signal(u'search', ['Job'])
def backwards(self, orm):
# Deleting model 'Job'
db.delete_table(u'search_job')
models = {
u'search.event': {
'Meta': {'object_name': 'Event'},
'end_time': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'request': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['search.Site']"}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '2000'})
},
u'search.job': {
'Meta': {'object_name': 'Job'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pagelimit': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['search.Site']"}),
'start_url': ('django.db.models.fields.CharField', [], {'max_length': '2000'})
},
u'search.site': {
'Meta': {'object_name': 'Site'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['search'] | [
"2bethere@gmail.com"
] | 2bethere@gmail.com |
38aef9c63c4f0620b06efcb7670a3ffe1b1044a4 | a8b0266fabd86ff4c1bc86d99a7b91856634f0ba | /cherrypy/test/test_conn.py | 98775a6259afed0d7c52cf36508f5015f2e42776 | [] | no_license | c3pb/wallhackctl | 5a704bc66a035898ed7d490ad6596257fffdc1e8 | 86e9ce09b32149566e50d7d1a880e6a7a86e4616 | refs/heads/master | 2016-09-06T14:57:31.967997 | 2011-02-16T18:54:36 | 2011-02-16T18:54:36 | 1,375,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,607 | py | """Tests for TCP connection handling, including proper and timely close."""
from cherrypy.test import test
test.prefer_parent_path()
from httplib import HTTPConnection, HTTPSConnection, NotConnected, BadStatusLine
import urllib
import socket
import sys
import time
timeout = 1
import cherrypy
from cherrypy.test import webtest
from cherrypy import _cperror
pov = 'pPeErRsSiIsStTeEnNcCeE oOfF vViIsSiIoOnN'
def setup_server():
def raise500():
raise cherrypy.HTTPError(500)
class Root:
def index(self):
return pov
index.exposed = True
page1 = index
page2 = index
page3 = index
def hello(self):
return "Hello, world!"
hello.exposed = True
def timeout(self, t):
return str(cherrypy.server.httpserver.timeout)
timeout.exposed = True
def stream(self, set_cl=False):
if set_cl:
cherrypy.response.headers['Content-Length'] = 10
def content():
for x in range(10):
yield str(x)
return content()
stream.exposed = True
stream._cp_config = {'response.stream': True}
def error(self, code=500):
raise cherrypy.HTTPError(code)
error.exposed = True
def upload(self):
if not cherrypy.request.method == 'POST':
raise AssertionError("'POST' != request.method %r" %
cherrypy.request.method)
return "thanks for '%s'" % cherrypy.request.body.read()
upload.exposed = True
def custom(self, response_code):
cherrypy.response.status = response_code
return "Code = %s" % response_code
custom.exposed = True
def err_before_read(self):
return "ok"
err_before_read.exposed = True
err_before_read._cp_config = {'hooks.on_start_resource': raise500}
def one_megabyte_of_a(self):
return ["a" * 1024] * 1024
one_megabyte_of_a.exposed = True
cherrypy.tree.mount(Root())
cherrypy.config.update({
'server.max_request_body_size': 1001,
'server.socket_timeout': timeout,
})
from cherrypy.test import helper
class ConnectionCloseTests(helper.CPWebCase):
def test_HTTP11(self):
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
self.persistent = True
# Make the first request and assert there's no "Connection: close".
self.getPage("/")
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertNoHeader("Connection")
# Make another request on the same connection.
self.getPage("/page1")
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertNoHeader("Connection")
# Test client-side close.
self.getPage("/page2", headers=[("Connection", "close")])
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertHeader("Connection", "close")
# Make another request on the same connection, which should error.
self.assertRaises(NotConnected, self.getPage, "/")
def test_Streaming_no_len(self):
self._streaming(set_cl=False)
def test_Streaming_with_len(self):
self._streaming(set_cl=True)
def _streaming(self, set_cl):
if cherrypy.server.protocol_version == "HTTP/1.1":
self.PROTOCOL = "HTTP/1.1"
self.persistent = True
# Make the first request and assert there's no "Connection: close".
self.getPage("/")
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertNoHeader("Connection")
# Make another, streamed request on the same connection.
if set_cl:
# When a Content-Length is provided, the content should stream
# without closing the connection.
self.getPage("/stream?set_cl=Yes")
self.assertHeader("Content-Length")
self.assertNoHeader("Connection", "close")
self.assertNoHeader("Transfer-Encoding")
self.assertStatus('200 OK')
self.assertBody('0123456789')
else:
# When no Content-Length response header is provided,
# streamed output will either close the connection, or use
# chunked encoding, to determine transfer-length.
self.getPage("/stream")
self.assertNoHeader("Content-Length")
self.assertStatus('200 OK')
self.assertBody('0123456789')
chunked_response = False
for k, v in self.headers:
if k.lower() == "transfer-encoding":
if str(v) == "chunked":
chunked_response = True
if chunked_response:
self.assertNoHeader("Connection", "close")
else:
self.assertHeader("Connection", "close")
# Make another request on the same connection, which should error.
self.assertRaises(NotConnected, self.getPage, "/")
# Try HEAD. See http://www.cherrypy.org/ticket/864.
self.getPage("/stream", method='HEAD')
self.assertStatus('200 OK')
self.assertBody('')
self.assertNoHeader("Transfer-Encoding")
else:
self.PROTOCOL = "HTTP/1.0"
self.persistent = True
# Make the first request and assert Keep-Alive.
self.getPage("/", headers=[("Connection", "Keep-Alive")])
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertHeader("Connection", "Keep-Alive")
# Make another, streamed request on the same connection.
if set_cl:
# When a Content-Length is provided, the content should
# stream without closing the connection.
self.getPage("/stream?set_cl=Yes",
headers=[("Connection", "Keep-Alive")])
self.assertHeader("Content-Length")
self.assertHeader("Connection", "Keep-Alive")
self.assertNoHeader("Transfer-Encoding")
self.assertStatus('200 OK')
self.assertBody('0123456789')
else:
# When a Content-Length is not provided,
# the server should close the connection.
self.getPage("/stream", headers=[("Connection", "Keep-Alive")])
self.assertStatus('200 OK')
self.assertBody('0123456789')
self.assertNoHeader("Content-Length")
self.assertNoHeader("Connection", "Keep-Alive")
self.assertNoHeader("Transfer-Encoding")
# Make another request on the same connection, which should error.
self.assertRaises(NotConnected, self.getPage, "/")
def test_HTTP10_KeepAlive(self):
self.PROTOCOL = "HTTP/1.0"
if self.scheme == "https":
self.HTTP_CONN = HTTPSConnection
else:
self.HTTP_CONN = HTTPConnection
# Test a normal HTTP/1.0 request.
self.getPage("/page2")
self.assertStatus('200 OK')
self.assertBody(pov)
# Apache, for example, may emit a Connection header even for HTTP/1.0
## self.assertNoHeader("Connection")
# Test a keep-alive HTTP/1.0 request.
self.persistent = True
self.getPage("/page3", headers=[("Connection", "Keep-Alive")])
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertHeader("Connection", "Keep-Alive")
# Remove the keep-alive header again.
self.getPage("/page3")
self.assertStatus('200 OK')
self.assertBody(pov)
# Apache, for example, may emit a Connection header even for HTTP/1.0
## self.assertNoHeader("Connection")
class PipelineTests(helper.CPWebCase):
def test_HTTP11_Timeout(self):
# If we timeout without sending any data,
# the server will close the conn with a 408.
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Connect but send nothing.
self.persistent = True
conn = self.HTTP_CONN
conn.auto_open = False
conn.connect()
# Wait for our socket timeout
time.sleep(timeout * 2)
# The request should have returned 408 already.
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 408)
conn.close()
# Connect but send half the headers only.
self.persistent = True
conn = self.HTTP_CONN
conn.auto_open = False
conn.connect()
conn.send('GET /hello HTTP/1.1')
conn.send(("Host: %s" % self.HOST).encode('ascii'))
# Wait for our socket timeout
time.sleep(timeout * 2)
# The conn should have already sent 408.
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 408)
conn.close()
def test_HTTP11_Timeout_after_request(self):
# If we timeout after at least one request has succeeded,
# the server will close the conn without 408.
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Make an initial request
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/timeout?t=%s" % timeout, skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody(str(timeout))
# Make a second request on the same socket
conn._output('GET /hello HTTP/1.1')
conn._output("Host: %s" % self.HOST)
conn._send_output()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody("Hello, world!")
# Wait for our socket timeout
time.sleep(timeout * 2)
# Make another request on the same socket, which should error
conn._output('GET /hello HTTP/1.1')
conn._output("Host: %s" % self.HOST)
conn._send_output()
response = conn.response_class(conn.sock, method="GET")
try:
response.begin()
except:
if not isinstance(sys.exc_info()[1],
(socket.error, BadStatusLine)):
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" % sys.exc_info()[1])
else:
if response.status != 408:
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" %
response.read())
conn.close()
# Make another request on a new socket, which should work
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody(pov)
# Make another request on the same socket,
# but timeout on the headers
conn.send('GET /hello HTTP/1.1')
# Wait for our socket timeout
time.sleep(timeout * 2)
response = conn.response_class(conn.sock, method="GET")
try:
response.begin()
except:
if not isinstance(sys.exc_info()[1],
(socket.error, BadStatusLine)):
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" % sys.exc_info()[1])
else:
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" %
response.read())
conn.close()
# Retry the request on a new connection, which should work
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody(pov)
conn.close()
def test_HTTP11_pipelining(self):
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Test pipelining. httplib doesn't support this directly.
self.persistent = True
conn = self.HTTP_CONN
# Put request 1
conn.putrequest("GET", "/hello", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
for trial in range(5):
# Put next request
conn._output('GET /hello HTTP/1.1')
conn._output("Host: %s" % self.HOST)
conn._send_output()
# Retrieve previous response
response = conn.response_class(conn.sock, method="GET")
response.begin()
body = response.read()
self.assertEqual(response.status, 200)
self.assertEqual(body, "Hello, world!")
# Retrieve final response
response = conn.response_class(conn.sock, method="GET")
response.begin()
body = response.read()
self.assertEqual(response.status, 200)
self.assertEqual(body, "Hello, world!")
conn.close()
def test_100_Continue(self):
    """Verify server handling of POSTs with and without "Expect: 100-continue"."""
    if cherrypy.server.protocol_version != "HTTP/1.1":
        return self.skip()
    self.PROTOCOL = "HTTP/1.1"
    self.persistent = True
    conn = self.HTTP_CONN
    # Try a page without an Expect request header first.
    # Note that httplib's response.begin automatically ignores
    # 100 Continue responses, so we must manually check for it.
    conn.putrequest("POST", "/upload", skip_host=True)
    conn.putheader("Host", self.HOST)
    conn.putheader("Content-Type", "text/plain")
    conn.putheader("Content-Length", "4")
    conn.endheaders()
    conn.send("d'oh")
    response = conn.response_class(conn.sock, method="POST")
    # _read_status is a private httplib API; it exposes the interim status
    # line before begin() would silently skip a 100 response.
    version, status, reason = response._read_status()
    self.assertNotEqual(status, 100)
    conn.close()
    # Now try a page with an Expect header...
    conn.connect()
    conn.putrequest("POST", "/upload", skip_host=True)
    conn.putheader("Host", self.HOST)
    conn.putheader("Content-Type", "text/plain")
    conn.putheader("Content-Length", "17")
    conn.putheader("Expect", "100-continue")
    conn.endheaders()
    response = conn.response_class(conn.sock, method="POST")
    # ...assert and then skip the 100 response
    version, status, reason = response._read_status()
    self.assertEqual(status, 100)
    while True:
        line = response.fp.readline().strip()
        if line:
            self.fail("100 Continue should not output any headers. Got %r" % line)
        else:
            break
    # ...send the body
    conn.send("I am a small file")
    # ...get the final response
    response.begin()
    self.status, self.headers, self.body = webtest.shb(response)
    self.assertStatus(200)
    self.assertBody("thanks for 'I am a small file'")
    conn.close()
class ConnectionTests(helper.CPWebCase):
    """Tests of request-body reading, chunked encoding, and connection reuse."""

    def test_readall_or_close(self):
        # When a handler errors before reading the request body, the server
        # must still drain (or close) the connection so it can be reused.
        if cherrypy.server.protocol_version != "HTTP/1.1":
            return self.skip()
        self.PROTOCOL = "HTTP/1.1"
        if self.scheme == "https":
            self.HTTP_CONN = HTTPSConnection
        else:
            self.HTTP_CONN = HTTPConnection
        # Test a max of 0 (the default) and then reset to what it was above.
        old_max = cherrypy.server.max_request_body_size
        for new_max in (0, old_max):
            cherrypy.server.max_request_body_size = new_max
            self.persistent = True
            conn = self.HTTP_CONN
            # Get a POST page with an error
            conn.putrequest("POST", "/err_before_read", skip_host=True)
            conn.putheader("Host", self.HOST)
            conn.putheader("Content-Type", "text/plain")
            conn.putheader("Content-Length", "1000")
            conn.putheader("Expect", "100-continue")
            conn.endheaders()
            response = conn.response_class(conn.sock, method="POST")
            # ...assert and then skip the 100 response
            version, status, reason = response._read_status()
            self.assertEqual(status, 100)
            while True:
                skip = response.fp.readline().strip()
                if not skip:
                    break
            # ...send the body
            conn.send("x" * 1000)
            # ...get the final response
            response.begin()
            self.status, self.headers, self.body = webtest.shb(response)
            self.assertStatus(500)
            # Now try a working page with an Expect header...
            # (_output/_send_output are private httplib APIs used to send
            # raw request lines on the already-open socket.)
            conn._output('POST /upload HTTP/1.1')
            conn._output("Host: %s" % self.HOST)
            conn._output("Content-Type: text/plain")
            conn._output("Content-Length: 17")
            conn._output("Expect: 100-continue")
            conn._send_output()
            response = conn.response_class(conn.sock, method="POST")
            # ...assert and then skip the 100 response
            version, status, reason = response._read_status()
            self.assertEqual(status, 100)
            while True:
                skip = response.fp.readline().strip()
                if not skip:
                    break
            # ...send the body
            conn.send("I am a small file")
            # ...get the final response
            response.begin()
            self.status, self.headers, self.body = webtest.shb(response)
            self.assertStatus(200)
            self.assertBody("thanks for 'I am a small file'")
            conn.close()

    def test_No_Message_Body(self):
        # 204/304 responses must carry no body and must not force a close.
        if cherrypy.server.protocol_version != "HTTP/1.1":
            return self.skip()
        self.PROTOCOL = "HTTP/1.1"
        # Set our HTTP_CONN to an instance so it persists between requests.
        self.persistent = True
        # Make the first request and assert there's no "Connection: close".
        self.getPage("/")
        self.assertStatus('200 OK')
        self.assertBody(pov)
        self.assertNoHeader("Connection")
        # Make a 204 request on the same connection.
        self.getPage("/custom/204")
        self.assertStatus(204)
        self.assertNoHeader("Content-Length")
        self.assertBody("")
        self.assertNoHeader("Connection")
        # Make a 304 request on the same connection.
        self.getPage("/custom/304")
        self.assertStatus(304)
        self.assertNoHeader("Content-Length")
        self.assertBody("")
        self.assertNoHeader("Connection")

    def test_Chunked_Encoding(self):
        if cherrypy.server.protocol_version != "HTTP/1.1":
            return self.skip()
        if (hasattr(self, 'harness') and
                "modpython" in self.harness.__class__.__name__.lower()):
            # mod_python forbids chunked encoding
            return self.skip()
        self.PROTOCOL = "HTTP/1.1"
        # Set our HTTP_CONN to an instance so it persists between requests.
        self.persistent = True
        conn = self.HTTP_CONN
        # Try a normal chunked request (with extensions)
        body = ("8;key=value\r\nxx\r\nxxxx\r\n5\r\nyyyyy\r\n0\r\n"
                "Content-Type: application/json\r\n"
                "\r\n")
        conn.putrequest("POST", "/upload", skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.putheader("Transfer-Encoding", "chunked")
        conn.putheader("Trailer", "Content-Type")
        # Note that this is somewhat malformed:
        # we shouldn't be sending Content-Length.
        # RFC 2616 says the server should ignore it.
        conn.putheader("Content-Length", "3")
        conn.endheaders()
        conn.send(body)
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus('200 OK')
        self.assertBody("thanks for 'xx\r\nxxxxyyyyy'")
        # Try a chunked request that exceeds server.max_request_body_size.
        # Note that the delimiters and trailer are included.
        body = "3e3\r\n" + ("x" * 995) + "\r\n0\r\n\r\n"
        conn.putrequest("POST", "/upload", skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.putheader("Transfer-Encoding", "chunked")
        conn.putheader("Content-Type", "text/plain")
        # Chunked requests don't need a content-length
        ## conn.putheader("Content-Length", len(body))
        conn.endheaders()
        conn.send(body)
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus(413)
        conn.close()

    def test_Content_Length(self):
        # Try a non-chunked request where Content-Length exceeds
        # server.max_request_body_size. Assert error before body send.
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest("POST", "/upload", skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.putheader("Content-Type", "text/plain")
        conn.putheader("Content-Length", "9999")
        conn.endheaders()
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus(413)
        self.assertBody("")
        conn.close()

    def test_598(self):
        # Presumably a regression test for ticket 598 — TODO confirm: a slow
        # client reading a large response should still receive the full body.
        remote_data_conn = urllib.urlopen('%s://%s:%s/one_megabyte_of_a/' %
                                          (self.scheme, self.HOST, self.PORT,))
        buf = remote_data_conn.read(512)
        # Pause partway through reading the body (a fraction of `timeout`,
        # which is a module-level value defined earlier in this file).
        time.sleep(timeout * 0.6)
        remaining = (1024 * 1024) - 512
        while remaining:
            data = remote_data_conn.read(remaining)
            if not data:
                break
            else:
                buf += data
                remaining -= len(data)
        # The full megabyte of 'a' characters must arrive intact.
        self.assertEqual(len(buf), 1024 * 1024)
        self.assertEqual(buf, "a" * 1024 * 1024)
        self.assertEqual(remaining, 0)
        remote_data_conn.close()
class BadRequestTests(helper.CPWebCase):
    """Tests of malformed requests that the server must reject."""

    def test_No_CRLF(self):
        # A request line terminated with a bare LF (no CR) must be rejected.
        self.persistent = True
        conn = self.HTTP_CONN
        conn.send('GET /hello HTTP/1.1\n\n')
        response = conn.response_class(conn.sock, method="GET")
        response.begin()
        self.body = response.read()
        self.assertBody("HTTP requires CRLF terminators")
        conn.close()
        # A correct request line followed by an LF-only blank line must be
        # rejected as well.
        conn.connect()
        conn.send('GET /hello HTTP/1.1\r\n\n')
        response = conn.response_class(conn.sock, method="GET")
        response.begin()
        self.body = response.read()
        self.assertBody("HTTP requires CRLF terminators")
        conn.close()
if __name__ == "__main__":
    # Run this test module via the CherryPy test helper's CLI entry point.
    helper.testmain()
| [
"matthias@grawinkel.com"
] | matthias@grawinkel.com |
d991e3d10bdb51337597176552296c3f3c534922 | 526176649fc3d37c87c06626a2e8fcb1cc840bf0 | /workshop/design3.py | 11374fab1b3f72e0bb5d5a2356529133a8583d9a | [] | no_license | rames4498/Bootcamps_and_workshops | cd193bb302f4b2ed9037750b07e35f6875415476 | 402ef143be7a52ae71e08cdf8b7f0ff35d502455 | refs/heads/master | 2022-09-22T04:49:10.657585 | 2022-09-13T07:06:36 | 2022-09-13T07:06:36 | 239,116,561 | 9 | 6 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from turtle import *
# Draw a six-colour spiral: each step is slightly longer and thicker than
# the last, and the heading rotates just under 60 degrees per step so the
# arms of the spiral drift as they grow.
colors = ['red', 'purple', 'blue', 'green', 'yellow', 'orange']

for step in range(360):
    pencolor(colors[step % 6])  # cycle through the palette
    width(step / 100 + 1)       # pen thickens as the spiral expands
    forward(step)               # stride length grows every iteration
    left(59)                    # turn slightly less than 60 degrees
| [
"noreply@github.com"
] | noreply@github.com |
3ca015b9c9648f6db4d24057a4221f7acb6609ab | 1507348aa7121a1dd8d7af07b9bed6599d493b33 | /details.py | e5aa18850df2a6aa790cc8d6fb4feedbf2fad957 | [
"MIT"
] | permissive | naresh-bachwani/NITH_RESULTS_Portal | f0cc06e9ab36e5e123db9b73ff27fc36d1021ffe | 4748cddcb605ae9820c2a8ca188a7dd5fd029511 | refs/heads/master | 2020-04-11T19:03:41.136576 | 2018-12-16T16:41:40 | 2018-12-16T16:41:40 | 162,020,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,426 | py | # Import required packages
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
from kivy.core.window import Window
from kivy.uix.listview import ListItemButton
# import dbraw file
from dbraw import *
# Type of plot requested via the radio buttons: contains "SGPI" and/or "CGPI".
# (The original code had a module-level `global plot_type` statement here;
# `global` is a no-op at module scope, so it has been removed.)
plot_type = []
# Roll numbers collected from the text inputs before plotting.
roll_nos = []
second_roll_nos = []
class ListButton(ListItemButton):
    # Selectable list-item button used by the group_list widget; behaviour
    # is inherited unchanged from kivy's ListItemButton.
    pass
class Details(BoxLayout):
    """Root widget: collects roll numbers and plot options, then triggers plotting."""

    # Connects the value in the TextInput widget to these fields
    # (presumably bound from the matching .kv file — TODO confirm the kv ids)
    second_roll_no_text_input = ObjectProperty()
    roll_no_text_input = ObjectProperty()
    group_list = ObjectProperty()

    # sgpi radio button's functionality
    sgpi_active = ObjectProperty(True)

    def sgpi_clicked(self, instance, value):
        # Toggle "SGPI" membership in the module-level plot_type list.
        # NOTE(review): .remove() raises ValueError if "SGPI" is absent —
        # assumes checkbox events always arrive in strict add/remove pairs.
        if value is True:
            plot_type.append("SGPI")
        else:
            plot_type.remove("SGPI")

    # cgpi radio button's functionality
    cgpi_active = ObjectProperty(False)

    def cgpi_clicked(self, instance, value):
        # Same toggle behaviour as sgpi_clicked, for the "CGPI" option.
        if value is True:
            plot_type.append("CGPI")
        else:
            plot_type.remove("CGPI")

    # when compare button is hit
    def submit_member(self):
        # Fresh local lists on each press; these shadow the module-level
        # roll_nos / second_roll_nos defined at the top of the file.
        roll_nos = []
        second_roll_nos = []
        # Get first roll no from the TextInputs
        roll_no = self.roll_no_text_input.text
        # Get second roll no from the TextInputs
        second_roll_no = self.second_roll_no_text_input.text
        # Add the first roll no to roll_nos
        roll_nos.append(roll_no)
        # check if second roll no text input is empty
        if(second_roll_no != ""):
            second_roll_nos.append(second_roll_no)
        # call main function of dbraw (imported via `from dbraw import *`)
        mainfunc(roll_nos, second_roll_nos, plot_type)

    # when compare to new button is hit
    def delete_member(self, *args):
        # Local assignment only; the module-level list is not modified here.
        second_roll_nos = []
        # clear second roll no text input
        self.second_roll_no_text_input.text = ""

    # when exit button is hit
    def Exit_app(self):
        # stop the app
        App.get_running_app().stop()
        Window.close()  # Closes the window
class DetailsApp(App):
    """Kivy application wrapper; build() returns the root widget tree."""
    def build(self):
        return Details()
# Create the instance of the class DetailsApp
dbApp = DetailsApp()
dbApp.run()  # Running the app; blocks until the window is closed
exit()  # Exits the process
| [
"nbnb76543@gmail.com"
] | nbnb76543@gmail.com |
22851ce7e83e2aef32c5620caf346fae7a63488a | e2f507e0b434120e7f5d4f717540e5df2b1816da | /097-yield-2.py | e7bd0b03ff61e85bbac2470ad044513187273938 | [] | no_license | ash/amazing_python3 | 70984bd32ae325380382b1fe692c4b359ef23395 | 64c98940f8a8da18a8bf56f65cc8c8e09bd00e0c | refs/heads/master | 2021-06-23T14:59:37.005280 | 2021-01-21T06:56:33 | 2021-01-21T06:56:33 | 182,626,874 | 76 | 25 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | # Using yield
def f():
for i in range(100):
yield i # not "return"
# Why do you need this assignment?
g1 = f() # generator object
g2 = f() # another generator
print(next(g1)) # value from generator 1
print(next(g2)) # from generator 2
print(next(g1)) # again 1
print(next(g2)) # ...
print(next(g1))
print(next(g2))
| [
"andy@shitov.ru"
] | andy@shitov.ru |
b4f738393d2222e9668e9e7f689cb0733806ef01 | 87dc1f3fc40565138c1e7dc67f1ca7cb84b63464 | /03_Hard/10_Knapsack_Problem/Knapsack_Problem.py | 4151b0f9027ccdcdfa7f6ccba3270994d39e40ac | [] | no_license | CodeInDna/Algo_with_Python | 8424f79fd3051dbc5861ba171ac2b33c76eec8b9 | a238e9e51effe76c530a4e0da7df871e45ec268a | refs/heads/master | 2021-07-25T08:33:42.475255 | 2021-07-20T16:53:48 | 2021-07-20T16:53:48 | 229,921,183 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,045 | py | # ---------------------------------- PROBLEM 10 (HARD)--------------------------------------#
# Knapsack Problem
# You are given an array of arrays. Each subarray in this array holds two integer values and
# represents an item; the first integer is the item's value, and the second integer is the item's
# weight. You are also given an integer representing the maximum capacity of a knapsack that you have.
# Your goal is to fit items in your knapsack, all the while maximizing their combined value. Note that
# the sum of the weights of the items that you pick cannot exceed the knapsack's capacity. Write a
# function that returns the maximized combined value of the items that you should pick, as well as an
# array of the indices of each item picked. Assume that there will only be one combination of items
# that maximizes the total value in the knapsack.
# Sample input: [[1, 2], [4, 3], [5, 6], [6, 7]], 10
# Sample output: [10, [1, 3]]
# ----------------METHOD 01---------------------#
# COMPLEXITY = TIME: O(Nc), SPACE: O(Nc), where N i the number of items and c is the capacity
def knapsackProblem(lst_Items, target_cap):
knapsackValues = [[0 for _ in range(target_cap + 1)] for _ in range(len(lst_Items) + 1)]
for i in range(1, len(lst_Items) + 1):
currentWeight = lst_Items[i - 1][1]
currentValue = lst_Items[i - 1][0]
for cap in range(target_cap + 1):
if currentWeight > cap:
knapsackValues[i][cap] = knapsackValues[i - 1][cap]
else:
knapsackValues[i][cap] = max(knapsackValues[i - 1][cap], knapsackValues[i - 1][cap - currentWeight] + currentValue)
return [knapsackValues[-1][-1], getKnapsackItems(knapsackValues, lst_Items)]
def getKnapsackItems(knapsackValues, items):
result = []
i = len(knapsackValues) - 1
c = len(knapsackValues[0]) - 1
while i > 0:
if knapsackValues[i][c] == knapsackValues[i - 1][c]:
i -= 1
else:
result.append(i - 1)
c -= items[i - 1][1]
i -= 1
if c == 0:
break
return list(reversed(result))
# ----------------METHOD 01---------------------#
| [
"ykarelia323@gmail.com"
] | ykarelia323@gmail.com |
1770d51d9366b71a8431fa338d98a9ae24c9472a | 45916bcec19f06e952d8dc5b690c5a40ac6d50d3 | /faculty/urls.py | e4b0c58a82a5a1029fb087bbd4106669218a2706 | [] | no_license | Rohitmotri/jcer-rest-api | 7eec30693bc4229bf5f67b7fefa3a6cdf6b86a89 | 934326ef3cb225953a600eb9544eb0ec3548ab45 | refs/heads/main | 2023-01-06T01:53:56.860843 | 2020-10-28T16:02:29 | 2020-10-28T16:02:29 | 306,625,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | from django.urls import path
from faculty.controller.controller import (CreateAdminController,
GetAdminController,
UpdateAdminController,
DeleteAdminController)
urlpatterns = [
path('create/', CreateAdminController.as_view()),
path('get/', GetAdminController.as_view()),
path('update/', UpdateAdminController.as_view()),
path('delete/', DeleteAdminController.as_view()),
]
| [
"rohitarunmotri@gmail.com"
] | rohitarunmotri@gmail.com |
3fb1d9e3ea37cbf120d2ade96202f33e483e029e | c832a78eb1cc4a4c7d5b82d1e3e29af1ceac4091 | /venv/bin/pyrsa-sign | eac0d7802ea70c495c657b75b8007ec3bc4080d3 | [] | no_license | scottwedge/reporting-system | 42b5a48a42251bc290bb474b0bd244ed0ee13ed4 | 59bb703824ca08dfac1550c1e28fc3bc1028e123 | refs/heads/master | 2022-12-25T13:21:04.059774 | 2020-09-24T03:24:39 | 2020-09-24T03:24:39 | 298,155,594 | 0 | 0 | null | 2020-09-24T03:07:57 | 2020-09-24T03:07:56 | null | UTF-8 | Python | false | false | 256 | #!/Users/loctek/Downloads/reporting-system-master/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import sign
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(sign())
| [
"zh"
] | zh | |
5fd75c0e6a3071d2c1d75effabeef72c4de692b4 | a580e56c8d2c62278549f91b61721169d1f8569c | /dojoc.py | 64592ff24fa74f357aa9ce0742887ee9807effd6 | [] | no_license | GustavoKatel/dojoc | c96f5e1c3ba02a02f563d765687bada8788faa55 | c829091775555c50a90fbc84b9039b0ff101411f | refs/heads/master | 2020-04-09T07:45:11.798663 | 2013-11-21T18:46:25 | 2013-11-21T18:46:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,397 | py | #!/usr/bin/python
# Author: Gustavo Brito (GustavoKatel)
import sys, os, re, subprocess
from dojoc_test import DojoCTest
class bcolors:
    """ANSI terminal escape codes used to colour console output."""
    HEADER = '\033[95m'   # bright magenta
    OKBLUE = '\033[94m'   # bright blue
    OKGREEN = '\033[92m'  # bright green
    WARNING = '\033[93m'  # bright yellow
    FAIL = '\033[91m'     # bright red
    ENDC = '\033[0m'      # reset all attributes
class DojoC:
    """Scaffolds and runs C coding-dojo exercises described by a <unit>.test file.

    The .test file's first line declares the function signature as printf-style
    codes (e.g. "%d,%d=%d"); the remaining lines are test cases "args=expected".
    NOTE: this is Python 2 code (print statements).
    """

    # printf code -> C type for the generated function signature.
    types = { "%s":"char *", "%d":"int", "%c":"char", "%f":"float" }
    # printf code -> default C return value used in the generated stub.
    default_values = { "%s":"\"None\"", "%d":"0", "%c":"0", "%f":"0f" }
    return_type = ""  # must be in types
    params = []  # each element must be in types
    test_cases = []  # DojoCTest instances parsed from the .test file

    def __init__(self, unit, debugMode):
        # unit: base name of the exercise; expects <unit>.test next to it.
        self.unit = unit
        self.testfile = unit+".test"
        if not os.path.exists(self.testfile):
            print self.testfile+" not found!"
            sys.exit(1)
        # Directory containing this script (where the .model templates live).
        self.sys_dir = os.path.dirname(os.path.realpath(__file__))
        self.debugMode = debugMode
        self.processTestFile()

    def new(self):
        """Generate <unit>.c from the unit.c.model template."""
        # create initial file
        ufile = open(self.sys_dir+ os.sep + "unit.c.model")
        unitfile = "".join(ufile.readlines())
        ufile.close()
        # Substitute template placeholders with the parsed signature.
        unitfile = unitfile.replace("%unit%", self.unit)
        unitfile = unitfile.replace("%return_type%", self.types[self.return_type] )
        unitfile = unitfile.replace("%default_ret%", self.default_values[self.return_type] )
        # Build the C parameter list "type p0, type p1, ...".
        plist = ""
        for i in range(len(self.params)):
            plist = plist + self.types[self.params[i]] + " p"+str(i)
            if i<len(self.params)-1:
                plist = plist+", "
        unitfile = unitfile.replace("%params%", plist)
        ufile = open(self.unit+".c", "w")
        ufile.write(unitfile)
        ufile.close()

    def test(self):
        """Compile the unit with an injected main() and run all test cases."""
        # create Makefile from its template
        mfile = open(self.sys_dir+ os.sep + "Makefile.model" )
        makefile = "".join(mfile.readlines())
        mfile.close()
        makefile = makefile.replace("%unit%", self.unit)
        mfile = open("Makefile", "w")
        mfile.write(makefile)
        mfile.close()
        # create compilable file: the user's code plus a generated main()
        cfile = open(self.unit+".c")
        content = "".join(cfile.readlines())
        cfile.close()
        content = content+ "\nint main(int argc, char **argv)\n{\n\n%test%\n\n\treturn 0;\n}\n"
        # inject the test cases: one printf per case, printing the result
        # with the declared return-type format so output can be parsed back.
        teststr = ""
        for test in self.test_cases:
            fn = test.getTestFunc()
            fn = "printf(\""+self.return_type+"\\n\", "+fn+");\n"
            teststr = teststr + fn
        content = content.replace("%test%", teststr)
        cfile = open("."+self.unit+".c", "w")
        cfile.write(content)
        cfile.close()
        # compile via make; a non-zero exit means a build failure
        ret = os.system("make")
        if ret>0:
            print bcolors.FAIL+ "NOT PASS" + bcolors.ENDC
            sys.exit(1)
        # execute the built binary and compare each output line to the
        # corresponding test case's expected value
        output = subprocess.check_output("./"+self.unit)
        lines = output.split("\n")
        for i in range(len(lines)):
            val = lines[i]
            if val=="" or val=="\n":
                continue
            print "\n" + bcolors.HEADER + "Test #"+str(i) + bcolors.ENDC
            print self.test_cases[i].toString()
            if self.test_cases[i].test(val)==True:
                print "Result: "+ bcolors.OKGREEN +"PASS" + bcolors.ENDC
            else:
                # First failure aborts the whole run.
                print "Result: "+ bcolors.FAIL + "NOT PASS" + bcolors.ENDC
                print "Expected: "+str(self.test_cases[i].getReturnVal())
                print "Received: "+str(val)
                sys.exit(1)
            print "----------------------------------------"
        if not self.debugMode:
            # Clean intermediate build artefacts but keep the executable.
            subprocess.check_output("make clean-no-exe", shell=True)

    def _getline(self, tfile):
        """Read one line from the .test file with '#' comments stripped."""
        line = tfile.readline()
        # remove comments
        if line.find("#")>=0:
            line = line[:line.find("#")]
        return line

    def processTestFile(self):
        """Parse <unit>.test into self.params / self.return_type / self.test_cases."""
        # process the mask pattern, e.g. "%d,%d=%d"
        tfile = open(self.testfile)
        pattern_types = self._getline(tfile) # tfile.readline()
        regex = "((%[a-z]),)*(%[a-z])=(%[a-z])"
        pattern_types = pattern_types.replace(" ", "")
        m = re.match(regex, pattern_types)
        items = m.groups()
        for i in range(len(items)-1):
            # Group 0 is the repeated "(%x,)" wrapper; None groups didn't match.
            if i==0 or items[i]==None:
                continue
            self.params.append(items[i])
        # Relies on the loop variable leaking: items[i+1] is the last group,
        # i.e. the declared return type.
        self.return_type = items[i+1]
        # process the test cases: each "a,b=c" line becomes a DojoCTest
        line = "ignore me!"
        while not line=="":
            line = self._getline(tfile) # tfile.readline()
            if line.strip()=="":
                continue
            # Rewrite "a,b=c" into the tuple "([a,b], c)" and evaluate it.
            line = "(["+line[:-1]+")"
            line = line.replace("=", "],")
            line = "(params, return_val) = "+line
            # NOTE(review): exec() on file content — only run trusted .test
            # files; a malicious file can execute arbitrary Python here.
            exec(line)
            self.test_cases.append(DojoCTest(self.unit, params,return_val))
        tfile.close()
if __name__=="__main__":
    # CLI: dojoc.py [new|test] unitName [debug]
    if len(sys.argv)>=3:
        operador = sys.argv[1]   # sub-command ("new" or "test")
        unidade = sys.argv[2]    # exercise unit name
        debug = False
        if len(sys.argv)>=4:
            if sys.argv[3]=="debug":
                # debug mode skips the post-run build cleanup
                debug = True
        dojoc = DojoC(unidade, debug)
        if operador=="new":
            dojoc.new()
        elif operador=="test":
            dojoc.test()
    else:
        print "python dojoc.py [new|test] unitName"
| [
"gustavobs.katel@gmail.com"
] | gustavobs.katel@gmail.com |
0f4debbac5c41257777ec1da622fbc2f30e437f7 | 81b384655e970623333971ed063d85ebfe940ed5 | /hallo/modules/furry.py | e0c6e24dcdc8f58336a441e2edb56cbd20652d5e | [] | no_license | wirenic/Hallo | c3c8a3f11dd1f03729385f2761e0a6b216c6e1d2 | 68595816fd146c4af35e3f1bc91c58cdc6fa741c | refs/heads/master | 2023-03-18T06:50:14.111226 | 2021-02-26T07:59:33 | 2021-02-26T07:59:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,300 | py | from hallo.function import Function
from hallo.inc.commons import Commons
class E621(Function):
    """
    Returns a random image from e621
    """

    # Maps e621 rating codes to the human-readable labels shown in responses.
    # (Replaces a duplicated if/elif chain; unknown codes fall back below.)
    RATING_LABELS = {"e": "(Explicit)", "q": "(Questionable)", "s": "(Safe)"}

    def __init__(self):
        """
        Constructor
        """
        super().__init__()
        # Name for use in help listing
        self.help_name = "e621"
        # Names which can be used to address the function
        self.names = {"e621"}
        # Help documentation, if it's just a single line, can be set here
        self.help_docs = "Returns a random e621 result using the search you specify. Format: e621 <tags>"

    def run(self, event):
        """Handle an "e621 <tags>" command: random search, reply with link and rating."""
        search_result = self.get_random_link_result(event.command_args)
        if search_result is None:
            return event.create_response("No results.")
        link = "https://e621.net/posts/{}".format(search_result["id"])
        # Dict lookup with a default replaces the previous if/elif chain.
        rating = self.RATING_LABELS.get(search_result["post"]["rating"], "(Unknown)")
        line_response = event.command_args.strip()
        return event.create_response(
            'e621 search for "{}" returned: {} {}'.format(
                line_response, link, rating
            )
        )

    def get_random_link_result(self, search):
        """Gets a random link from the e621 api; returns None when there are no results."""
        line_clean = search.replace(" ", "%20")
        url = "https://e621.net/posts.json?tags=order:random%20score:%3E0%20{}%20&limit=1".format(
            line_clean
        )
        return_list = Commons.load_url_json(url)
        if len(return_list["posts"]) == 0:
            return None
        return return_list["posts"][0]
class RandomPorn(Function):
    """
    Returns a random explicit image from e621
    """

    # Maps e621 rating codes to the human-readable labels shown in responses.
    # (Replaces a duplicated if/elif chain; unknown codes fall back below.)
    RATING_LABELS = {"e": "(Explicit)", "q": "(Questionable)", "s": "(Safe)"}

    def __init__(self):
        """
        Constructor
        """
        super().__init__()
        # Name for use in help listing
        self.help_name = "random porn"
        # Names which can be used to address the function
        self.names = {"random porn", "randomporn"}
        # Help documentation, if it's just a single line, can be set here
        self.help_docs = (
            "Returns a random explicit e621 result using the search you specify. "
            "Format: random porn <tags>"
        )

    def run(self, event):
        """Exclude safe-rated posts from the user's tags and delegate to E621's search."""
        line_unclean = "{} -rating:s".format(event.command_args.strip())
        function_dispatcher = event.server.hallo.function_dispatcher
        e621_class = function_dispatcher.get_function_by_name("e621")
        e621_obj = function_dispatcher.get_function_object(e621_class)  # type: E621
        search_result = e621_obj.get_random_link_result(line_unclean)
        if search_result is None:
            return event.create_response("No results.")
        link = "https://e621.net/posts/{}".format(search_result["id"])
        # Dict lookup with a default replaces the previous if/elif chain.
        rating = self.RATING_LABELS.get(search_result["post"]["rating"], "(Unknown)")
        line_response = event.command_args.strip()
        return event.create_response(
            'e621 search for "{}" returned: {} {}'.format(
                line_response, link, rating
            )
        )
class Butts(Function):
    """
    Returns a random butt from e621
    """

    # Maps e621 rating codes to the human-readable labels shown in responses.
    # (Replaces a duplicated if/elif chain; unknown codes fall back below.)
    RATING_LABELS = {"e": "(Explicit)", "q": "(Questionable)", "s": "(Safe)"}

    def __init__(self):
        """
        Constructor
        """
        super().__init__()
        # Name for use in help listing
        self.help_name = "butts"
        # Names which can be used to address the function
        self.names = {"random butt", "butts", "butts!", "butts."}
        # Help documentation, if it's just a single line, can be set here
        self.help_docs = (
            'Returns a random image from e621 for the search "butt". Format: butts'
        )

    def run(self, event):
        """Fetch a random e621 result for the fixed search "butt" via the E621 function."""
        function_dispatcher = event.server.hallo.function_dispatcher
        e621_class = function_dispatcher.get_function_by_name("e621")
        e621_obj = function_dispatcher.get_function_object(e621_class)  # type: E621
        search_result = e621_obj.get_random_link_result("butt")
        if search_result is None:
            return event.create_response("No results.")
        link = "https://e621.net/posts/{}".format(search_result["id"])
        # Dict lookup with a default replaces the previous if/elif chain.
        rating = self.RATING_LABELS.get(search_result["post"]["rating"], "(Unknown)")
        return event.create_response(
            'e621 search for "butt" returned: {} {}'.format(link, rating)
        )
class Fursona(Function):
"""
Generates a random fursona
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "fursona"
# Names which can be used to address the function
self.names = {"fursona", "sona", "random fursona", "random sona"}
# Help documentation, if it's just a single line, can be set here
self.help_docs = "Generates your new fursona. Format: fursona"
def run(self, event):
adjective = [
"eldritch",
"neon green",
"angelic",
"ghostly",
"scene",
"emo",
"hipster",
"alien",
"sweaty",
"OBSCENELY BRIGHT YELLOW",
"spotted",
"hairy",
"glowing",
"pastel pink",
"glittering blue",
"golden",
"shimmering red",
"robotic",
"black",
"goth",
"elegant",
"white",
"divine",
"striped",
"radioactive",
"red and green",
"slimy",
"slime",
"garbage",
"albino",
"skeleton",
"petite",
"swamp",
"aquatic",
"vampire",
"bright pink and yellow",
"mossy",
"stone",
"gray",
"fairy",
"zombie",
"pastel",
"mint green",
"giant",
"big pink",
"tiny pink",
"big white",
"tiny white",
"tiny black",
"translucent",
"glistening",
"glittering black",
"shimmering white",
"iridescent",
"glass",
"silver",
"jewel-encrusted",
"fuschia",
"purple",
"tiny purple",
"lilac",
"lavender",
"shimmering lilac",
"sparkling purple",
"tiny blue",
"heavenly",
"gilded",
"holy",
"blue and white striped",
"black and orange spotted",
"black and red",
"black and orange",
"ancient",
"green",
"purple and blue",
"pink and blue",
"candy",
"abyssal",
"floral",
"candle",
"melanistic",
"punk",
"ethereal",
"unholy",
"celestial",
"cyan",
"cream",
"cream and pink",
"cream and brown",
"yellow",
"black and pink",
"magenta",
"speckled",
"tiger-striped",
"chocolate",
"pastel goth",
"vintage",
"glossy black",
"glossy white",
"glossy gray",
"glossy blue",
"glossy pink",
"shimmery gray",
"glossy yellow",
"magma",
"plastic",
"leucistic",
"piebald",
]
animal = [
"kestrel.",
"goat.",
"sheep.",
"dragon.",
"platypus.",
"blobfish.",
"hydra.",
"wolf.",
"fox.",
"sparkledog.",
"cow.",
"bull.",
"cat.",
"tiger.",
"panther.",
"hellhound.",
"spider.",
"beagle.",
"pomeranian.",
"whale.",
"hammerhead shark.",
"snake.",
"hyena.",
"lamb.",
"pony.",
"horse.",
"pup.",
"swan.",
"pigeon.",
"dove.",
"fennec fox.",
"fish.",
"rat.",
"possum.",
"hamster.",
"deer.",
"elk.",
"reindeer.",
"cheetah.",
"ferret.",
"bear.",
"panda.",
"koala.",
"kangaroo.",
"skink.",
"lizard.",
"iguana.",
"cerberus.",
"turtle.",
"raven.",
"cardinal.",
"bluejay.",
"antelope.",
"buffalo.",
"rabbit.",
"bunny.",
"frog.",
"newt.",
"salamander.",
"cobra.",
"coyote.",
"jellyfish.",
"bee.",
"wasp.",
"dinosaur.",
"bat.",
"worm.",
"chicken.",
"eel.",
"tiger.",
"sloth.",
"seal.",
"vulture.",
"barghest.",
"hedgehog.",
"peacock.",
"anglerfish.",
"dolphin.",
"liger.",
"llama.",
"alpaca.",
"walrus.",
"mantis.",
"ladybug.",
"penguin.",
"flamingo.",
"civet.",
"pudu.",
"crab.",
"maine coon.",
"fawn.",
"siamese.",
"amoeba.",
"owl.",
"unicorn.",
"crocodile.",
"alligator.",
"chihuahua.",
"great dane.",
"dachshund.",
"corgi.",
"rooster.",
"sparrow.",
"wyrm.",
"slug.",
"snail.",
"seagull.",
"badger.",
"gargoyle.",
"scorpion.",
"boa.",
"axolotl.",
]
description1 = [
"it constantly drips with a tar-like black substance.",
"it enjoys performing occult rituals with friends.",
"it is a communist.",
"a golden halo floats above its head.",
"it wears a mcdonalds uniform because it works at mcdonalds.",
"it carries a nail bat.",
"it wears louboutin heels.",
"it has two heads.",
"it has an unknowable amount of eyes.",
"it drools constantly.",
"its tongue is bright green.",
"it has numerous piercings.",
"it is a cheerleader.",
"it is a farmhand.",
"when you see it you are filled with an ancient fear.",
"it wears a toga.",
"it is made of jelly.",
"it has incredibly long and luxurious fur.",
"it uses reddit but won't admit it.",
"it glows softly and gently- evidence of a heavenly being.",
"it is a ghost.",
"it dresses like a greaser.",
"crystals grow from its flesh.",
"it rides motorcycles.",
"it wears incredibly large and impractical sunglasses.",
"it instagrams its starbucks drinks.",
"it is a hired killer.",
"where its tail should be is just another head.",
"it dwells in a bog.",
"it is wet and dripping with algae.",
"it runs a blog dedicated to different types of planes throughout history.",
"it worships the moon.",
"it comes from a long line of royalty.",
"it frolics in flowery meadows.",
"it wears a ballerina's outfit.",
"it wears a neutral milk hotel t-shirt with red fishnets and nothing else.",
"it wears a lot of eye makeup.",
"it won't stop sweating.",
"it has far too many teeth and they are all sharp.",
"it is a tattoo artist.",
"it is shaking.",
"it is a witch.",
"it wears scarves all the time.",
"to look into its eyes is to peer into a distant abyss.",
"mushrooms grow from its skin.",
"its face is actually an electronic screen.",
"it loves to wear combat boots with cute stickers all over them.",
"it comes from space.",
"it is a knife collector.",
"it flickers in and out of this plane of reality.",
"it wishes it were a butt.",
"its eyes are red.",
"it is the most beautiful thing you have ever seen.",
"it loves strawberry milkshakes.",
"it cries all the time and can't really do much about it.",
"it lives alone in a dense forgotten wilderness.",
"it wears big christmas sweaters year-round.",
"it floats about a foot off of the ground.",
"it loves trash.",
"it has demonic wings.",
"it has a cutie mark of a bar of soap.",
"it is melting.",
"it wears opulent jewelry of gold and gemstones.",
"it has a hoard of bones.",
"it has ram horns.",
"it has a forked tongue.",
"it wears frilly dresses.",
"it has antlers.",
"it is a nature spirit.",
"its back is covered in candles which flicker ominously.",
"it wears a leather jacket with lots of patches.",
"it wears a snapback.",
"it has a tattoo that says 'yolo'.",
"electricity flickers through the air surrounding it.",
"it is a fire elemental.",
"it consumes only blood.",
"it works at an adorable tiny bakery.",
"it is a professional wrestler.",
"instead of eyes there are just more ears.",
"it speaks a forgotten and ancient language both disturbing and enchanting to mortal ears.",
"it works out.",
"it wishes it were a tree.",
"it is always blushing.",
"it uses ancient and powerful magic.",
"it loves raw meat.",
"it is always smiling.",
"it can fire lasers from its eyes.",
"a small rainbutt follows it everywhere.",
"it is made of glass.",
"fireflies circle it constantly.",
"it is always accompanied by glowing orbs of light.",
"it has human legs.",
"water drips from it constantly.",
"it has golden horns.",
"it loves gore.",
"it lives in a cave with its parents.",
"its purse costs more than most people's cars.",
"it always shivers even when it's not cold.",
"it has tentacles.",
"it never blinks.",
"it only listens to metal.",
"it wears a golden crown.",
"it wears a white sundress.",
"it has green hair pulled up into two buns.",
"its body is covered in occult sigils and runes which pulse ominously.",
"it loves to devour the rotting plant matter covering the forest floor.",
"it wears a plain white mask.",
"its eyes flash multiple colors rapidly.",
"it loves to wear nail polish but applies it messily.",
"it runs a jimmy carter fanblog.",
"it is a surfer.",
"it only wears hawaiian shirts.",
"everything it wears is made out of denim.",
"it has long braided hair.",
"it calls everybody comrade.",
"it lures men to their deaths with its beautiful voice.",
"it has braces.",
"it has full sleeve tattoos.",
"it dresses like a grandpa.",
"smoke pours from its mouth.",
"it is a makeup artist.",
"it dresses like a pinup girl.",
"it has only one large eye.",
"it plays the harp.",
"it has very long hair with many flowers in it.",
"it has a cyan buzzcut.",
"it is a garden spirit.",
"it has fangs capable of injecting venom.",
"numerous eyeballs float around it. watching. waiting.",
"it loves to play in the mud.",
"it wears a surgical mask.",
"its eyes are pitch black and cause those who look directly into them for too long to "
"slowly grow older.",
"it wears numerous cute hairclips.",
"it has a very large tattoo of the 'blockbuster' logo.",
"it is constantly covered in honey that drips on everything and pools beneath it.",
"it wears a cherry-themed outfit.",
"it has heterochromia.",
"it is heavily scarred.",
"in place of a head it has a floating cube that glows and pulses softly.",
"it seems to be glitching.",
"it does not have organs- instead it is full of flowers.",
"its insides are glowing.",
"it is a skateboarder.",
"it is a superwholock blogger.",
"it is a skilled glass-blower.",
"it has a pet of the same species as itself.",
"it is the leader of an association of villains.",
"it wears a black leather outfit.",
"its pupils are slits.",
"it wears a crop top with the word OATMEAL in all caps.",
"it only wears crop tops and high waisted shorts.",
"it is always giving everyone a suspicious look.",
"it has a septum piercing.",
"instead of talking it just says numbers.",
"it is an internet famous scene queen.",
"its eyes are way too big to be normal.",
"it has super obvious tan lines.",
"it wears a maid outfit.",
"it is an emissary from hell.",
"its eyes have multiple pupils in them.",
"it has an impractically large sword.",
"it is a magical girl.",
"it has a scorpion tail.",
"it is a biologist specializing in marine invertebrates.",
"it runs. everywhere. all the time.",
"it is an esteemed fashion designer for beings with 6 or more limbs.",
"it wears short shorts that say CLAM.",
"it can't stop knitting.",
"it is always coated in glitter.",
"it worships powerful dolphin deities.",
"it has slicked back hair.",
"it has a thick beard.",
"it has a long braided beard plaited with ribbons.",
"it is a viking.",
"it wears a parka.",
"its outfit is completely holographic.",
"it wears an oversized pearl necklace.",
"it has stubble.",
"it carries a cellphone with a ridiculous amount of charms and keychains.",
"it wears crocs.",
"it has a hoard of gems and gold that was pillaged from innocent villagers.",
"it robs banks.",
"its facial features are constantly shifting.",
"it works as a librarian in hell.",
"it wears a fedora.",
]
description2 = [
"it constantly drips with a tar-like black substance.",
"it enjoys performing occult rituals with friends.",
"it is a communist.",
"a golden halo floats above its head.",
"it wears a mcdonalds uniform because it works at mcdonalds.",
"it carries a nail bat.",
"it wears louboutin heels.",
"it has two heads.",
"it has an unknowable amount of eyes.",
"it drools constantly.",
"its tongue is bright green.",
"it has numerous piercings.",
"it is a cheerleader.",
"it is a farmhand.",
"when you see it you are filled with an ancient fear.",
"it wears a toga.",
"it is made of jelly.",
"it has incredibly long and luxurious fur.",
"it uses reddit but won't admit it.",
"it glows softly and gently- evidence of a heavenly being.",
"it is a ghost.",
"it dresses like a greaser.",
"crystals grow from its flesh.",
"it rides motorcycles.",
"it wears incredibly large and impractical sunglasses.",
"it instagrams its starbucks drinks.",
"it is a hired killer.",
"where its tail should be is just another head.",
"it dwells in a bog.",
"it is wet and dripping with algae.",
"it runs a blog dedicated to different types of planes throughout history.",
"it worships the moon.",
"it comes from a long line of royalty.",
"it frolics in flowery meadows.",
"it wears a ballerina's outfit.",
"it wears a neutral milk hotel t-shirt with red fishnets and nothing else.",
"it wears a lot of eye makeup.",
"it won't stop sweating.",
"it has far too many teeth and they are all sharp.",
"it is a tattoo artist.",
"it is shaking.",
"it is a witch.",
"it wears scarves all the time.",
"to look into its eyes is to peer into a distant abyss.",
"mushrooms grow from its skin.",
"its face is actually an electronic screen.",
"it loves to wear combat boots with cute stickers all over them.",
"it comes from space.",
"it is a knife collector.",
"it flickers in and out of this plane of reality.",
"it wishes it were a butt.",
"its eyes are red.",
"it is the most beautiful thing you have ever seen.",
"it loves strawberry milkshakes.",
"it cries all the time and can't really do much about it.",
"it lives alone in a dense forgotten wilderness.",
"it wears big christmas sweaters year-round.",
"it floats about a foot off of the ground.",
"it loves trash.",
"it has demonic wings.",
"it has a cutie mark of a bar of soap.",
"it is melting.",
"it wears opulent jewelry of gold and gemstones.",
"it has a hoard of bones.",
"it has ram horns.",
"it has a forked tongue.",
"it wears frilly dresses.",
"it has antlers.",
"it is a nature spirit.",
"its back is covered in candles which flicker ominously.",
"it wears a leather jacket with lots of patches.",
"it wears a snapback.",
"it has a tattoo that says 'yolo'.",
"electricity flickers through the air surrounding it.",
"it is a fire elemental.",
"it consumes only blood.",
"it works at an adorable tiny bakery.",
"it is a professional wrestler.",
"instead of eyes there are just more ears.",
"it speaks a forgotten and ancient language both disturbing and enchanting to mortal ears.",
"it works out.",
"it wishes it were a tree.",
"it is always blushing.",
"it uses ancient and powerful magic.",
"it loves raw meat.",
"it is always smiling.",
"it can fire lasers from its eyes.",
"a small rainbutt follows it everywhere.",
"it is made of glass.",
"fireflies circle it constantly.",
"it is always accompanied by glowing orbs of light.",
"it has human legs.",
"water drips from it constantly.",
"it has golden horns.",
"why is it always covered in blood?",
"it loves gore.",
"it lives in a cave with its parents.",
"its purse costs more than most people's cars.",
"it always shivers even when it's not cold.",
"it has tentacles.",
"it never blinks.",
"it only listens to metal.",
"it wears a golden crown.",
"it wears a white sundress.",
"it has green hair pulled up into two buns.",
"its body is covered in occult sigils and runes which pulse ominously.",
"it loves to devour the rotting plant matter covering the forest floor.",
"it wears a plain white mask.",
"its eyes flash multiple colors rapidly.",
"you are afraid.",
"it loves to wear nail polish but applies it messily.",
"it runs a jimmy carter fanblog.",
"it is a surfer.",
"it only wears hawaiian shirts.",
"everything it wears is made out of denim.",
"it has long braided hair.",
"it calls everybody comrade.",
"it lures men to their deaths with its beautiful voice.",
"it has braces.",
"it has full sleeve tattoos.",
"it dresses like a grandpa.",
"smoke pours from its mouth.",
"it is a makeup artist.",
"it dresses like a pinup girl.",
"it has only one large eye.",
"it plays the harp.",
"it has very long hair with many flowers in it.",
"it has a cyan buzzcut.",
"it is a garden spirit.",
"it has fangs capable of injecting venom.",
"numerous eyeballs float around it. watching. waiting.",
"it loves to play in the mud.",
"it wears a surgical mask.",
"its eyes are pitch black and cause those who look directly into them for too long to "
"slowly grow older.",
"it wears numerous cute hairclips.",
"it has a very large tattoo of the 'blockbuster' logo.",
"it is constantly covered in honey that drips on everything and pools beneath it.",
"it wears a cherry-themed outfit.",
"it has heterochromia.",
"it is heavily scarred.",
"in place of a head it has a floating cube that glows and pulses softly.",
"it seems to be glitching.",
"its insides are glowing.",
"it does not have organs- instead it is full of flowers.",
"it is a skateboarder.",
"it is a superwholock blogger.",
"it is a skilled glass-blower.",
"it has a pet of the same species as itself.",
"it is the leader of an association of villains.",
"it wears a black leather outfit.",
"its pupils are slits..",
"it wears a crop top with the word OATMEAL in all caps.",
"it only wears crop tops and high waisted shorts.",
"it is always giving everyone a suspicious look.",
"it has a septum piercing.",
"its hair is beehive style. not an actual beehive.",
"instead of talking it just says numbers.",
"it has a halo. over its ass.",
"it is an internet famous scene queen.",
"its eyes are way too big to be normal.",
"it has super obvious tan lines.",
"it wears a maid outfit.",
"it is an emissary from hell.",
"its eyes have multiple pupils in them.",
"there are scorpions everywhere.",
"it has an impractically large sword.",
"it is a magical girl.",
"it has a scorpion tail.",
"it is a biologist specializing in marine invertebrates.",
"it runs. everywhere. all the time.",
"it is an esteemed fashion designer for beings with 6 or more limbs.",
"it wears short shorts that say CLAM.",
"it can't stop knitting.",
"it is always coated in glitter.",
"it worships powerful dolphin deities.",
"it has slicked back hair.",
"it has a thick beard.",
"it has a long braided beard plaited with ribbons.",
"it is a viking.",
"it wears a parka.",
"its outfit is completely holographic.",
"it wears an oversized pearl necklace.",
"it has stubble.",
"it carries a cellphone with a ridiculous amount of charms and keychains.",
"Welcome to Hell! Welcome to Hell!",
"it wears crocs.",
"it has a hoard of gems and gold that was pillaged from innocent villagers.",
"it robs banks and its partner in crime is the next fursona you generate.",
"its facial features are constantly shifting.",
"it works as a librarian in hell.",
"it wears a fedora.",
]
    # Build the reply from one random pick per word list.
    # Commons.get_random_choice(...) appears to return an indexable whose
    # first element is the chosen entry — [0] extracts it (TODO confirm
    # against Commons implementation).
    result = "Your new fursona is: {} {} {} {}".format(
        Commons.get_random_choice(adjective)[0],
        Commons.get_random_choice(animal)[0],
        Commons.get_random_choice(description1)[0],
        Commons.get_random_choice(description2)[0],
    )
    return event.create_response(result)
| [
"joshua@coales.co.uk"
] | joshua@coales.co.uk |
fd3c08bf7ff6f7e08533bb9129c270b01118af5b | 565c69222d1736f98cf83c1a4e2d88416091c0cf | /tests/test_sort_complex_number.py | 1277de1278615bff19345d2f440b4cf6729cf30d | [] | no_license | lnarasim/250_problems | 466e9ac0a8845eb83c275591992552c4c143645e | 561efe86f06aad8b2ce4b7659323be4d730bde3d | refs/heads/master | 2022-11-05T15:24:31.493816 | 2020-06-04T16:56:46 | 2020-06-04T16:56:46 | 264,413,351 | 0 | 1 | null | 2020-06-04T16:56:48 | 2020-05-16T10:33:28 | Python | UTF-8 | Python | false | false | 1,170 | py | from pyproblems.complex_number_sorter import sort_complex_numbers
import pytest
def test_sort_complex_number():
    """sort_complex_numbers orders mixed real/complex inputs as expected."""
    cases = [
        ((4 + 5j, 5 + 8j, 3j, 4), (3j, (4 + 5j), 4, (5 + 8j))),
        ((1, 2, 4, 5), (1, 2, 4, 5)),
        ((1 + 4j, 2j, 4.5 + 3j, 5j), (2j, 5j, 1 + 4j, 4.5 + 3j)),
        ((1 + 2j, 1.5 + 9j, 2.5 + 7j, 2 + 4j, 1 + 8j),
         (1 + 2j, 1 + 8j, 1.5 + 9j, 2 + 4j, 2.5 + 7j)),
        ((-1 - 2j, 4 - 5j, 3j), (-1 - 2j, 3j, 4 - 5j)),
        ((), ()),
    ]
    for args, expected in cases:
        assert sort_complex_numbers(*args) == expected
def test_sort_complex_number_errors_1():
    """A boolean mixed into the arguments must raise TypeError."""
    bad_args = (True, 5 + 8j, 3j, 4)
    with pytest.raises(TypeError):
        sort_complex_numbers(*bad_args)
def test_sort_complex_number_errors_2():
    """False (a bool) among complex arguments must raise TypeError."""
    bad_args = (False, 5 + 8j, -2 + 3j, 4 - 6j)
    with pytest.raises(TypeError):
        sort_complex_numbers(*bad_args)
def test_sort_complex_number_errors_3():
    """Passing a single list instead of scalars must raise TypeError."""
    bad_list = [True, 5 + 8j, 3j, 4]
    with pytest.raises(TypeError):
        sort_complex_numbers(bad_list)
def test_sort_complex_number_errors_4():
    """Tuple/list container arguments mixed with scalars must raise TypeError."""
    with pytest.raises(TypeError):
        sort_complex_numbers(
            (1 + 2j, 1.5 + 9j),
            [2.5 + 7j, 2 + 4j],
            1 + 8j,
        )
def test_sort_complex_number_errors_5():
    """Passing a set instead of scalar arguments must raise TypeError."""
    bad_set = {-1 - 2j, 4 - 5j, 3j}
    with pytest.raises(TypeError):
        sort_complex_numbers(bad_set)
| [
"m.jmsudhan@gmail.com"
] | m.jmsudhan@gmail.com |
2e445e4f56c622f6f5d41a6de407c6c9d92f5b20 | 83b8b30ebb633eecd29ca0a7a20cc43a293c9333 | /tests/basics/subclass_native2_list.py | 9ad0b77ef6dd1c7659097492eec0ebb77099b017 | [
"MIT",
"GPL-1.0-or-later"
] | permissive | adafruit/circuitpython | 430ec895149d1eb814b505db39b4977a35ee88a7 | 506dca71b0cbb7af749bb51f86b01021db5483b3 | refs/heads/main | 2023-08-21T16:30:46.781068 | 2023-08-20T00:39:44 | 2023-08-20T00:39:44 | 66,166,069 | 3,806 | 1,560 | MIT | 2023-09-14T19:23:51 | 2016-08-20T20:10:40 | C | UTF-8 | Python | false | false | 587 | py | class Base1:
    def __init__(self, *args):
        """Record construction by printing the received args (observed by the test)."""
        print("Base1.__init__", args)
class Clist1(Base1, list):
    """Subclass with Base1 first in the MRO, then list."""
    pass
# Instantiation runs Base1.__init__ (first in the MRO); the list part stays empty.
a = Clist1()
print(len(a))
# Not compliant - list assignment should happen in list.__init__, which is not called
# because there's Base1.__init__, but we assign in list.__new__
#a = Clist1([1, 2, 3])
#print(len(a))
print("---")
class Clist2(list, Base1):
    """Subclass with list first in the MRO, then Base1."""
    pass
# Not compliant - should call list.__init__, but we don't have it
#a = Clist2()
#print(len(a))
# Not compliant - should call list.__init__, but we don't have it
#a = Clist2([1, 2, 3])
#print(len(a))
| [
"pfalcon@users.sourceforge.net"
] | pfalcon@users.sourceforge.net |
70df8c9f735c4c5148e1742989ea63e29fb574e9 | 32f624b68b1f9251321ddee4d40fc00c834c46fa | /ShortMe/src/shortener/models.py | 16e776e6c3d29f0d382fbf58c414c0578c9714b9 | [] | no_license | AnkurDahama/ShortMe | 8a03c2ab533924d34a59c18235740bd61b8ee53b | 165962816452ad8a7b8697157002afff37d5309c | refs/heads/master | 2021-01-23T14:06:42.755734 | 2018-04-11T06:21:38 | 2018-04-11T06:21:38 | 93,242,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
from .utils import code_generator, create_shortcode
class ShortMeManager(models.Manager):
    """Manager that restricts the default queryset to active URLs."""

    def all(self, *args, **kwargs):
        """Return only rows flagged active=True."""
        base_qs = super(ShortMeManager, self).all(*args, **kwargs)
        return base_qs.filter(active=True)

    def refresh_shortcodes(self):
        """Regenerate the shortcode of every stored URL and report the count."""
        regenerated = 0
        for entry in ShortMeURL.objects.filter(id__gte=1):
            entry.shortcode = create_shortcode(entry)
            entry.save()
            regenerated += 1
        return "New codes made: {i}".format(i = regenerated)
class ShortMeURL(models.Model):
    """A stored URL together with its generated short code."""

    # Field definitions are left exactly as-is: they define the DB schema.
    url = models.CharField(max_length=220, )
    shortcode = models.CharField(max_length=15, unique=True, blank= True)
    active = models.BooleanField(default=True)

    objects = ShortMeManager()

    def save(self, *args, **kwargs):
        """Generate a shortcode on first save, then defer to Model.save."""
        if self.shortcode in (None, ""):
            self.shortcode = create_shortcode(self)
        super(ShortMeURL, self).save(*args, **kwargs)

    def __str__(self):
        """Display the model as its target URL."""
        return str(self.url)
| [
"ankur.dahama@student.tut.fi"
] | ankur.dahama@student.tut.fi |
9ba139bcaa98b5c19be7ed4307c47d44abe13cff | 2db1a0038d26ccb6adc572b536cb5cd401fd7498 | /tryTen/Lib/site-packages/setuptools/py31compat.py | 0f1753a87be81de04522e4b1d674aee34dfb2e8c | [] | no_license | syurk/labpin | e795c557e7d7bcd4ff449cb9a3de32959a8c4968 | 04070dd5ce6c0a32c9ed03765f4f2e39039db411 | refs/heads/master | 2022-12-12T02:23:54.975797 | 2018-11-29T16:03:26 | 2018-11-29T16:03:26 | 159,692,630 | 0 | 1 | null | 2022-11-19T12:15:55 | 2018-11-29T16:04:20 | Python | UTF-8 | Python | false | false | 1,646 | py | import sys
import unittest
__all__ = ['get_config_vars', 'get_path']
try:
# Python 2.7 or >=3.2
from sysconfig import get_config_vars, get_path
except ImportError:
from distutils.sysconfig import get_config_vars, get_python_lib
def get_path(name):
if name not in ('platlib', 'purelib'):
raise ValueError("Name must be purelib or platlib")
return get_python_lib(name == 'platlib')
try:
# Python >=3.2
from tempfile import TemporaryDirectory
except ImportError:
import shutil
import tempfile
class TemporaryDirectory(object):
"""
Very simple temporary directory context manager.
Will try to delete afterward, but will also ignore OS and similar
errors on deletion.
"""
def __init__(self):
self.name = None # Handle mkdtemp raising an exception
self.name = tempfile.mkdtemp()
def __enter__(self):
return self.name
def __exit__(self, exctype, excvalue, exctrace):
try:
shutil.rmtree(self.name, True)
except OSError: # removal errors are not the only possible
pass
self.name = None
unittest_main = unittest.main
_PY31 = (3, 1) <= sys.version_info[:2] < (3, 2)
if _PY31:
# on Python 3.1, translate testRunner==None to TextTestRunner
# for compatibility with Python 2.6, 2.7, and 3.2+
def unittest_main(*args, **kwargs):
if 'testRunner' in kwargs and kwargs['testRunner'] is None:
kwargs['testRunner'] = unittest.TextTestRunner
return unittest.main(*args, **kwargs)
| [
"syurk738@students.bju.edu"
] | syurk738@students.bju.edu |
8131876bb62c14f5294f1792f0da4b600f2e059c | f2983a63e3e402f55c33d8d634453467fff4201b | /src/thomfx/util/graph.py | 77555d084b9c2394258164b043574c680738f9f4 | [] | no_license | ThomPuiman/thomfx-trader | af748e2334bfaca5ac61eceecfa4503411077a0f | 5e22140bc06635f2f9e9e1468af8cbb384d977b8 | refs/heads/master | 2021-01-22T07:47:24.753830 | 2017-02-13T17:13:10 | 2017-02-13T17:13:10 | 81,850,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from abc import ABCMeta, abstractmethod
class GraphAbstract(metaclass=ABCMeta):
    """Interface for graph data providers.

    Fix: the class previously used ``@abstractmethod`` without declaring
    ``ABCMeta`` as its metaclass, so the decorator had no effect — the class
    could be instantiated and ``retrieve_data`` silently inherited as a no-op.
    With the metaclass in place, instantiating without overriding
    ``retrieve_data`` raises TypeError, as intended.
    """

    @abstractmethod
    def retrieve_data(self, instrument):
        """Fetch graph data for *instrument*; concrete subclasses must override."""

    # @abstractmethod
    # def draw(self): pass
| [
"TPuiman@vccp.com"
] | TPuiman@vccp.com |
480042958ddbf7fdce1f8de076f6e2e1849f75b3 | 1280843d603d367e589277fac54f571c0cb3a1e6 | /ML0101_Kmeans_clustering.py | 8aab80792065cd91806ab8813a3d3b4e89c6595d | [] | no_license | m-mgh/Project_DataScience | 8afa6b4fd61959e73a37862ecc927150b95cdabc | d674446d8615c45e309e95ad9e611892ea41ddc3 | refs/heads/master | 2023-01-03T19:07:31.169800 | 2020-10-28T02:18:05 | 2020-10-28T02:18:05 | 280,716,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,262 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 23:12:13 2020
@author: Mercedeh_Mgh
"""
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# --- Part 1: K-Means on a synthetic data set ---------------------------------
# Fix the RNG seed so make_blobs and KMeans produce reproducible results.
np.random.seed(0)
#generate random clusters of points. n_samples defines the number of points.centers is the number of centers to be generated or location of fixed centers.cluster_std is the standard deviation of the clusters.
X, y=make_blobs(n_samples=5000, centers=[[4,4],[-2,-1],[2,-3],[1,1]],cluster_std=0.9)
plt.scatter(X[:,0], X[:,1], marker='.')
#parameters of kmeans class of sklearn.cluster are init which is the method of selecting initial centroids, n_clusters which is the number of centroids/clusters, and n-init which is the number of times the algorithm will run with different centroid points
k_means=KMeans(n_clusters=4,init="k-means++",n_init=12)
k_means.fit(X)
# labels_ gives the cluster index assigned to each of the 5000 points
k_means_labels=k_means.labels_
print(k_means_labels)
# cluster_centers_ gives the fitted coordinates of the 4 centroids
k_means_cluster_centers=k_means.cluster_centers_
print(k_means_cluster_centers)
# plotting
fig=plt.figure(figsize=(6,4))
# Colors uses a color map, which will produce an array of colors based on the number of labels there are. We use set(k_means_labels) to get the unique labels.
colors=plt.cm.Spectral(np.linspace(0,1,len(set(k_means_labels))))
ax=fig.add_subplot(1,1,1)
# Loop over cluster indices 0..3 paired with a color each.
# NOTE(review): the literal list here only supplies the count (4) via len();
# its last entry [1,-1] differs from the [1,1] passed to make_blobs above,
# which is harmless but confusing.
for k, col in zip(range(len([[4,4],[-2,-1],[2,-3],[1,-1]])),colors):
    # Boolean mask: True for the data points assigned to cluster k.
    my_members=(k_means_labels==k)
    # Define the centroid, or cluster center.
    cluster_center=k_means_cluster_centers[k]
    # Plots the datapoints with color col.
    ax.plot(X[my_members,0],X[my_members,1],'w',markerfacecolor=col, marker='.')
    # Plots the centroids with specified color, but with a darker outline
    ax.plot(cluster_center[0],cluster_center[1],'o',markerfacecolor=col,markeredgecolor='k',markersize=6)
# Title of the plot
ax.set_title('KMeans')
# Remove x-axis ticks
ax.set_xticks(())
# Remove y-axis ticks
ax.set_yticks(())
# Show the plot
plt.show()
# --- Part 2: K-Means customer segmentation on a real data set ----------------
cust_df=pd.read_csv(r'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/Cust_Segmentation.csv')
print(cust_df.head())
print(cust_df.columns)
print(cust_df['Edu'].unique())
print(cust_df['Address'].unique())
#because KMeans is not applicable to categorical variables- because euclidean distance is not meaningful for discrete variables- we need to drop the feature 'Address' which is categorical.
df=cust_df.drop('Address',axis=1)
print(df.head(5))
#next we normalize the data over the std. "Normalization is a statistical method that helps mathematical-based algorithms to interpret features with different magnitudes and distributions equally.We use StandardScaler() to normalize our dataset."
from sklearn.preprocessing import StandardScaler
X=df.values[:,1:]
X=np.nan_to_num(X)
Clus_dataSet=StandardScaler().fit_transform(X)
print(Clus_dataSet)
#apply Kmeans clustering algorithm to the preprocessed dataset
ClusterNum=3
k_means=KMeans(n_clusters=ClusterNum,init='k-means++',n_init=12)
# NOTE(review): this fits on the raw X, not the standardized Clus_dataSet
# computed just above — the comment says "preprocessed dataset"; confirm
# which was intended before changing behavior.
k_means.fit(X)
labels=k_means.labels_
print (labels)
# assign labels to each row
df['clus_km']=labels
print(df.head(5))
#check centroids by averaging the features in each cluster
print(df.groupby('clus_km').mean())
#check distribution of customers based on age and income through plotting
# marker area scaled by the square of column 1 (education, per the 3D plot labels)
area=np.pi*(X[:,1])**2
# NOTE(review): np.float was removed in NumPy 1.20+; labels.astype(float)
# would be required on modern NumPy.
plt.scatter(X[:,0],X[:,3], s=area,c=labels.astype(np.float), alpha=0.5)
plt.xlabel('age',fontsize=18)
plt.ylabel('income',fontsize=16)
plt.show()
from mpl_toolkits.mplot3d import Axes3D
fig=plt.figure(1,figsize=(8,6))
plt.clf()
ax=Axes3D(fig,rect=[0,0,0.95,1],elev=48,azim=134)
plt.cla()
ax.set_xlabel('education')
ax.set_ylabel('age')
ax.set_zlabel('income')
ax.scatter(X[:,1],X[:,0],X[:,3],c=labels.astype(np.float))
| [
"13066023+m-mgh@users.noreply.github.com"
] | 13066023+m-mgh@users.noreply.github.com |
2d6a5abed2c76cfec59eb17ce5607c81c9fe669b | 0d22b1231fc1d66b0bcbda463a723dc44e1cd628 | /lesson_002/04_my_family.py | a1e7a012016ba479f00f52bf374c8691a6705209 | [] | no_license | Vladis90/pythonProject | 03133c59a77d2bbf1c20321f19fd8fbd970d4d2c | e3a306034e20fd7985b3e28b951c249306b4f5ee | refs/heads/master | 2023-08-19T19:50:32.046744 | 2021-10-29T16:01:35 | 2021-10-29T16:01:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | # -*- coding: utf-8 -*-
# Создайте списки:
# моя семья (минимум 3 элемента, есть еще дедушки и бабушки, если что)
from typing import List, Tuple, Union
# My family members (the task asks for at least 3 entries).
my_family = ['father', 'mother', 'son']
# List of [member, approximate height in cm] pairs.
my_family_height = ['father', 183], ['mother', 165], ['son', 95]
# Print the father's height, e.g. "rost father  183 cm".
print('rost ' + my_family[0] + ' ', my_family_height[0][1], 'cm')
# Total family height: sum every member's height instead of adding the three
# entries by hand, so the code keeps working if the family grows.
sum_height = sum(height for _, height in my_family_height)
print(sum_height)
| [
"aa@gmail.com"
] | aa@gmail.com |
035d6ecea6b4ccc576382efc05661c81a944fa2d | 51000f5031cca82d77951bc22f6c6f1813ac2b7c | /notebook/utils.py | b1ae3b6503a0231e329c9a2e01e5c411ff6ec1f9 | [
"BSD-3-Clause"
] | permissive | EduServer/jupyter_notebook | c6b77f7d98e5c6f36b4b0ed27dcbce8b04e27d7c | 48a229dd3018ec80d82452f542b7e72f948dc6ce | refs/heads/master | 2020-04-28T04:29:19.450403 | 2019-03-12T02:33:45 | 2019-03-12T02:33:45 | 174,981,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,420 | py | """Notebook related utilities"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import ctypes
import errno
import os
import stat
import sys
from distutils.version import LooseVersion
# Compatibility fallbacks: inspect.isawaitable and concurrent.futures may be
# missing on old interpreters; provide inert substitutes so the isinstance
# checks in maybe_future() below still work.
try:
    from inspect import isawaitable
except ImportError:
    def isawaitable(f):
        """If isawaitable is undefined, nothing is awaitable"""
        return False
try:
    from concurrent.futures import Future as ConcurrentFuture
except ImportError:
    class ConcurrentFuture:
        """If concurrent.futures isn't importable, nothing will be a c.f.Future"""
        pass
try:
from urllib.parse import quote, unquote, urlparse, urljoin
from urllib.request import pathname2url
except ImportError:
from urllib import quote, unquote, pathname2url
from urlparse import urlparse, urljoin
# tornado.concurrent.Future is asyncio.Future
# in tornado >=5 with Python 3
from tornado.concurrent import Future as TornadoFuture
from tornado import gen
from ipython_genutils import py3compat
# UF_HIDDEN is a stat flag not defined in the stat module.
# It is used by BSD to indicate hidden files.
UF_HIDDEN = getattr(stat, 'UF_HIDDEN', 32768)
def exists(path):
    """Drop-in replacement for `os.path.exists` that also works for host
    mapped volumes on Windows containers.
    """
    try:
        os.lstat(path)
        return True
    except OSError:
        return False
def url_path_join(*pieces):
    """Join URL components into a single path without doubling slashes.

    A leading '/' on the first piece and a trailing '/' on the last piece
    are preserved.
    """
    leading = pieces[0].startswith('/')
    trailing = pieces[-1].endswith('/')
    trimmed = (piece.strip('/') for piece in pieces)
    joined = '/'.join(part for part in trimmed if part)
    if leading:
        joined = '/' + joined
    if trailing:
        joined += '/'
    # all-empty input with leading and trailing slashes collapses to '/'
    return '/' if joined == '//' else joined
def url_is_absolute(url):
    """Determine whether a given URL is absolute"""
    parsed_path = urlparse(url).path
    return parsed_path[:1] == "/"
def path2url(path):
    """Convert a local file path to a URL"""
    parts = [quote(part) for part in path.split(os.sep)]
    # a path ending in os.sep splits into a final empty piece;
    # turn it into '/' so the trailing slash survives url_path_join
    if parts[-1] == '':
        parts[-1] = '/'
    return url_path_join(*parts)
def url2path(url):
    """Convert a URL to a local file path"""
    segments = (unquote(segment) for segment in url.split('/'))
    return os.path.join(*segments)
def url_escape(path):
    """Escape special characters in a URL path

    Turns '/foo bar/' into '/foo%20bar/'
    """
    raw = py3compat.unicode_to_str(path, encoding='utf8')
    return u'/'.join(quote(part) for part in raw.split('/'))
def url_unescape(path):
    """Unescape special characters in a URL path

    Turns '/foo%20bar/' into '/foo bar/'
    """
    raw = py3compat.unicode_to_str(path, encoding='utf8')
    return u'/'.join(
        py3compat.str_to_unicode(unquote(part), encoding='utf8')
        for part in raw.split('/')
    )
def is_file_hidden_win(abs_path, stat_res=None):
    """Is a file hidden?

    This only checks the file itself; it should be called in combination with
    checking the directory containing the file.

    Use is_hidden() instead to check the file and its parent directories.

    Parameters
    ----------
    abs_path : unicode
        The absolute path to check.
    stat_res : os.stat_result, optional
        Ignored on Windows, exists for compatibility with POSIX version of the
        function.
    """
    # dotfile convention is honored even on Windows
    if os.path.basename(abs_path).startswith('.'):
        return True
    # Win32 FILE_ATTRIBUTE_HIDDEN bit (not exposed by the stat module)
    win32_FILE_ATTRIBUTE_HIDDEN = 0x02
    try:
        attrs = ctypes.windll.kernel32.GetFileAttributesW(
            py3compat.cast_unicode(abs_path)
        )
    except AttributeError:
        # ctypes.windll does not exist off Windows; treat as not hidden
        pass
    else:
        # attrs is INVALID_FILE_ATTRIBUTES (-1) on failure, hence the > 0 guard
        if attrs > 0 and attrs & win32_FILE_ATTRIBUTE_HIDDEN:
            return True
    return False
def is_file_hidden_posix(abs_path, stat_res=None):
    """Is a file hidden?

    This only checks the file itself; it should be called in combination with
    checking the directory containing the file.

    Use is_hidden() instead to check the file and its parent directories.

    Parameters
    ----------
    abs_path : unicode
        The absolute path to check.
    stat_res : os.stat_result, optional
        The result of calling stat() on abs_path. If not passed, this function
        will call stat() internally.
    """
    if os.path.basename(abs_path).startswith('.'):
        return True
    # re-stat when no result was supplied, or when the supplied one describes
    # a symlink (we want the target's mode/flags, so use stat, not lstat)
    if stat_res is None or stat.S_ISLNK(stat_res.st_mode):
        try:
            stat_res = os.stat(abs_path)
        except OSError as e:
            # a vanished file is simply "not hidden"; other errors propagate
            if e.errno == errno.ENOENT:
                return False
            raise
    # check that dirs can be listed
    if stat.S_ISDIR(stat_res.st_mode):
        # use x-access, not actual listing, in case of slow/large listings
        if not os.access(abs_path, os.X_OK | os.R_OK):
            return True
    # check UF_HIDDEN (BSD/macOS flag; st_flags is absent on Linux, hence getattr)
    if getattr(stat_res, 'st_flags', 0) & UF_HIDDEN:
        return True
    return False
# Select the platform-appropriate implementation once at import time.
if sys.platform == 'win32':
    is_file_hidden = is_file_hidden_win
else:
    is_file_hidden = is_file_hidden_posix
def is_hidden(abs_path, abs_root=''):
    """Is a file hidden or contained in a hidden directory?

    This will start with the rightmost path element and work backwards to the
    given root to see if a path is hidden or in a hidden directory. Hidden is
    determined by either name starting with '.' or the UF_HIDDEN flag as
    reported by stat.

    If abs_path is the same directory as abs_root, it will be visible even if
    that is a hidden folder. This only checks the visibility of files
    and directories *within* abs_root.

    Parameters
    ----------
    abs_path : unicode
        The absolute path to check for hidden directories.
    abs_root : unicode
        The absolute path of the root directory in which hidden directories
        should be checked for.
    """
    # the root itself is always visible, even if it is a hidden folder
    if os.path.normpath(abs_path) == os.path.normpath(abs_root):
        return False
    # the file itself (dot-name, listability, UF_HIDDEN)
    if is_file_hidden(abs_path):
        return True
    # no root given: fall back to the filesystem root of abs_path
    if not abs_root:
        abs_root = abs_path.split(os.sep, 1)[0] + os.sep
    inside_root = abs_path[len(abs_root):]
    # any dot-named ancestor inside the root hides the whole subtree
    if any(part.startswith('.') for part in inside_root.split(os.sep)):
        return True

    # check UF_HIDDEN on any location up to root.
    # is_file_hidden() already checked the file, so start from its parent dir
    path = os.path.dirname(abs_path)
    while path and path.startswith(abs_root) and path != abs_root:
        if not exists(path):
            # skip ancestors that can't be stat'ed and keep walking up
            path = os.path.dirname(path)
            continue
        try:
            # may fail on Windows junctions
            st = os.lstat(path)
        except OSError:
            return True
        if getattr(st, 'st_flags', 0) & UF_HIDDEN:
            return True
        path = os.path.dirname(path)
    return False
def samefile_simple(path, other_path):
    """
    Fill in for os.path.samefile when it is unavailable (Windows+py2).

    Compares a case-insensitive form of both path strings, plus the full
    stat results (including times), because Windows + py2 lacks the st_ino
    and st_dev fields needed for a true identity check.

    Only to be used if os.path.samefile is not available.

    Parameters
    -----------
    path: String representing a path to a file
    other_path: String representing a path to another file

    Returns
    -----------
    same: Boolean that is True if both path and other path are the same
    """
    stat_a = os.stat(path)
    stat_b = os.stat(other_path)
    names_match = path.lower() == other_path.lower()
    return names_match and stat_a == stat_b
def to_os_path(path, root=''):
    """Convert an API path to a filesystem path

    If given, root will be prepended to the path.
    root must be a filesystem path already.
    """
    # drop empty segments produced by duplicate or surrounding slashes
    segments = [seg for seg in path.strip('/').split('/') if seg]
    return os.path.join(root, *segments)
def to_api_path(os_path, root=''):
    """Convert a filesystem path to an API path

    If given, root will be removed from the path.
    root must be a filesystem path already.
    """
    if os_path.startswith(root):
        os_path = os_path[len(root):]
    # drop empty segments produced by duplicate or surrounding separators
    segments = [seg for seg in os_path.strip(os.path.sep).split(os.path.sep) if seg]
    return '/'.join(segments)
def check_version(v, check):
    """check version string v >= check

    If dev/prerelease tags result in TypeError for string-number comparison,
    it is assumed that the dependency is satisfied.
    Users on dev branches are responsible for keeping their own packages up to date.
    """
    try:
        have, want = LooseVersion(v), LooseVersion(check)
        return have >= want
    except TypeError:
        # incomparable dev/prerelease components: assume satisfied
        return True
# Copy of IPython.utils.process.check_pid:
def _check_pid_win32(pid):
    """Return True if a process with *pid* exists (Windows implementation)."""
    import ctypes
    # OpenProcess returns 0 if no such process (of ours) exists
    # positive int otherwise
    return bool(ctypes.windll.kernel32.OpenProcess(1,0,pid))
def _check_pid_posix(pid):
    """Return True if a process with ``pid`` exists (POSIX).

    Copy of IPython.utils.process.check_pid.
    """
    try:
        os.kill(pid, 0)  # signal 0 performs an existence check only
    except OSError as err:
        if err.errno == errno.ESRCH:
            return False  # no such process
        if err.errno == errno.EPERM:
            # No permission to signal the process - it probably exists.
            return True
        raise
    else:
        return True
# Select the platform-appropriate pid-checking implementation once at
# import time; callers use the unified `check_pid` name.
if sys.platform == 'win32':
    check_pid = _check_pid_win32
else:
    check_pid = _check_pid_posix
def maybe_future(obj):
    """Like tornado's gen.maybe_future, but more compatible with asyncio
    for recent versions of tornado.

    Returns ``obj`` unchanged if it is already a tornado Future, wraps
    awaitables and concurrent futures for asyncio, and wraps any other
    value in an already-resolved future.
    """
    if isinstance(obj, TornadoFuture):
        return obj
    if isawaitable(obj):
        return asyncio.ensure_future(obj)
    if isinstance(obj, ConcurrentFuture):
        return asyncio.wrap_future(obj)
    # Not awaitable: wrap the scalar value in a resolved future.
    resolved = TornadoFuture()
    resolved.set_result(obj)
    return resolved
# monkeypatch tornado gen.maybe_future
# on Python 3
# TODO: remove monkeypatch after backporting smaller fix to 5.x
try:
    # asyncio only exists on Python 3; on Python 2 leave tornado untouched.
    import asyncio
except ImportError:
    pass
else:
    import tornado.gen
    tornado.gen.maybe_future = maybe_future
| [
"1067741@mochiwang.com"
] | 1067741@mochiwang.com |
90c673e500897ed1e29191a096b9044e21532118 | 9af3dc963bf937b9320261eb74874661d97caa40 | /client.py | 8c27c1a8f741c22df535d6072988de2fe2728e63 | [] | no_license | Camp-Butterfly/backendAPI | 85ee68345908dbf199e2b8cc4b476fe49aca7215 | bf9ca3cb687ef050d5dca10a3ec77787bdeb13c2 | refs/heads/locally-working | 2022-12-10T17:12:39.521402 | 2019-12-17T20:36:18 | 2019-12-17T20:36:18 | 228,474,526 | 0 | 0 | null | 2022-12-08T03:19:07 | 2019-12-16T20:57:19 | Python | UTF-8 | Python | false | false | 2,718 | py | from flask import Flask
from flask import request
from flask_cors import CORS
from flask_cors import cross_origin
#from gevent.pywsgi import WSGIServer
import grpc
import numpy as np
import requests
import tensorflow as tf
import os
import base64
import io
import PIL
import json
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from tensorflow.keras.preprocessing import image
from PIL import Image
app = Flask(__name__)
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route('/api/v1/model', methods=['POST'])
@cross_origin()
def image_post():
    """Classify a base64-encoded image via a remote TensorFlow Serving model.

    Expects a JSON body ``{"image_content": <base64 string>}``.  The image
    is decoded, resized to 150x150, sent over gRPC to the 'model'
    SavedModel, and the argmax class index is returned as a JSON-encoded
    integer.
    """
    # get base-64 image payload from the json object
    test = request.get_json(force=True)
    print(test)
    img_c = test['image_content']
    # Preprocessing for the base64-encoded image: replicates what
    # image.load_img does => open the bytes, resize to the target size,
    # then map to a keras array.
    img_c = base64.b64decode(img_c)
    buf = io.BytesIO(img_c)
    img = Image.open(buf)
    img = img.resize([150, 150])
    img_tensor = image.img_to_array(img)
    img_tensor = np.expand_dims(img_tensor, axis=0)  # add batch dimension
    data = img_tensor
    print(data)
    # Open a channel to the serving container.
    # NOTE(review): channel_ready_future().result() blocks with no timeout -
    # confirm an unreachable server cannot hang this request indefinitely.
    channel = grpc.insecure_channel('35.193.112.218:8500')
    grpc.channel_ready_future(channel).result()
    # Service stub that sends requests over the channel.
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    # Build the prediction request.
    req = predict_pb2.PredictRequest()
    req.model_spec.name = 'model'
    req.model_spec.signature_name = 'serving_default'
    req.inputs['conv2d_input'].CopyFrom(
        tf.make_tensor_proto(data, shape=[1, 150, 150, 3])
    )
    # Make the request to the serving container with a 10-second deadline.
    result = stub.Predict(req, 10.0)
    # Model response as a flat array of class scores.
    floats = np.array(list(result.outputs['dense_1'].float_val))
    max_ = floats.argmax()
    print("\n")
    print(floats)
    print("\n")
    print(max_)
    print("\n")
    # BUG FIX: argmax() returns a NumPy integer, which json.dumps cannot
    # serialize (TypeError).  Cast to a plain int before encoding.
    res = json.dumps(int(max_))
    return res
@app.route("/", methods=['GET'])
@cross_origin()
def helloWorld():
return "Hello, cross-origin-world!"
#@app.after_request
#def after_request(response):
# response.headers.add('Access-Control-Allow-Origin', '*')
# response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
# response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
# return response
#if __name__=='__main__':
app.run(host='146.95.184.180', port=5000)
#http_server = WSGIServer(('146.95.184.180', 5000), app)
#http_server.serve_forever()
| [
"stephenjk@optonline.net"
] | stephenjk@optonline.net |
9f78758a5c032c4e90b6e5b71d8bb083e3f2a227 | d6c62ea503b9cd12f94036afd58e40743dabedd9 | /relief.py | 92b9675987b5ddca4b7a914b45d4006466302c14 | [] | no_license | better0123/reliefF-example | d2a9cc63a893d9863e9e4a15b01a4c630f27d584 | 8ab1df0ba77d4d978a4086d3e9c690d70011dfe6 | refs/heads/master | 2022-02-19T16:16:37.224842 | 2019-07-29T10:42:56 | 2019-07-29T10:42:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | from ReliefF import ReliefF
import numpy as np
from sklearn import datasets
import pandas as pd
#example of 2 class problem
# Example of a 2-class problem: 5 samples x 3 features, binary labels.
data = np.array([[9,2,2],[5,1,0],[9,3,2],[8,3,1],[6,0,0]])
target = np.array([0,0,1,1,1])
# Keep the 2 best-scoring features according to ReliefF.
fs = ReliefF(n_neighbors=1, n_features_to_keep=2)
X_train = fs.fit_transform(data, target)
print(X_train)
print("--------------")
print("(No. of tuples, No. of Columns before ReliefF) : "+str(data.shape)+
      "\n(No. of tuples , No. of Columns after ReliefF) : "+str(X_train.shape))
# Example of a multi-class problem: the classic iris dataset (3 classes).
iris = datasets.load_iris()
X = iris.data
Y = iris.target
fs = ReliefF(n_neighbors=20, n_features_to_keep=2)
X_train = fs.fit_transform(X, Y)
print("(No. of tuples, No. of Columns before ReliefF) : "+str(iris.data.shape)+
      "\n(No. of tuples, No. of Columns after ReliefF) : "+str(X_train.shape))
| [
"noreply@github.com"
] | noreply@github.com |
357a3f80b42be32bab6b22b6cf20867bf994258b | f26521284741a1f730e2d52de7426807247e08b6 | /Python/From comment to code/main.py | 2bdaedd8e5de364f363e71f1890a5162d37eb455 | [
"MIT"
] | permissive | drtierney/hyperskill-problems | 0e6fe8ca418d1af700a5a1b1b2eed1f1f07b8e9e | b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0 | refs/heads/main | 2021-10-25T07:02:50.838216 | 2021-10-16T19:08:49 | 2021-10-16T19:08:49 | 253,045,232 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | # prints "ok" without quotes
print("ok")
| [
"d.r.tierney@hotmail.co.uk"
] | d.r.tierney@hotmail.co.uk |
2831bc7d419bce7512e1ece5bc5f1630473b2e42 | ec6d2cdd96be805a4a011b14aa042d05bc41eb91 | /odd_occurencies_in_array.py | 62d362033d16aba361940b20095909bfa8999e2b | [] | no_license | evrenesat/codility_answers | 9538121f791f0a2594bacd1c0123f1dbe7831e34 | a031e93841d23c47763c24d3efbbf55de14aa799 | refs/heads/master | 2021-01-17T17:27:49.005627 | 2016-08-15T07:47:46 | 2016-08-15T07:47:46 | 65,673,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | # -*- coding: utf-8 -*-
"""
A non-empty zero-indexed array A consisting of N integers is given. The array contains an odd number of elements, and each element of the array can be paired with another element that has the same value, except for one element that is left unpaired.
For example, in array A such that:
A[0] = 9 A[1] = 3 A[2] = 9
A[3] = 3 A[4] = 9 A[5] = 7
A[6] = 9
the elements at indexes 0 and 2 have value 9,
the elements at indexes 1 and 3 have value 3,
the elements at indexes 4 and 6 have value 9,
the element at index 5 has value 7 and is unpaired.
Write a function:
def solution(A)
that, given an array A consisting of N integers fulfilling the above conditions, returns the value of the unpaired element.
For example, given array A such that:
A[0] = 9 A[1] = 3 A[2] = 9
A[3] = 3 A[4] = 9 A[5] = 7
A[6] = 9
the function should return 7, as explained in the example above.
Assume that:
N is an odd integer within the range [1..1,000,000];
each element of array A is an integer within the range [1..1,000,000,000];
all but one of the values in A occur an even number of times.
Complexity:
expected worst-case time complexity is O(N);
expected worst-case space complexity is O(1), beyond input storage (not counting the storage required for input arguments).
Elements of input arrays can be modified.
"""
A = [9,3,9,3,9,7,9,7,8,3,5,3,5]
# A = [9]
def solution(A):
    """Return the value of the single unpaired element of A.

    Every value in A occurs an even number of times except one; XOR-ing
    all elements cancels the pairs and leaves the unpaired value.

    O(N) time, O(1) extra space.  This also fixes the original's use of
    dict.iteritems(), which does not exist on Python 3.
    """
    unpaired = 0
    for value in A:
        unpaired ^= value
    return unpaired
print(solution(A))
| [
"sleytr@gmail.com"
] | sleytr@gmail.com |
d8e06bb45fd1f90be90bb45e0c0cc52f227b3187 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-eps/huaweicloudsdkeps/v1/model/link.py | a9a92750cec83aea4939f5cad6e9fa7a51be5167 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,044 | py | # coding: utf-8
import pprint
import re
import six
class Link:
    """Generated OpenAPI model describing a hyperlink in an API response.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values must be masked in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'href': 'str',
        'rel': 'str'
    }
    attribute_map = {
        'href': 'href',
        'rel': 'rel'
    }
    def __init__(self, href=None, rel=None):
        """Link - a model defined in huaweicloud sdk"""
        self._href = None
        self._rel = None
        self.discriminator = None
        self.href = href
        self.rel = rel
    @property
    def href(self):
        """Gets the href of this Link.

        The URL address of the API.

        :return: The href of this Link.
        :rtype: str
        """
        return self._href
    @href.setter
    def href(self, href):
        """Sets the href of this Link.

        The URL address of the API.

        :param href: The href of this Link.
        :type: str
        """
        self._href = href
    @property
    def rel(self):
        """Gets the rel of this Link.

        The link relation (e.g. "self").

        :return: The rel of this Link.
        :rtype: str
        """
        return self._rel
    @rel.setter
    def rel(self, rel):
        """Sets the rel of this Link.

        The link relation (e.g. "self").

        :param rel: The rel of this Link.
        :type: str
        """
        self._rel = rel
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes rather than exposing raw values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Link):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
fd394c02d2b2cee07e518e829a25fca4ea11bebb | 39ac12bf42f5137562ed12f63fc3c4ff455501cc | /python-advanced/Shallow-deep-copying/shallow-deep-copy.py | de69542e4b605e0b3fc2d52d3ff8e0ff43d8309a | [] | no_license | tiwariutkarsh422/python-advanced | f4f545bac120e143c8f77d566bcda13065c36085 | 1567a727deb561a2a7446550dd619718ad5b779f | refs/heads/master | 2022-10-31T22:53:53.667154 | 2020-06-04T13:47:34 | 2020-06-04T13:47:34 | 267,061,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | import copy
original_list = [2, 4 ,['ada lovelace'], 1965, [7,8,9], [11, 12, 13]]
''' Shallow copies can be created using factory function such as list(), dict() etc.
However shallow copies only create references to the child object present in the original lists.'''
shallow_copy_list = list(original_list)
print('original list before append:', original_list)
print('shallow copied list before append', shallow_copy_list)
print()
original_list.append(['new_append'])
print('original list after append:', original_list)
print('shallow copied list after append', shallow_copy_list)# appending value sin original copy does not affect shallow copy
print()
original_list[2][0] = 'Ed'
print('original list after specified change', original_list)
print('shallow copied list after specofied change:', shallow_copy_list)
print()
''' As we can see above that since shallow copy is a one level deep copy, it
is not truly independent of the original list and it changes the shallow
copied list as well when original list is modified as it is only a reference
to the child objects of original_list before the append.'''
original_list = [2, 4 ,['ada lovelace'], 1965, [7,8,9], [11, 12, 13]]
''' Deep copy can be created usinfg deepcopy() function of copy module.'''
deep_copy_list = copy.deepcopy(original_list)
print('original list before change:', original_list)
print('deep copied list before change', deep_copy_list)
print()
original_list[2][0] = 'Ed'
print('original list after specified change', original_list)
print('deep copied list after specified change:', deep_copy_list)
| [
"noreply@github.com"
] | noreply@github.com |
4273bdd56e2e81b01ff48d2a502388a369e79b17 | e3f7f42caec6b37095d6d2462fb1b0e557d30422 | /server/test/test_tasks.py | 37ffe2f8c16a1393a94b0c6b10c53d7152a444df | [] | no_license | CUBRID/cubrid-manager-server-9.3.0 | 8a6a3aa0a096e74110108d67976bb644c2f47ae8 | 707f164e60b084eee6c72658b8bdb4dd5e6a890e | refs/heads/develop | 2021-07-07T10:03:25.331737 | 2021-04-05T05:00:59 | 2021-04-05T05:00:59 | 233,765,494 | 0 | 4 | null | 2021-04-05T05:00:59 | 2020-01-14T05:43:18 | C++ | UTF-8 | Python | false | false | 3,546 | py | #! /usr/bin/env python
import httplib,urllib
import json
import struct
import os, sys
def findport():
    """Parse $CUBRID/conf/cm_httpd.conf and return the cwm port as a string.

    Scans the config for a line mentioning 'cwm.cubrid.org'; once such a
    line has been seen, the first following 'server host:port;' line
    yields the port (the text between ':' and ';').  Returns None when no
    matching line is found.
    """
    cubrid = os.environ.get("CUBRID")
    conf = cubrid + "/conf/cm_httpd.conf"
    cwm_find = False
    # Use a context manager so the handle is closed (original leaked it).
    with open(conf, "r") as cf:
        for line in cf:
            # NOTE: find() > 0 deliberately ignores a match at column 0,
            # matching the original behavior.
            if line.find("cwm.cubrid.org") > 0:
                cwm_find = True
            if not cwm_find:
                continue
            idx = line.find("server")
            if idx <= 0:
                continue
            rest = line[idx:]
            idx1 = rest.find(":")
            idx2 = rest.find(";")
            if idx1 < 0 or idx2 < 0:
                continue
            return rest[idx1 + 1:idx2]
    return None
#cmsip="10.34.64.193"
cmsip="localhost"
port=int(findport())
url="/cm_api"
testdir="task_test_case_json/"
token=""
CUBRID=""
CUBRID_DATABASES=""
def exec_task(ip, port, url, body):
    """POST `body` to http://ip:port<url> and return the raw response body."""
    conn = httplib.HTTPConnection(ip, port)
    conn.request("POST", url, body)
    resp = conn.getresponse().read()
    conn.close()
    return resp
def load_task(taskfile):
    """Load a JSON task template, expanding $CUBRID/$CUBRID_DATABASES.

    $CUBRID_DATABASES must be replaced first, since $CUBRID is a prefix
    of it.  Returns the decoded JSON object (dict or list).
    """
    # Use a context manager so the handle is closed (original leaked it).
    with open(taskfile, "r") as task:
        filebuf = task.read()
    filebuf = filebuf.replace("$CUBRID_DATABASES", str(CUBRID_DATABASES))
    filebuf = filebuf.replace("$CUBRID", str(CUBRID))
    return json.loads(filebuf)
def do_one_job(taskfile, token):
    """Run one task file against the CM API and print a colored status line.

    A task file may contain a single JSON request or a list of requests;
    each request is stamped with the session token before being sent.
    Returns the decoded JSON response of the (last) request.
    """
    request = load_task(taskfile)
    if list == type(request):
        for req in request:
            req["token"] = token
            response = exec_task(cmsip, port, url, json.dumps(req))
            data=json.loads(response.decode())
            # ANSI colors: red (31) for failures, green (32) for success.
            if data["status"] == "failure":
                print (data["task"] + " : " + '\033[31m{0}\033[0m'.format(data["note"]))
            else:
                print (data["task"] + " : " + '\033[32m{0}\033[0m'.format(data["status"]))
    else:
        req = request
        req["token"] = token
        response = exec_task(cmsip, port, url, json.dumps(req))
        data=json.loads(response.decode())
        if data["status"] == "failure":
            print (data["task"] + " : " + '\033[31m{0}\033[0m'.format(data["note"]))
        else:
            print (data["task"] + " : " + '\033[32m{0}\033[0m'.format(data["status"]))
    return data
def do_all_jobs(token):
    """Run every task listed in task_list.txt (or in sys.argv[1]).

    Blank lines are skipped; lines starting with '/' are treated as
    section headers and printed in yellow instead of executed.
    """
    if len(sys.argv) == 1:
        tasks=open("task_list.txt", "r")
    else:
        tasks=open(sys.argv[1], "r")
    for data in tasks:
        data=data.rstrip()
        if data == "":
            continue
        if data[0] == '/':
            # Section header: print in yellow (ANSI 33) and move on.
            print '\n\033[33m{0}\033[0m'.format(data)
            continue
        do_one_job(testdir+data+".txt", token)
def init_env():
    """Log in to the CM server and fetch its environment settings.

    Retries the login with an interactively supplied password when the
    canned credentials in login.txt are rejected.  Returns a tuple of
    (session token, $CUBRID install dir, $CUBRID_DATABASES data dir).
    """
    response = do_one_job(testdir+"/login.txt", "")
    if response["status"] == "failure":
        # First attempt failed - ask the user for the password and retry.
        request = load_task(testdir+"/login.txt")
        passwd = raw_input("Please input the passwd for %s: " %(request["id"]))
        request["password"] = passwd
        response = exec_task(cmsip, port, url, json.dumps(request))
        data=json.loads(response.decode())
        if data["status"] == "failure":
            print (data["task"] + " : " + '\033[31m{0}\033[0m'.format(data["note"]))
        else:
            print (data["task"] + " : " + '\033[32m{0}\033[0m'.format(data["status"]))
        response = data
    token = response["token"]
    response = do_one_job(testdir+"/getenv.txt", token)
    bindir = response["CUBRID"]
    datadir = response["CUBRID_DATABASES"]
    return token, bindir, datadir
token, CUBRID, CUBRID_DATABASES = init_env()
#print (token, CUBRID, CUBRID_DATABASES)
#do_one_job("task_json/renamedb.txt", token)
do_all_jobs(token)
exec_task(cmsip, port, "/upload", "")
| [
"hwnam@cubrid.com"
] | hwnam@cubrid.com |
5a66a6cfa1ee27858fb8d075bb59744127503cf4 | a64a465700bbae8443ff3b38c45e6f37b89e2f59 | /overview/admin.py | a3e0735ba663daf92b009f7e9db2c312af3c3c7e | [] | no_license | Bencbabcock/4dafuture-website | 8debcdc8bf9bd8dab7407b976bced1fe7b7213bc | 73206b916699d9a1398e6af95036477c7fdeb236 | refs/heads/main | 2023-03-12T15:35:41.297864 | 2021-03-05T16:00:46 | 2021-03-05T16:00:46 | 344,527,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from django.contrib import admin
from .models import Announcement
# Register your models here.
admin.site.register(Announcement)
| [
"bencbabcock01"
] | bencbabcock01 |
26e2176af4f3535cb88f8d4a8b4e890c0adf5c2a | 883c09284c28311371561b7b14a0494396d010a8 | /symkala_env/lib/python2.7/site-packages/tinys3/util.py | 3c95a282f80031d25c511eab72b12f45f73ab7fe | [
"Apache-2.0"
] | permissive | bopopescu/symkalaResearch | dd2e5c799aed2bef89c213fac9a84b7858b68ed4 | 5c2bc918f8f4598ed92e0ba482a77acca25ba3ba | refs/heads/master | 2022-09-30T15:18:58.028066 | 2019-12-03T00:17:30 | 2019-12-03T00:17:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,825 | py | import os
class LenWrapperStream(object):
    """
    A simple class to wrap a stream and provide length capability
    for streams like cStringIO.

    We do it because requests will try to fall back to chunked transfer if
    it can't extract the len attribute of the object it gets, and S3 doesn't
    support chunked transfer.

    In some cases, like cStringIO, it may cause some issues, so we wrap the
    stream with a class of our own that proxies the stream and provides a
    proper len attribute.
    """

    def __init__(self, stream):
        """
        Creates a new wrapper from the given stream.

        Params:
            - stream    The baseline stream
        """
        self.stream = stream

    def read(self, n=-1):
        """Proxy for reading the stream"""
        return self.stream.read(n)

    def __iter__(self):
        """Proxy for iterating the stream"""
        return self.stream

    def seek(self, pos, mode=0):
        """Proxy for the `seek` method of the underlying stream"""
        return self.stream.seek(pos, mode)

    def tell(self):
        """Proxy for the `tell` method of the underlying stream"""
        return self.stream.tell()

    def __len__(self):
        """
        Calculate the stream length in a fail-safe way.

        Tries, in order: __len__, a `len` attribute, fstat on a real file
        descriptor, and finally measuring bytes from the current position
        to the end of the stream (position is restored afterwards).
        """
        o = self.stream
        # If we have a '__len__' method
        if hasattr(o, '__len__'):
            return len(o)
        # If we have a len property
        if hasattr(o, 'len'):
            return o.len
        # If we have a fileno property
        if hasattr(o, 'fileno'):
            try:
                return os.fstat(o.fileno()).st_size
            except (IOError, OSError):
                # In-memory streams (e.g. BytesIO) expose fileno() but raise
                # when it is called - fall back to the manual measurement.
                pass
        # Calculate based on bytes to end of content: remember where we are,
        # seek to the end, diff, and seek back.
        start_pos = o.tell()
        o.seek(0, os.SEEK_END)
        size = o.tell() - start_pos
        o.seek(start_pos)
        return size

    def __eq__(self, other):
        """
        Equal when compared against the underlying stream itself, or
        against another wrapper of the same stream.
        """
        if self.stream == other:
            return True
        if isinstance(other, LenWrapperStream) and other.stream == self.stream:
            return True
        # BUG FIX: the original fell off the end and returned None here;
        # return an explicit boolean instead.
        return False

    @property
    def closed(self):
        """Proxy for the underlying stream closed property"""
        return self.stream.closed

    def __repr__(self):
        """Proxy for the repr of the stream"""
        return repr(self.stream)
return repr(self.stream) | [
"will@tulcolabs.com"
] | will@tulcolabs.com |
34a1e201add585aa04483afc9282d5dd3ebcab53 | 60d5ea4f007d49768d250ef394003f554003e4d0 | /python/Linked List/148.Sort List.py | df0485a4e3990534fe5b2bb38f8196871282c2ac | [] | no_license | EvanJamesMG/Leetcode | dd7771beb119ea1250dbb3b147a09053298cd63b | fa638c7fda3802e9f4e0751a2c4c084edf09a441 | refs/heads/master | 2021-01-10T17:11:10.896393 | 2017-12-01T16:04:44 | 2017-12-01T16:04:44 | 46,968,756 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | py | # coding=utf-8
'''
Sort a linked list in O(n log n) time using constant space complexity.
'''
# Definition for singly-linked list.
class ListNode(object):
    """A node of a singly-linked list."""
    def __init__(self, x):
        self.val = x      # payload value
        self.next = None  # successor node; None marks the tail
'''
归并排序,最佳时间复杂度O(n log n) 最坏的时间复杂度O(n log n)
由于题目对时间复杂度和空间复杂度要求比较高,所以查看了各种解法,最好的解法就是归并排序,由于
链表在归并操作时并不需要像数组的归并操作那样分配一个临时数组空间,所以这样就是常数空间复杂度了,当然这里不考虑递归所产生的系统调用的栈。
这里涉及到一个链表常用的操作,即快慢指针的技巧。设置slow和fast指针,
开始它们都指向表头,fast每次走两步,slow每次走一步,fast到链表尾部时,slow正好到中间,这样就将链表截为两段。
'''
class Solution:
    # @param head, a ListNode
    # @return a ListNode
    def merge(self, head1, head2):
        """Merge two sorted linked lists; return the head of the result."""
        if head1 == None: return head2
        if head2 == None: return head1
        dummy = ListNode(0) # dummy head node simplifies appending during the merge
        p = dummy
        while head1 and head2:
            if head1.val <= head2.val:
                p.next = head1
                head1 = head1.next
                p = p.next
            else:
                p.next = head2
                head2 = head2.next
                p = p.next
        # Attach whatever remains of the non-exhausted list.
        if head1 == None:
            p.next = head2
        if head2 == None:
            p.next = head1
        return dummy.next
    def sortList(self, head):
        """Merge-sort a linked list in O(n log n) time, O(1) extra space
        (ignoring recursion stack)."""
        if head == None or head.next == None:
            return head
        slow = head; fast = head # fast/slow pointer technique, used to split the list in half
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        head1 = head
        head2 = slow.next
        slow.next = None # head1 and head2 are now the heads of the two halves
        head1 = self.sortList(head1)
        head2 = self.sortList(head2)
        head = self.merge(head1, head2)
        return head
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
#
# if __name__ == "__main__":
#
# result = Solution().numTrees(3)
# print result
| [
"Evan123mg@gmail.com"
] | Evan123mg@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.